client.py
|
from cryptography.fernet import Fernet
import sys
import socket
import select
import errno
import threading
class MoleClient:
def __init__(self, ip="127.0.0.1", port=1234, header_length=10, physical_key_file="./PHYSICAL_KEY", encoding="utf8"):
self.ip = ip
self.port = port
self.header_length = header_length
self.encoding = encoding
self.fernet = None
with open(physical_key_file) as f:
key = f.readline().encode(self.encoding)
self.fernet = Fernet(key)
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client_socket.connect((self.ip, self.port))
self.client_socket.setblocking(True)
self.username = input("Username: ")
self.raw_send(self.username)
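    # Wire format: every message is prefixed with a fixed-width ASCII header
    # (header_length bytes) holding the payload length, so the receiver knows
    # exactly how many bytes belong to the message.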
def raw_send(self, data, encrypt=False):
data = data.encode(self.encoding)
if encrypt:
data = self.fernet.encrypt(data)
header = f"{len(data):<{self.header_length}}".encode(self.encoding)
        self.client_socket.sendall(header + data)  # sendall() guarantees the full frame is written
    def raw_recv(self, decrypt=False):
        header = self.client_socket.recv(self.header_length)
        if not len(header):
            print("MOLE: Connection closed by the server!")
            sys.exit()
        data_length = int(header.decode(self.encoding).strip())
        # recv() may return fewer bytes than requested, so read until the full payload arrives
        data = b""
        while len(data) < data_length:
            chunk = self.client_socket.recv(data_length - len(data))
            if not chunk:
                print("MOLE: Connection closed by the server!")
                sys.exit()
            data += chunk
        if decrypt:
            data = self.fernet.decrypt(data)
        return data.decode(self.encoding)
def sending(self):
while True:
message = input(f"{self.username} > ")
if message:
self.raw_send(message)
    def receiving(self):
while True:
try:
while True:
user = self.raw_recv()
message = self.raw_recv()
print()
print(f"{user} > {message}")
except IOError as e:
if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
print("MOLE: reading error!", str(e))
sys.exit()
continue
except Exception as e:
print("MOLE: general error!", str(e))
sys.exit()
if __name__ == "__main__":
client = MoleClient()
sender = threading.Thread(target=client.sending)
    receiver = threading.Thread(target=client.receiving)
    sender.start()
    receiver.start()
|
libra.py
|
import serial
import sys
import threading
import queue
import datetime
import subprocess
import requests
import time
# Commands
CMD_CONT_READ = "SIR\r\n".encode("ascii")
CMD_SET_TARE = "T\r\n".encode("ascii")
CMD_CALIBRATE_SETTINGS = "C0\r\n".encode("ascii")
CMD_CALIBRATE_SET_SETTINGS = "C0 0 1\r\n".encode("ascii")
CMD_CALIBRATE_INIT_CALIB = "C2\r\n".encode("ascii")
# Return options
STABLE = "S"
UNSTABLE = "SD"
# Files
COUNTING_FILE = "counting.csv"
ALL_FILE = "data.csv"
# Units
GRAM = "g"
# NaN used for stabilization time while unstable
NAN = float("nan")
# Used for determining which type of counting a user wants
COUNT_ROW = "in_row"
COUNT_ONCE = "once"
class Libra():
ser = None # serial to communicate with libra
mutex = None # lock for serial port availability
thread_cont_read = None # thread for constant reading
thread_writefile = None # thread for writing data to file, should always be running
queue_cont_read = None # queue for storing SIR weight data
queue_backup = queue.Queue() # same as queue_cont_read but only GUI can empty
queue_special = None # used for anything else
queue_writefile = None # queue for writing data to file
env_data = None # stores a dictionary of environment data (humidity, temperature, and pressure)
# Custom signals
STOP_COUNTING = False
STOP_MAIN = False
STOP_WRITE = False
def __init__(self, port=None, baudrate=None, bytesize=None, parity=None, stopbits=None, xonxoff=None):
self.current_tare = 0.00 # current tare setting
        self.stabilization_time = NAN  # time from first UNSTABLE to first STABLE, initially NaN
self.stabilization_time_start = None # time of first UNSTABLE
self.count_results_row = 0 # Used for getting results of counting, either number of pieces in a row or at once present
self.count_results_once = 0
self.target = ""
self.all_file = ALL_FILE
self.queue_stdout = queue.Queue()
if port is not None:
try:
self.openSerial(port, baudrate, bytesize, parity, stopbits, xonxoff)
except:
self.queue_stdout.put("Serial port error")
self.queue_cont_read = queue.Queue()
self.queue_backup = queue.Queue()
self.queue_writefile = queue.Queue()
self.thread_writefile = threading.Thread(
target=self.writefile,
name="writefile",
daemon=True
)
self.thread_writefile.start()
self.getEnvData()
    def __str__(self):
        return ("Libra on port {0} with the following configuration:\n"
                "\tPORT = {0}\n"
                "\tBAUDRATE = {1}\n"
                "\tBYTESIZE = {2}\n"
                "\tPARITY = {3}\n"
                "\tSTOPBITS = {4}\n"
                "\tXONXOFF = {5}\n").format(self.ser.port, self.ser.baudrate,
                                            self.ser.bytesize, self.ser.parity,
                                            self.ser.stopbits, self.ser.xonxoff)
def getEnvData(self, p="Zračni tlak: ", h="Vlažnost zraka: ", t="LJUBLJANA: "):
data = requests.get(
"http://meteo.arso.gov.si/uploads/probase/www/observ/surface/text/sl/observationAms_LJUBL-ANA_BEZIGRAD_latest.rss")
env_data = {}
i = data.text.find(p)
env_data["pressure"] = data.text[i + len(p):i + len(p) + 4] + " mbar"
i = data.text.find(h)
env_data["humidity"] = data.text[i + len(h):i + len(h) + 2] + " %"
i = data.text.find(t)
env_data["temperature"] = data.text[i + len(t):i + len(t) + 2] + " °C"
self.env_data = env_data
return env_data
def openSerial(self, port, baudrate, bytesize, parity, stopbits, xonxoff):
self.ser = serial.Serial(
port=port,
baudrate=baudrate,
bytesize=bytesize,
parity=parity,
stopbits=stopbits,
xonxoff=xonxoff
)
self.current_tare = 0 #self.getTareFromScale() # get initial tare value
self.mutex = threading.Lock()
self.startReadCont()
def startReadCont(self):
self.STOP_MAIN = False
assert self.ser is not None, "[startReadCont] Not connected to serial port"
if self.thread_cont_read is None:
self.thread_cont_read = threading.Thread(
target=self.readCont,
name="cont_read",
daemon=True
)
self.mutex.acquire()
self.thread_cont_read.start() # when killing this process, release lock
self.queue_stdout.put("thread_cont_read started!")
def processRead(self, string):
string = string.decode('ascii').strip().split()
return [datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")] + string
def readCont(self):
self.ser.write(CMD_CONT_READ)
while True:
if self.STOP_MAIN:
break
now = datetime.datetime.now()
str_read = self.ser.read_until(serial.CR+serial.LF)
str_read = self.processRead(str_read)
self.queue_cont_read.put(str_read)
self.queue_backup.put(str_read)
if self.stabilization_time_start is None and str_read[1] == UNSTABLE:
self.stabilization_time = NAN
self.stabilization_time_start = now
elif str_read[1] == STABLE and self.stabilization_time_start is not None:
timediff = now - self.stabilization_time_start
self.stabilization_time_start = None
self.stabilization_time = timediff.seconds + round(timediff.microseconds/10**6, 3)
self.queue_writefile.put(str_read+[str(self.stabilization_time)]+[self.env_data["pressure"], self.env_data["humidity"], self.env_data["temperature"]])
    def countApi(self, method, stop=False, target=None):
self.thread_count_stop = stop
if stop:
self.queue_stdout.put("[countApi] exit")
return
self.queue_stdout.put("[countApi] Starting thread with method " + method)
if method == COUNT_ROW:
self.thread_count = threading.Thread(target=self.countObjectsInRow, name="countAPI", daemon=True)
elif method == COUNT_ONCE:
self.thread_count = threading.Thread(target=self.countObjectsAtOnce, name="countAPI", daemon=True, args=[target])
else:
self.queue_stdout.put("[countApi] Unknown method ...")
return
self.thread_count.start()
def countObjectsInRow(self):
self.queue_stdout.put("[countObjectsInRow] Waiting for stable zero ...")
while not self.thread_count_stop:
m = self.queue_cont_read.get()
if m[1] == STABLE and float(m[2]) < 0.1:
break
self.queue_stdout.put("[countObjectsInRow] Stable zero acquired, start weighting ...")
objects = []
new = False
while not self.thread_count_stop:
if self.STOP_COUNTING or self.STOP_MAIN:
break
m = self.queue_cont_read.get()
if m[1] == STABLE and new and float(m[2]) > 0.1:
new = False
objects.append(m)
self.queue_stdout.put('beep')
elif m[1] == UNSTABLE:
new = True
try:
            id_counting = str(int(subprocess.check_output(["tail", "-1", COUNTING_FILE]).decode().split(',')[0]) + 1)
except:
id_counting = "0"
f = open(COUNTING_FILE, mode="a+")
for obj in objects:
str_filewrite = id_counting + "," + ",".join(obj) + "\n"
if f.write(str_filewrite) != len(str_filewrite):
self.queue_stdout.put("[countObjectsInRow] failed to write object:\n\t{}\nto file".format(str_filewrite))
f.close()
self.count_results_row = len(objects)
def countObjectsAtOnce(self, target_weight=None):
self.target = None
if target_weight is None: # we need to get stable weight of an object unless it was already supplied
self.queue_stdout.put("[countObjectsAtOnce] Waiting for stable weight ...")
while True:
m = self.queue_cont_read.get()
if m[1] == STABLE and float(m[2]) > 0.1:
self.target = float(m[2])
break
else:
self.target = target_weight
self.queue_stdout.put("[countObjectsAtOnce] Stable weight acquired, target weight is {0}".format(self.target))
self.queue_stdout.put("[countObjectsAtOnce] Remove object and weight for stable zero ...")
while True:
m = self.queue_cont_read.get()
if m[1] == STABLE and float(m[2]) < 0.1:
break
self.queue_stdout.put("[countObjectsAtOnce] Stable zero acquired. Put objects on weight")
# weight will now become UNSTABLE due to change of pieces on scale
weight = None
while True:
m = self.queue_cont_read.get()
if m[1] == STABLE and float(m[2]) > 0.1:
weight = float(m[2])
break
if weight is not None:
self.queue_stdout.put("[countObjectsAtOnce] Counted {0} objects".format(weight/self.target))
self.count_results_once = weight / self.target
else:
self.queue_stdout.put("[countObjectsAtOnce] Counting failed. Measured weight is None")
self.count_results_once = None
# Write to file on new stable weight.
    def writefile(self):
        while not self.STOP_WRITE:
            try:
                # use a timeout so the STOP_WRITE flag is re-checked periodically
                m = self.queue_writefile.get(timeout=1)
            except queue.Empty:
                continue
            self.queue_stdout.put(m)
            str_filewrite = ",".join(m) + "\n"
            with open(self.all_file, "a+") as f:
                if f.write(str_filewrite) != len(str_filewrite):
                    self.queue_stdout.put("[writefile] error writing to file")
    # API for setting the tare value. If no value is given, tare at the current weight.
    def setTare(self, value=None, zero=False):
        # `value` is accepted for compatibility with the menu below; this scale
        # only supports taring at the next stable weight, so it is unused here.
# signal to thread_read_cont to stop and acquire mutex
self.stopReadCont()
self.mutex.acquire()
while not self.queue_cont_read.empty():
self.queue_stdout.put(self.queue_cont_read.get())
# Our scale only supports tare on next stable weight.
self.ser.write(CMD_SET_TARE)
# Response is "T S value unit". If not "S", something went wrong.
response = self.ser.read_until(serial.CR+serial.LF).decode("ascii").strip()
response_parts = response.split()
        if not zero:
            # response format is "T S value unit", so the value is the third field
            self.current_tare += float(response_parts[2])
self.queue_stdout.put(self.current_tare)
# release mutex and continue with continuous weight reading
self.mutex.release()
self.startReadCont()
# Could be deprecated but we love to keep backward compatibility ;).
def setZero(self):
        return self.setTare(zero=True)
    # API for stopping the writefile thread. Should only be called at the end of the program.
def stopWritefile(self):
self.STOP_WRITE = True
self.thread_writefile.join()
caller = sys._getframe(1).f_code.co_name
self.queue_stdout.put("[{0}] thread *writefile* joined!".format(caller))
self.thread_writefile = None
    # API for stopping the read_cont thread.
def stopReadCont(self):
self.STOP_MAIN = True
self.thread_cont_read.join()
# self.ser.write("@\r\n".encode("ascii"))
self.mutex.release()
caller = sys._getframe(1).f_code.co_name
self.queue_stdout.put("[{0}] thread *read_cont* joined!".format(caller))
self.thread_cont_read = None
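    # Hypothetical sketch: the menu below calls libra.calibrate(), which is not
    # defined anywhere in this class. A minimal implementation built from the
    # CMD_CALIBRATE_* commands above might look like this; the exact calibration
    # dialogue is scale-specific and assumed here.
    def calibrate(self, weight):
        self.stopReadCont()
        self.mutex.acquire()
        self.ser.write(CMD_CALIBRATE_SETTINGS)      # query current calibration settings
        self.queue_stdout.put(self.ser.read_until(serial.CR + serial.LF).decode("ascii").strip())
        self.ser.write(CMD_CALIBRATE_SET_SETTINGS)  # select external-weight calibration
        self.queue_stdout.put(self.ser.read_until(serial.CR + serial.LF).decode("ascii").strip())
        self.queue_stdout.put("[calibrate] Place the {0} g calibration weight on the scale".format(weight))
        self.ser.write(CMD_CALIBRATE_INIT_CALIB)    # start the calibration run
        self.queue_stdout.put(self.ser.read_until(serial.CR + serial.LF).decode("ascii").strip())
        self.mutex.release()
        self.startReadCont()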
if __name__ == "__main__":
libra = Libra(
port="/dev/ttyUSB0",
baudrate=2400,
bytesize=serial.SEVENBITS,
parity=serial.PARITY_EVEN,
stopbits=serial.STOPBITS_ONE,
xonxoff=True
)
for thread in threading.enumerate():
print("[main] Thread: " + thread.name)
x = input("Press key to select option: ").strip()
try:
if x == "cr":
libra.countObjectsInRow()
elif x == "ca":
tw = input("Target weight (None): ")
if not tw:
libra.countObjectsAtOnce()
else:
libra.countObjectsAtOnce(target_weight=float(tw))
elif x == "calib":
w = float(input("Weight for calibration: "))
libra.calibrate(weight=w)
elif x == "t":
t = input("Tare: ")
if not t:
libra.setTare()
else:
libra.setTare(value=float(t))
        while True:
            time.sleep(1)  # idle loop; avoid busy-waiting while worker threads run
except KeyboardInterrupt:
libra.stopReadCont()
libra.stopWritefile()
|
data_util.py
|
"""
This code is based on https://github.com/fchollet/keras/blob/master/keras/utils/data_utils.py
"""
import time
import numpy as np
import threading
import multiprocessing
try:
import queue
except ImportError:
import Queue as queue
class GeneratorEnqueuer(object):
"""
Builds a queue out of a data generator.
Args:
generator: a generator function which endlessly yields data
use_multiprocessing (bool): use multiprocessing if True,
otherwise use threading.
wait_time (float): time to sleep in-between calls to `put()`.
random_seed (int): Initial seed for workers,
            will be incremented by one for each worker.
"""
def __init__(self,
generator,
use_multiprocessing=False,
wait_time=0.05,
random_seed=None):
self.wait_time = wait_time
self._generator = generator
self._use_multiprocessing = use_multiprocessing
self._threads = []
self._stop_event = None
self.queue = None
self._manager = None
self.seed = random_seed
def start(self, workers=1, max_queue_size=10):
"""
Start worker threads which add data from the generator into the queue.
Args:
workers (int): number of worker threads
max_queue_size (int): queue size
(when full, threads could block on `put()`)
"""
def data_generator_task():
"""
Data generator task.
"""
def task():
if (self.queue is not None and
self.queue.qsize() < max_queue_size):
generator_output = next(self._generator)
self.queue.put((generator_output))
else:
time.sleep(self.wait_time)
if not self._use_multiprocessing:
while not self._stop_event.is_set():
with self.genlock:
try:
task()
except Exception:
self._stop_event.set()
break
else:
while not self._stop_event.is_set():
try:
task()
except Exception:
self._stop_event.set()
break
try:
if self._use_multiprocessing:
self._manager = multiprocessing.Manager()
self.queue = self._manager.Queue(maxsize=max_queue_size)
self._stop_event = multiprocessing.Event()
else:
self.genlock = threading.Lock()
self.queue = queue.Queue()
self._stop_event = threading.Event()
for _ in range(workers):
if self._use_multiprocessing:
# Reset random seed else all children processes
# share the same seed
np.random.seed(self.seed)
thread = multiprocessing.Process(target=data_generator_task)
thread.daemon = True
if self.seed is not None:
self.seed += 1
else:
thread = threading.Thread(target=data_generator_task)
self._threads.append(thread)
thread.start()
except:
self.stop()
raise
def is_running(self):
"""
Returns:
            bool: Whether the worker threads are running.
"""
return self._stop_event is not None and not self._stop_event.is_set()
def stop(self, timeout=None):
"""
        Stops running threads and waits for them to exit, if necessary.
Should be called by the same thread which called `start()`.
Args:
timeout(int|None): maximum time to wait on `thread.join()`.
"""
if self.is_running():
self._stop_event.set()
for thread in self._threads:
if self._use_multiprocessing:
if thread.is_alive():
thread.terminate()
else:
thread.join(timeout)
if self._manager:
self._manager.shutdown()
self._threads = []
self._stop_event = None
self.queue = None
def get(self):
"""
Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Yields
tuple of data in the queue.
"""
while self.is_running():
if not self.queue.empty():
inputs = self.queue.get()
if inputs is not None:
yield inputs
else:
time.sleep(self.wait_time)
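# Example usage (a minimal sketch; `batch_generator` is a hypothetical endless
# generator of training batches, not part of this module):
#
#   def batch_generator():
#       while True:
#           yield np.random.rand(32, 10)
#
#   enqueuer = GeneratorEnqueuer(batch_generator(), use_multiprocessing=False)
#   enqueuer.start(workers=2, max_queue_size=10)
#   batches = enqueuer.get()
#   for _ in range(100):
#       batch = next(batches)
#   enqueuer.stop()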
|
assistant.py
|
#
# K9 Conversation by Richard Hopkins using
# Kitt-AI Snowboy for hotword recognition
# Watson Speech to Text (streaming to sockets)
# Watson Conversation
# eSpeak Text to Speech
# Robot status displayed with Adafruit PWM Servo Driver driving LED brightness
#
# Original TTS elements (now much revised) derived from
# Joshua Rees-Jones, IBM intern
# "Getting robots to listen"
#
# Conversation elements derived from:
# watson-developer-cloud/python-sdk/examples/conversation_v1.py
#
# Snowboy elements derived from
# Kitt-AI/snowboy/examples/Python/demo.py
#
# Released under The Unlicense license
import os, sys, subprocess, threading, time, json, re, signal, snowboydecoder, ssl
import requests
from requests.auth import HTTPBasicAuth
from ws4py.client.threadedclient import WebSocketClient
from watson_developer_cloud import ConversationV1
import Adafruit_PCA9685
STTusername = os.environ['WTTSusername']
STTpassword = os.environ['WTTSpassword']
WAusername = os.environ['WCusername']
WApassword = os.environ['WCpassword']
WAworkspace_id = os.environ['WCworkspace']
conversation = ConversationV1(
username=WAusername,
password=WApassword,
version='2018-02-16')
#print "STT Username: " + str(STTusername)
#print "STT Password: " + str(STTpassword)
#print "WA Username: " + str(WAusername)
#print "WA Password: " + str(WApassword)
#print "WA Workspace: " + str(WAworkspace_id)
r = requests.get('https://stream.watsonplatform.net/authorization/api/v1/token?url=https://stream.watsonplatform.net/speech-to-text/api', auth=HTTPBasicAuth(STTusername, STTpassword))
#print r.status_code
auth_token = r.content
# Initialising TTS global variables
speech_received = False # has speech been returned by Watson?
transcript = "silence" # default value for speech if nothing returned
# Initialise snowboy global variables
model = "./K9.pmdl"
interrupted = False
# Initialise the PWM device using the default address
#pwm = Adafruit_PCA9685.PCA9685()
#pwm.set_pwm_freq(100) # Set frequency to 100 Hz
pwm=0
# Create names for each PWM channel
PWM_eye = 0
PWM_hover = 1
def signal_handler(signal, frame):
global interrupted
interrupted = True
def interrupt_callback():
global interrupted
return interrupted
signal.signal(signal.SIGINT, signal_handler)
detector = snowboydecoder.HotwordDetector(model, sensitivity=0.5)
class SpeechToTextClient(WebSocketClient):
def __init__(self):
ws_url = 'wss://stream.watsonplatform.net/speech-to-text/api/v1/recognize'
self.listening = False
try:
WebSocketClient.__init__(self, ws_url,
headers=[("X-Watson-Authorization-Token",auth_token)])
self.connect()
except: print "Failed to open WebSocket."
def opened(self):
#print "opened(self) and self.send"
self.send('{"action":"start","content-type":"audio/l16;rate=16000","interim_results":true}')
self.stream_audio_thread = threading.Thread(target=self.stream_audio)
self.stream_audio_thread.start()
def received_message(self, message):
global speech_received
global transcript
global pwm
message = json.loads(str(message))
#print "Received: " + str(message)
if "state" in message and not speech_received:
if message["state"] == "listening":
self.listening = True
set_PWM(PWM_eye,100)
print "+++ SPEAK NOW +++"
if "results" in message:
#print message['results'][0]['final']
#print message['results'][0]['alternatives'][0]['transcript']
if message['results'][0]['final'] :
transcript = message['results'][0]['alternatives'][0]['transcript']
self.listening = False
speech_received = True
#print "Sending stop transcription message"
self.send('{"action": "stop"}')
set_PWM(PWM_eye,3)
self.close()
print "+++ SPEECH PROCESSED +++"
if "error" in message:
speech_received = True
self.listening = False
self.send('{"action": "stop"}')
print "+++ NOTHING HEARD +++"
set_PWM(PWM_eye,3)
self.close()
def stream_audio(self):
#print "Entering stream_audio(self)"
while not self.listening:
time.sleep(0.1)
reccmd = ["arecord", "-f", "S16_LE", "-r", "16000", "-t", "raw"]
#print "arecord and p=subprocess.Popen"
p = subprocess.Popen(reccmd, stdout=subprocess.PIPE)
while self.listening:
#print "while self.listening is true"
data = p.stdout.read(1024)
try:
#print "self.send bytearray"
self.send(bytearray(data), binary=True)
except ssl.SSLError: pass
p.kill()
#print "p.kill()"
    def close(self):
        global speech_received
        self.listening = False
        speech_received = True
self.stream_audio_thread.join()
#print "close self - self.listening false - clean closure"
# K9 hotword has been detected
def K9_detected():
global pwm
print "+++ HOT WORD DETECTED +++"
set_PWM(PWM_eye,30)
global stop_now
stop_now = True # get the detector to terminate
def speech_to_text():
global transcript
global speech_received
speech_received = False # has speech been returned by Watson?
transcript = "silence" # default value for speech if nothing returned
#print "stt_client initialisation"
stt_client = SpeechToTextClient()
while not speech_received:
#print "not hearing anything, so sleeping"
time.sleep(0.1)
return transcript
def stop_snowboy():
global stop_now
return stop_now
# Sets brightness of PWM lights from 0 to 100
def set_PWM(light, brightness):
global pwm
light = int(light)
brightness = int(float(brightness)*40.95)
if light >=0 and light <=15: # check that PWM channel exists
        if brightness >= 0 and brightness <= 4095: # check that brightness value is valid
pass
#pwm.set_pwm(0,light,brightness)
#print "Eye brightness set to: " + str(brightness)
# Initialise the eye lights at 3%
set_PWM(PWM_eye,3)
#print "Calling listen_for_K9"
interrupted = False
stop_now = False
print "+++ Listening for K9 keyword... press Ctrl+Z to exit +++"
detector.start(detected_callback=K9_detected,
interrupt_check=stop_snowboy,
sleep_time=0.03)
detector.terminate()
time.sleep(0.03)
speech_received = False
transcript = "silence"
#print "Calling speech_to_text"
speech_to_text()
#print "To conversation: " + transcript
response = conversation.message(workspace_id=WAworkspace_id, input={'text':transcript})
results = re.search(': \{u\'text\': \[u\'(.*)\'\], u\'log', str(response))
answer = results.group(1)
answer = './tts ' + answer
subprocess.call(answer, shell=True)
|
ios_device.py
|
# Copyright 2019 WebPageTest LLC.
# Copyright 2017 Google Inc.
# Use of this source code is governed by the Apache 2.0 license that can be
# found in the LICENSE file.
"""Interface for iWptBrowser on iOS devices"""
import base64
import logging
import multiprocessing
import os
import platform
import select
import shutil
import subprocess
import sys
import threading
if (sys.version_info >= (3, 0)):
from time import monotonic
else:
from monotonic import monotonic
try:
import ujson as json
except BaseException:
import json
class iOSDevice(object):
"""iOS device interface"""
def __init__(self, serial=None):
self.socket = None
self.serial = serial
self.must_disconnect = False
self.mux = None
self.message_thread = None
self.messages = multiprocessing.JoinableQueue()
self.notification_queue = None
self.current_id = 0
self.video_file = None
self.last_video_data = None
self.video_size = 0
self.last_restart = monotonic()
def check_install(self):
"""Check to make sure usbmux is installed and the device is available"""
ret = False
plat = platform.system()
if plat == "Darwin" or plat == "Linux":
if not os.path.exists('/var/run/usbmuxd'):
subprocess.call(['sudo', 'usbmuxd'])
if os.path.exists('/var/run/usbmuxd'):
ret = True
else:
print("usbmuxd is not available, please try installing it manually")
else:
print("iOS is only supported on Mac and Linux")
return ret
def startup(self):
"""Initialize USBMux if it isn't already"""
if self.mux is None:
try:
if not os.path.exists('/var/run/usbmuxd'):
subprocess.call(['sudo', 'usbmuxd'])
from .support.ios.usbmux import USBMux
self.mux = USBMux()
except Exception:
logging.exception("Error initializing usbmux")
def get_devices(self):
"""Get a list of available devices"""
self.startup()
devices = []
self.mux.process(0.1)
if self.mux.devices:
for device in self.mux.devices:
devices.append(device.serial)
return devices
def is_device_ready(self):
"""Get the battery level and only if it responds and is over 75% is it ok"""
is_ready = False
response = self.send_message("battery")
if response:
level = int(round(float(response) * 100))
if level > 75:
logging.debug("Battery level = %d%%", level)
is_ready = True
else:
logging.debug("Device battery is low (%d%%)", level)
else:
logging.debug("Device is not connected (or iWptBrowser is not running)")
self.disconnect()
return is_ready
def get_os_version(self):
"""Get the running version of iOS"""
return self.send_message("osversion")
def clear_cache(self):
"""Clear the browser cache"""
is_ok = False
if self.send_message("clearcache"):
is_ok = True
return is_ok
def start_browser(self):
"""Start the browser"""
is_ok = False
if self.send_message("startbrowser"):
is_ok = True
return is_ok
def stop_browser(self):
"""Stop the browser"""
is_ok = False
if self.send_message("stopbrowser"):
is_ok = True
return is_ok
def navigate(self, url):
"""Navigate to the given URL"""
is_ok = False
if self.send_message("navigate", data=url):
is_ok = True
return is_ok
def execute_js(self, script, remove_orange=False):
"""Run the given script"""
command = "exec"
if remove_orange:
command += ".removeorange"
ret = self.send_message(command, data=script)
try:
ret = json.loads(ret)
except Exception:
logging.exception('Error running script')
return ret
def set_user_agent(self, ua_string):
"""Override the UA string"""
is_ok = False
if self.send_message("setuseragent", data=ua_string):
is_ok = True
return is_ok
def set_cookie(self, url, name, value):
"""Set a cookie"""
is_ok = False
if self.send_message("setcookie", data=url + ";" + name + ";" + value):
is_ok = True
return is_ok
def show_orange(self):
"""Bring up the orange overlay"""
is_ok = False
if self.send_message("showorange"):
is_ok = True
return is_ok
def screenshot(self, png=True):
"""Capture a screenshot (PNG or JPEG)"""
msg = "screenshotbig" if png else "screenshotbigjpeg"
return self.send_message(msg)
def start_video(self):
"""Start video capture"""
is_ok = False
if self.send_message("startvideo"):
is_ok = True
return is_ok
def stop_video(self):
"""Stop the video capture and store it at the given path"""
is_ok = False
if self.send_message("stopvideo"):
is_ok = True
return is_ok
def get_video(self, video_path):
"""Retrieve the recorded video"""
is_ok = False
self.video_size = 0
if self.video_file is not None:
self.video_file.close()
self.video_file = open(video_path, 'wb')
if self.video_file:
if self.send_message("getvideo", timeout=600):
logging.debug("Video complete: %d bytes", self.video_size)
self.send_message("deletevideo")
if self.video_size > 0:
is_ok = True
self.video_file.close()
self.video_file = None
return is_ok
def landscape(self):
"""Switch to landscape orientation"""
self.send_message("landscape", wait=False)
def portrait(self):
"""Switch to portrait orientation"""
self.send_message("portrait", wait=False)
def connect(self):
"""Connect to the device with the matching serial number"""
self.startup()
connecting = False
needs_restart = False
try:
if self.socket is None:
self.disconnect()
self.mux.process(0.1)
devices = self.mux.devices
if devices:
for device in devices:
if self.serial is None or device.serial == self.serial:
logging.debug("Connecting to device %s", device.serial)
self.serial = device.serial
self.must_disconnect = False
connecting = True
self.socket = self.mux.connect(device, 19222)
self.message_thread = threading.Thread(target=self.pump_messages)
self.message_thread.daemon = True
self.message_thread.start()
break
except Exception:
logging.exception('Error connecting to device')
# If the app isn't running restart the device (no more than every 10 minutes)
if connecting and monotonic() - self.last_restart > 600:
needs_restart = True
if needs_restart:
self.last_restart = monotonic()
try:
subprocess.call(['idevicediagnostics', 'restart'])
except Exception:
logging.exception('Error restarting device')
return self.socket is not None
def disconnect(self):
"""Disconnect from the device"""
self.must_disconnect = True
if self.socket is not None:
self.socket.close()
self.socket = None
if self.message_thread is not None:
# self.message_thread.join()
self.message_thread = None
def send_message(self, message, data=None, wait=True, timeout=30):
"""Send a command and get the response"""
response = None
if self.connect():
self.current_id += 1
message_id = self.current_id
msg = "{0:d}:{1}".format(message_id, message)
logging.debug(">>> %s", msg)
if data is not None:
if data.find("\t") >= 0 or data.find("\n") >= 0 or data.find("\r") >= 0:
msg += ".encoded"
data = base64.b64encode(data)
msg += "\t"
msg += data
try:
self.socket.send(msg + "\n")
if wait:
end = monotonic() + timeout
while response is None and monotonic() < end:
try:
msg = self.messages.get(timeout=1)
try:
self.messages.task_done()
if msg:
if msg['msg'] == 'disconnected':
self.disconnect()
self.connect()
elif 'id' in msg and msg['id'] == str(message_id):
if msg['msg'] == 'OK':
if 'data' in msg:
response = msg['data']
else:
response = True
else:
break
except Exception:
logging.exception('Error processing message')
except Exception:
pass
except Exception:
logging.exception('Error sending message')
self.disconnect()
return response
def flush_messages(self):
"""Flush all of the pending messages"""
try:
while True:
self.messages.get_nowait()
self.messages.task_done()
except Exception:
pass
def pump_messages(self):
"""Background thread for reading messages from the browser"""
buff = ""
try:
            while not self.must_disconnect and self.socket is not None:
rlo, _, xlo = select.select([self.socket], [], [self.socket])
try:
if xlo:
logging.debug("iWptBrowser disconnected")
self.messages.put({"msg": "disconnected"})
return
if rlo:
data_in = self.socket.recv(8192)
if not data_in:
logging.debug("iWptBrowser disconnected")
self.messages.put({"msg": "disconnected"})
return
buff += data_in
pos = 0
while pos >= 0:
pos = buff.find("\n")
if pos >= 0:
message = buff[:pos].strip()
buff = buff[pos + 1:]
if message:
self.process_raw_message(message)
except Exception:
logging.exception('Error pumping message')
except Exception:
pass
def process_raw_message(self, message):
"""Process a single message string"""
ts_end = message.find("\t")
if ts_end > 0:
message_len = len(message)
timestamp = message[:ts_end]
event_end = message.find("\t", ts_end + 1)
if event_end == -1:
event_end = message_len
event = message[ts_end + 1:event_end]
if timestamp and event:
msg = {'ts': timestamp}
data = None
if event_end < message_len:
data = message[event_end + 1:]
parts = event.split(":")
if len(parts) > 1:
msg['id'] = parts[0]
message = parts[1].strip()
else:
message = parts[0].strip()
if message:
parts = message.split("!")
msg['msg'] = parts[0].strip()
if 'encoded' in parts and data is not None:
data = base64.b64decode(data)
if data is not None:
msg['data'] = data
self.process_message(msg)
def process_message(self, msg):
"""Handle a single decoded message"""
if msg['msg'] == 'VideoData' and 'data' in msg:
now = monotonic()
self.video_size += len(msg['data'])
if self.last_video_data is None or now - self.last_video_data >= 0.5:
logging.debug('<<< Video data (current size: %d)', self.video_size)
self.last_video_data = now
if self.video_file is not None:
self.video_file.write(msg['data'])
elif 'id' in msg:
logging.debug('<<< %s:%s', msg['id'], msg['msg'])
try:
self.messages.put(msg)
except Exception:
logging.exception('Error adding message to queue')
elif self.notification_queue is not None:
logging.debug('<<< %s', msg['msg'])
try:
self.notification_queue.put(msg)
except Exception:
logging.exception('Error adding message to notification queue')
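# Example usage (a minimal sketch; assumes an iOS device running iWptBrowser is
# attached over USB and usbmuxd is available):
#
#   device = iOSDevice()
#   if device.check_install() and device.is_device_ready():
#       device.start_browser()
#       device.navigate("https://www.example.com")
#       png = device.screenshot()
#       device.stop_browser()
#       device.disconnect()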
|
update_from_github.py
|
import os
import sys
import time
import subprocess
import threading
import re
import zipfile
import shutil
import stat
import glob
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath(os.path.join(current_path, os.pardir))
top_path = os.path.abspath(os.path.join(root_path, os.pardir, os.pardir))
code_path = os.path.abspath(os.path.join(root_path, os.pardir))
data_root = os.path.join(top_path, 'data')
python_path = os.path.join(root_path, 'python27', '1.0')
noarch_lib = os.path.join(python_path, 'lib', 'noarch')
sys.path.append(noarch_lib)
import simple_http_client
from xlog import getLogger
xlog = getLogger("launcher")
import config
import update
if not os.path.isdir(data_root):
os.mkdir(data_root)
download_path = os.path.join(data_root, 'downloads')
if not os.path.isdir(download_path):
os.mkdir(download_path)
progress = {}  # link => {"size", "downloaded", "status": downloading|canceled|finished|failed}
progress["update_status"] = "Idle"
update_info = "init"
def init_update_info(check_update):
global update_info
if check_update == "dont-check":
update_info = "dont-check"
elif config.get(["update", "check_update"]) == update_info == "dont-check":
update_info = "init"
elif check_update != "init":
update_info = ""
init_update_info(config.get(["update", "check_update"]))
def request(url, retry=0, timeout=30):
if retry == 0:
if int(config.get(["proxy", "enable"], 0)):
client = simple_http_client.Client(proxy={
"type": config.get(["proxy", "type"], ""),
"host": config.get(["proxy", "host"], ""),
"port": int(config.get(["proxy", "port"], 0)),
"user": config.get(["proxy", "user"], ""),
"pass": config.get(["proxy", "passwd"], ""),
}, timeout=timeout)
else:
client = simple_http_client.Client(timeout=timeout)
else:
cert = os.path.join(data_root, "gae_proxy", "CA.crt")
client = simple_http_client.Client(proxy={
"type": "http",
"host": "127.0.0.1",
"port": 8087,
"user": None,
"pass": None
}, timeout=timeout, cert=cert)
res = client.request("GET", url, read_payload=False)
return res
def download_file(url, filename):
if url not in progress:
progress[url] = {}
progress[url]["status"] = "downloading"
progress[url]["size"] = 1
progress[url]["downloaded"] = 0
else:
if progress[url]["status"] == "downloading":
xlog.warn("url in downloading, %s", url)
return False
for i in range(0, 2):
try:
xlog.info("download %s to %s, retry:%d", url, filename, i)
req = request(url, i, timeout=120)
if not req:
continue
start_time = time.time()
timeout = 300
if req.chunked:
                # file size is unknown; use a large placeholder so progress can be shown
progress[url]["size"] = 20 * 1024 * 1024
downloaded = 0
with open(filename, 'wb') as fp:
while True:
time_left = timeout - (time.time() - start_time)
if time_left < 0:
raise Exception("time out")
dat = req.read(timeout=time_left)
if not dat:
break
fp.write(dat)
downloaded += len(dat)
progress[url]["downloaded"] = downloaded
progress[url]["status"] = "finished"
return True
else:
file_size = progress[url]["size"] = int(req.getheader('Content-Length', 0))
left = file_size
downloaded = 0
with open(filename, 'wb') as fp:
while True:
chunk_len = min(65536, left)
if not chunk_len:
break
chunk = req.read(chunk_len)
if not chunk:
break
fp.write(chunk)
downloaded += len(chunk)
progress[url]["downloaded"] = downloaded
left -= len(chunk)
if downloaded != progress[url]["size"]:
xlog.warn("download size:%d, need size:%d, download fail.", downloaded, progress[url]["size"])
continue
else:
progress[url]["status"] = "finished"
return True
except Exception as e:
xlog.warn("download %s to %s fail:%r", url, filename, e)
continue
progress[url]["status"] = "failed"
return False
def parse_readme_versions(readme_file):
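    # Each matching line of the version file is expected to look like
    # (hypothetical values):
    #   https://codeload.github.com/XX-net/XX-Net/zip/3.15.1 0123456789abcdef...
    # i.e. a codeload download URL followed by a sha256 checksum.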
versions = []
    try:
        with open(readme_file, "r") as fd:
            lines = fd.readlines()
p = re.compile(r'https://codeload.github.com/XX-net/XX-Net/zip/([0-9]+)\.([0-9]+)\.([0-9]+) ([0-9a-f]*)')
for line in lines:
m = p.match(line)
if m:
version = m.group(1) + "." + m.group(2) + "." + m.group(3)
hashsum = m.group(4)
versions.append([m.group(0), version, hashsum])
if len(versions) == 2:
return versions
except Exception as e:
xlog.exception("xxnet_version fail:%r", e)
raise "get_version_fail:%s" % readme_file
def current_version():
readme_file = os.path.join(root_path, "version.txt")
try:
with open(readme_file) as fd:
content = fd.read()
p = re.compile(r'([0-9]+)\.([0-9]+)\.([0-9]+)')
m = p.match(content)
if m:
version = m.group(1) + "." + m.group(2) + "." + m.group(3)
return version
except:
xlog.warn("get_version_fail in update_from_github")
return "get_version_fail"
def get_github_versions():
readme_url = "https://raw.githubusercontent.com/XX-net/XX-Net/master/code/default/update_version.txt"
readme_target = os.path.join(download_path, "version.txt")
if not download_file(readme_url, readme_target):
raise IOError("get README %s fail:" % readme_url)
versions = parse_readme_versions(readme_target)
return versions
def get_hash_sum(version):
versions = get_github_versions()
for v in versions:
if v[1] == version:
return v[2]
def hash_file_sum(filename):
import hashlib
BLOCKSIZE = 65536
hasher = hashlib.sha256()
try:
with open(filename, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return hasher.hexdigest()
except:
return False
def overwrite(xxnet_version, xxnet_unzip_path):
progress["update_status"] = "Overwriting"
try:
for root, subdirs, files in os.walk(xxnet_unzip_path):
relate_path = root[len(xxnet_unzip_path) + 1:]
target_relate_path = relate_path
if sys.platform == 'win32':
if target_relate_path.startswith("code\\default"):
target_relate_path = "code\\" + xxnet_version + relate_path[12:]
else:
if target_relate_path.startswith("code/default"):
target_relate_path = "code/" + xxnet_version + relate_path[12:]
for subdir in subdirs:
if relate_path == "code" and subdir == "default":
subdir = xxnet_version
target_path = os.path.join(top_path, target_relate_path, subdir)
if not os.path.isdir(target_path):
xlog.info("mkdir %s", target_path)
os.mkdir(target_path)
for filename in files:
src_file = os.path.join(root, filename)
dst_file = os.path.join(top_path, target_relate_path, filename)
if not os.path.isfile(dst_file) or hash_file_sum(src_file) != hash_file_sum(dst_file):
xlog.info("copy %s => %s", src_file, dst_file)
# modify by outofmemo, files in '/sdcard' are not allowed to chmod for Android
# and shutil.copy() will call shutil.copymode()
if sys.platform != 'win32' and os.path.isfile("/system/bin/dalvikvm") == False and os.path.isfile("/system/bin/dalvikvm64") == False and os.path.isfile(dst_file):
st = os.stat(dst_file)
shutil.copy(src_file, dst_file)
if st.st_mode & stat.S_IEXEC:
os.chmod(dst_file, st.st_mode)
else:
shutil.copyfile(src_file, dst_file)
except Exception as e:
xlog.warn("update overwrite fail:%r", e)
progress["update_status"] = "Overwrite Fail:%r" % e
raise e
xlog.info("update file finished.")
def download_overwrite_new_version(xxnet_version, checkhash=1):
global update_progress
xxnet_url = 'https://codeload.github.com/XX-net/XX-Net/zip/%s' % xxnet_version
xxnet_zip_file = os.path.join(download_path, "XX-Net-%s.zip" % xxnet_version)
xxnet_unzip_path = os.path.join(download_path, "XX-Net-%s" % xxnet_version)
progress["update_status"] = "Downloading %s" % xxnet_url
if not download_file(xxnet_url, xxnet_zip_file):
progress["update_status"] = "Download Fail."
raise Exception("download xxnet zip fail:%s" % xxnet_zip_file)
if checkhash:
hash_sum = get_hash_sum(xxnet_version)
        if hash_sum and hash_file_sum(xxnet_zip_file) != hash_sum:
progress["update_status"] = "Download Checksum Fail."
xlog.warn("downloaded xxnet zip checksum fail:%s" % xxnet_zip_file)
raise Exception("downloaded xxnet zip checksum fail:%s" % xxnet_zip_file)
else:
xlog.debug("skip checking downloaded file hash")
xlog.info("update download %s finished.", download_path)
xlog.info("update start unzip")
progress["update_status"] = "Unziping"
try:
        with zipfile.ZipFile(xxnet_zip_file, "r") as dz:
            dz.extractall(download_path)
except Exception as e:
xlog.warn("unzip %s fail:%r", xxnet_zip_file, e)
progress["update_status"] = "Unzip Fail:%s" % e
raise e
xlog.info("update finished unzip")
overwrite(xxnet_version, xxnet_unzip_path)
os.remove(xxnet_zip_file)
shutil.rmtree(xxnet_unzip_path, ignore_errors=True)
def get_local_versions():
def get_folder_version(folder):
f = os.path.join(code_path, folder, "version.txt")
try:
with open(f) as fd:
content = fd.read()
p = re.compile(r'([0-9]+)\.([0-9]+)\.([0-9]+)')
m = p.match(content)
if m:
version = m.group(1) + "." + m.group(2) + "." + m.group(3)
return version
except:
return False
files_in_code_path = os.listdir(code_path)
local_versions = []
for name in files_in_code_path:
if os.path.isdir(os.path.join(code_path, name)):
v = get_folder_version(name)
if v:
local_versions.append([v, name])
    local_versions.sort(key=lambda s: tuple(map(int, s[0].split('.'))), reverse=True)
return local_versions
def get_current_version_dir():
current_dir = os.path.split(root_path)[-1]
return current_dir
def del_version(version):
if version == get_current_version_dir():
xlog.warn("try to delect current version.")
return False
try:
shutil.rmtree(os.path.join(top_path, "code", version))
return True
except Exception as e:
xlog.warn("deleting fail: %s", e)
return False
def update_current_version(version):
start_script = os.path.join(top_path, "code", version, "launcher", "start.py")
if not os.path.isfile(start_script):
xlog.warn("set version %s not exist", version)
return False
current_version_file = os.path.join(top_path, "code", "version.txt")
with open(current_version_file, "w") as fd:
fd.write(version)
return True
def restart_xxnet(version=None):
import module_init
module_init.stop_all()
import web_control
web_control.stop()
# New process will hold the listen port
# We should close all listen port before create new process
xlog.info("Close web control port.")
if version is None:
current_version_file = os.path.join(top_path, "code", "version.txt")
with open(current_version_file, "r") as fd:
version = fd.read()
xlog.info("restart to xx-net version:%s", version)
start_script = os.path.join(top_path, "code", version, "launcher", "start.py")
subprocess.Popen([sys.executable, start_script])
time.sleep(20)
xlog.info("Exit old process...")
os._exit(0)
def update_version(version, checkhash=1):
global update_progress, update_info
_update_info = update_info
update_info = ""
try:
download_overwrite_new_version(version, checkhash)
update_current_version(version)
progress["update_status"] = "Restarting"
xlog.info("update try restart xxnet")
restart_xxnet(version)
except Exception as e:
xlog.warn("update version %s fail:%r", version, e)
update_info = _update_info
def start_update_version(version, checkhash=1):
if progress["update_status"] != "Idle" and "Fail" not in progress["update_status"]:
return progress["update_status"]
progress["update_status"] = "Start update"
th = threading.Thread(target=update_version, args=(version, checkhash))
th.start()
return True
def cleanup():
def rm_paths(path_list):
del_fullpaths = []
for ps in path_list:
pt = os.path.join(top_path, ps)
pt = glob.glob(pt)
del_fullpaths += pt
if del_fullpaths:
xlog.info("DELETE: %s", ' , '.join(del_fullpaths))
for pt in del_fullpaths:
try:
if os.path.isfile(pt):
os.remove(pt)
elif os.path.isdir(pt):
shutil.rmtree(pt)
except:
pass
keep_old_num = config.get(["modules", "launcher", "keep_old_ver_num"], 6) # default keep several old versions
if keep_old_num < 99 and keep_old_num >= 0: # 99 means don't delete any old version
del_paths = []
local_vs = get_local_versions()
for i in range(len(local_vs)):
if local_vs[i][0] == current_version():
for u in range(i + keep_old_num + 1, len(local_vs)):
del_paths.append("code/" + local_vs[u][1] + "/")
break
if del_paths:
rm_paths(del_paths)
del_paths = []
if config.get(["savedisk", "clear_cache"], 0):
del_paths += [
"data/*/*.*.log",
"data/*/*.log.*",
"data/downloads/XX-Net-*.zip"
]
if config.get(["savedisk", "del_win"], 0):
del_paths += [
"code/*/python27/1.0/WinSxS/",
"code/*/python27/1.0/*.dll",
"code/*/python27/1.0/*.exe",
"code/*/python27/1.0/Microsoft.VC90.CRT.manifest",
"code/*/python27/1.0/lib/win32/"
]
if config.get(["savedisk", "del_mac"], 0):
del_paths += [
"code/*/python27/1.0/lib/darwin/"
]
if config.get(["savedisk", "del_linux"], 0):
del_paths += [
"code/*/python27/1.0/lib/linux/"
]
if config.get(["savedisk", "del_gae"], 0):
del_paths += [
"code/*/gae_proxy/"
]
if config.get(["savedisk", "del_gae_server"], 0):
del_paths += [
"code/*/gae_proxy/server/"
]
if config.get(["savedisk", "del_xtunnel"], 0):
del_paths += [
"code/*/x_tunnel/"
]
if config.get(["savedisk", "del_smartroute"], 0):
del_paths += [
"code/*/smart_router/"
]
if del_paths:
rm_paths(del_paths)
|
__init__.py
|
import os
import sys
import subprocess
import threading
import time
import wx
import wx.aui
from wx import FileConfig
import pcbnew
from .dialog import Dialog
def check_for_bom_button():
# From Miles McCoo's blog
# https://kicad.mmccoo.com/2017/03/05/adding-your-own-command-buttons-to-the-pcbnew-gui/
def find_pcbnew_window():
windows = wx.GetTopLevelWindows()
pcbneww = [w for w in windows if "pcbnew" in w.GetTitle().lower()]
if len(pcbneww) != 1:
return None
return pcbneww[0]
def callback(_):
plugin.Run()
path = os.path.dirname(__file__)
while not wx.GetApp():
time.sleep(1)
bm = wx.Bitmap(path + '/icon.png', wx.BITMAP_TYPE_PNG)
button_wx_item_id = 0
from pcbnew import ID_H_TOOLBAR
while True:
time.sleep(1)
pcbnew_window = find_pcbnew_window()
if not pcbnew_window:
continue
top_tb = pcbnew_window.FindWindowById(ID_H_TOOLBAR)
if button_wx_item_id == 0 or not top_tb.FindTool(button_wx_item_id):
top_tb.AddSeparator()
button_wx_item_id = wx.NewId()
top_tb.AddTool(button_wx_item_id, "KiBuzzard", bm,
"Execute Buzzard script", wx.ITEM_NORMAL)
top_tb.Bind(wx.EVT_TOOL, callback, id=button_wx_item_id)
top_tb.Realize()
class KiBuzzardPlugin(pcbnew.ActionPlugin, object):
config_file = os.path.join(os.path.dirname(__file__), '..', 'config.ini')
buzzard_path = os.path.join(os.path.dirname(__file__), '..', 'deps', 'buzzard')
def __init__(self):
super(KiBuzzardPlugin, self).__init__()
self.name = "Create Labels"
self.category = "Modify PCB"
self.pcbnew_icon_support = hasattr(self, "show_toolbar_button")
self.show_toolbar_button = True
icon_dir = os.path.dirname(os.path.dirname(__file__))
self.icon_file_name = os.path.join(icon_dir, 'icon.png')
self.description = "Create Labels"
self.config = FileConfig(localFilename=self.config_file)
self._pcbnew_frame = None
def defaults(self):
pass
def Run(self):
buzzard_script = os.path.join(self.buzzard_path, 'buzzard.py')
if self._pcbnew_frame is None:
try:
self._pcbnew_frame = [x for x in wx.GetTopLevelWindows() if ('pcbnew' in x.GetTitle().lower() and not 'python' in x.GetTitle().lower()) or ('pcb editor' in x.GetTitle().lower())][0]
except:
pass
        def run_buzzard(cmd_str):
            import re
            cmd_str = cmd_str + ' -o ki -stdout'
            args = [a.strip('"') for a in re.findall(r'".+?"|\S+', cmd_str)]
if sys.platform.startswith('win'):
args = [re.sub('([<>])', r'^\1', a) for a in args] # escape '<' or '>' with a caret, '^<'
# Execute Buzzard
process = None
if sys.platform.startswith('linux') or sys.platform == 'darwin':
process = subprocess.Popen(['python3', buzzard_script] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
process = subprocess.Popen(['C:\\Python38\\python.exe', buzzard_script] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = process.communicate()
if stderr:
wx.MessageBox(stderr, 'Error', wx.OK | wx.ICON_ERROR)
# check for errors
error_line = [s for s in stderr.decode('utf8').split('\n') if 'error' in s]
if len(error_line) > 0:
wx.MessageBox(error_line[0], 'Error', wx.OK | wx.ICON_ERROR)
else:
# Copy footprint into clipboard
if sys.platform.startswith('linux'):
clip_args = ['xclip', '-sel', 'clip', '-noutf8']
elif sys.platform == 'darwin':
clip_args = ['pbcopy']
else:
clip_args = ['clip.exe']
process = subprocess.Popen(clip_args, stdin=subprocess.PIPE)
process.communicate(stdout)
dlg.EndModal(wx.ID_OK)
dlg = Dialog(self._pcbnew_frame, self.config, self.buzzard_path, run_buzzard)
try:
if dlg.ShowModal() == wx.ID_OK:
# Set focus to main window and execute a Paste operation
if self._pcbnew_frame is not None:
self._pcbnew_frame.Raise()
wx.Yield()
keyinput = wx.UIActionSimulator()
keyinput.Char(ord("V"), wx.MOD_CONTROL)
finally:
self.config.Flush()
dlg.Destroy()
plugin = KiBuzzardPlugin()
plugin.register()
# Add a button the hacky way if the plugin button is not
# supported in pcbnew, unless this is Linux.
if not plugin.pcbnew_icon_support and not sys.platform.startswith('linux'):
t = threading.Thread(target=check_for_bom_button)
t.daemon = True
t.start()
|
system_profiler.py
|
# Copyright 2021 MosaicML. All Rights Reserved.
"""Profiler to record system level metrics."""
from __future__ import annotations
import threading
import time
from typing import TYPE_CHECKING, Dict, cast
import psutil
from composer.callbacks import memory_monitor
from composer.core.callback import Callback
if TYPE_CHECKING:
from composer.core.state import State
from composer.loggers.logger import Logger
from composer.profiler import Profiler
__all__ = ["SystemProfiler"]
class SystemProfiler(Callback):
"""The SystemProfiler records system level metrics. Implemented as a :class:`.Callback`, the profiler forks a
thread during :attr:`.Event.INIT` which polls and records system state.
When used with the Composer :class:`.Trainer`\\, the system profiler is enabled if profiling is enabled.
.. note::
        The Composer :class:`.Trainer` creates an instance of :class:`.SystemProfiler` when profiling is enabled.
        The user should not create and directly register an instance of :class:`.SystemProfiler` when using the Composer :class:`.Trainer`\\.
Args:
profile_cpu (bool): Whether to record cpu statistics (Default: ``True``)
profile_memory (bool): Whether to record memory statistics (Default: ``False``)
profile_disk (bool): Whether to record disk I/O statistics (Default: ``False``)
profile_net (bool): Whether to record network I/O statistics (Default: ``False``)
stats_thread_interval_seconds (float): Interval to record system-level stats, in seconds. (Default: every ``0.5`` seconds)
"""
def __init__(self,
profile_cpu: bool = True,
profile_memory: bool = False,
profile_disk: bool = False,
profile_net: bool = False,
stats_thread_interval_seconds: float = 0.5) -> None:
self.profile_cpu = profile_cpu
self.profile_disk = profile_disk
self.profile_memory = profile_memory
self.profile_net = profile_net
self.stats_thread_interval_seconds = stats_thread_interval_seconds
def init(self, state: State, logger: Logger):
del logger # unused
assert state.profiler is not None, "The trainer should have set the profiler in state"
# Start the stats thread
threading.Thread(target=self._stats_thread, daemon=True, args=[state.profiler]).start()
def _stats_thread(self, profiler: Profiler):
"""Gathers requested system metrics at :attr:`SystemProfiler.stats_thread_interval_seconds` interval."""
psutil.disk_io_counters.cache_clear()
psutil.net_io_counters.cache_clear()
if self.profile_cpu:
psutil.cpu_percent() # spin it once to clear the default 0.0 value on the first call
while True:
if self.profile_cpu:
cpu_percent = psutil.cpu_percent()
profiler.marker(name="cpu", categories=["cpu"]).counter({"cpu_percent": cpu_percent})
if self.profile_memory:
cuda_memory_stats = memory_monitor._get_memory_report()
for name, val in cuda_memory_stats.items():
profiler.marker(f"memory/cuda/{name}", categories=["memory"]).counter({name: val})
swap_memory = psutil.swap_memory()
profiler.marker("memory/swap", categories=["memory"]).counter({
"used_gb": swap_memory.used / 2**9,
"free_gb": swap_memory.free / 2**9
})
virtual_memory = psutil.virtual_memory()
profiler.marker("memory/virtual", categories=["memory"]).counter({
"used_gb": virtual_memory.used / 2**9,
"available_gb": virtual_memory.available / 2**9
})
if self.profile_disk:
disk_io_counters = cast(Dict[str, psutil._common.sdiskio], psutil.disk_io_counters(perdisk=True))
for disk_name, disk_stats in disk_io_counters.items():
for field_name in ("read_count", "write_count", "read_bytes", "write_bytes", "read_time",
"write_time", "busy_time"):
profiler.marker(f"disk/{disk_name}/{field_name}",
categories=["disk"]).counter({"field_name": getattr(disk_stats, field_name)})
if self.profile_net:
net_io_counters = cast(Dict[str, psutil._common.snetio], psutil.net_io_counters(pernic=True))
for nic, nic_stats in net_io_counters.items():
profiler.marker(f"network/{nic}/kb_sent",
categories=["net"]).counter({"kb_sent": nic_stats.bytes_sent / 2**3})
profiler.marker(f"network/{nic}/kb_recv",
categories=["net"]).counter({"kb_recv": nic_stats.bytes_recv / 2**3})
time.sleep(self.stats_thread_interval_seconds)
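# Example usage (a minimal sketch; with the Composer Trainer the callback is
# normally created for you when profiling is enabled, so constructing one
# directly is only needed for custom setups):
#
#   profiler_cb = SystemProfiler(profile_cpu=True, profile_memory=True,
#                                stats_thread_interval_seconds=1.0)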
|
day26-7 进程之间不共享全局变量.py
|
import multiprocessing
import time
# Define a global list
g_list = []
# Task that appends data
def add_data():
    for i in range(3):
        # Lists are mutable: append() modifies the object in place, so its memory
        # address does not change and the global keyword is not needed.
        # Adding global would only declare an intent to rebind the global name.
        g_list.append(i)
        print("append:", i)
        time.sleep(0.2)
    print("append finished:", g_list)
# Task that reads the data
def read_data():
    print("read:", g_list)
# Create the child process that appends data
add_process = multiprocessing.Process(target=add_data)
# Create the child process that reads data
read_process = multiprocessing.Process(target=read_data)
# A __main__ guard is required in a module that is executed directly.
# Run the child processes:
# on Windows, creating a child process copies the main process's resources,
# and during that copy each child would recursively spawn further children;
# checking for the main module prevents this recursive process creation.
if __name__ == '__main__':
add_process.start()
    # The current (main) process waits for the data-appending process to finish
    # before the code continues
    add_process.join()
    print("main read:", g_list)
read_process.start()
# Conclusion: processes do not share global variables.
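# A minimal sketch of how data can be shared instead (an addition, not part of
# the original lesson): pass a multiprocessing.Queue to the child process.
#
#   def produce(q):
#       q.put([0, 1, 2])
#
#   if __name__ == '__main__':
#       q = multiprocessing.Queue()
#       p = multiprocessing.Process(target=produce, args=(q,))
#       p.start()
#       print(q.get())  # [0, 1, 2]
#       p.join()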
|
main.py
|
#!/usr/bin/env python
# SOURCE: https://blog.miguelgrinberg.com/post/video-streaming-with-flask
import sys
from flask import Flask, render_template, Response
import os
import rospy
import threading
import html
from std_msgs.msg import String
from scripts.tracking_markers_class2 import TrackingCamera
app = Flask(__name__)
# ROS node, publisher, and parameter.
# The node is started in a separate thread to avoid conflicts with Flask.
# The parameter *disable_signals* must be set if node is not initialized
# in the main thread.
# tutorial
threading.Thread(target=lambda: rospy.init_node('test_node', disable_signals=True)).start()
# setup topics related to each chairbot
chair_ids = range(4)
gen_move_task = lambda x: rospy.Publisher(
'/requestMotion0'+str(x), String, queue_size=1)
gen_stop_task = lambda x: rospy.Publisher(
'/requestStop0'+str(x), String, queue_size=1)
pub_motion_arr = list(map(gen_move_task, chair_ids))
pub_stop_arr = list(map(gen_stop_task, chair_ids))
@app.route('/')
def index():
return render_template('index.html')
def gen(camera):
while True:
frame = camera.process()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@app.route('/video_feed')
def video_feed():
return Response(gen(TrackingCamera()),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/move/<direction>/<int:id>', methods=['GET'])
def send_movement_command(direction, id):
    if direction in ('forward', 'backward', 'left', 'right', 'stop'):
        # new ROSLIB.Message({data: motion})
        if direction == 'stop':
            pub_stop_arr[id].publish(direction.upper())
        else:
            pub_motion_arr[id].publish(direction.upper())
        # html.success/html.failure appear to come from a project-local helper,
        # not the stdlib `html` module
        return html.success(direction)
    else:
        msg = 'Direction not recognized'
        return html.failure(msg)
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=False)
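# Example request (hypothetical host/port; Flask defaults to port 5000 with the
# settings above):
#   curl http://localhost:5000/move/forward/0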
|
video2npy.py
|
# ============================================================================
# Compute RGB data and save it as .npy files
# ============================================================================
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from datetime import datetime
import threading
from tensorflow.python.platform import app, flags
import tensorflow as tf
import os
import sys
import time
_EXT = ['.avi', '.mp4']
_IMAGE_SIZE = 224
frameWidth = 224
frameHeight = 224
frameCount = 15
frame_interval = 10
_CLASS_NAMES = 'label_kugou.txt'
# abspath = os.path.abspath(sys.argv[0])
DATA_DIR = '/data2/dataset/Video_9k_dataset_v3/video_9k'
SAVE_DIR = '/data2/ye/data/rgb'
train_or_test = 'test'
train_path = '/data2/ye/instrument-detect/preprocess/video_9k_train_list_v2.txt'
test_path = '/data2/ye/instrument-detect/preprocess/video_9k_test_list_v2.txt'
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', DATA_DIR, 'directory containing data.')
flags.DEFINE_string('save_to', SAVE_DIR, 'where to save flow data.')
flags.DEFINE_integer('num_threads', 16, 'number of threads.')  # 32
flags.DEFINE_string('train_or_test', train_or_test, 'train or test dirs')
def _process_video_files(thread_index, filenames, save_to):
for filename in filenames:
flow = computeRGB(filename)
if np.sum(flow) == 0:
break
fullname, _ = os.path.splitext(filename)
split_name = fullname.split('/')
save_name = os.path.join(save_to, split_name[-3],split_name[-2], split_name[-1] + '.npy')
np.save(save_name, flow)
print("%s [thread %d]: %s done." % (datetime.now(), thread_index, filename))
sys.stdout.flush()
def computeRGB(video_path):
"""
计算RGB数据,并归一化数据到[-1,1],size为224*224
不足224*224则在上下加黑边
"""
cap = cv2.VideoCapture(video_path)
buf = np.zeros((1, frameCount, frameHeight, frameWidth, 3), np.dtype('float32'))
fc = 0
i = 0
ret = True
max_val = lambda x: max(max(x.flatten()), abs(min(x.flatten())))
while (fc < (frameCount + 20)*frame_interval and ret and i < frameCount):
ret, frame = cap.read()
        if fc % frame_interval == 0 and ret:  # sample one frame every frame_interval frames
if max_val(frame) != 0:
frame = cv2.resize(frame, (224, 168), None, 0, 0, cv2.INTER_LINEAR) # width,height
                frame = (frame / float(max_val(frame))) * 2 - 1  # rescale to [-1, 1]
# frame = (frame /255. -0.5) * 2
frame = cv2.copyMakeBorder(frame, 28, 28, 0, 0, cv2.BORDER_CONSTANT, value=(-1, -1, -1)) #加黑边
buf[0][i] = frame
i += 1
fc += 1
cap.release()
    if i < frameCount - 3:  # allow up to 3 missing frames
buf = np.zeros((1, frameCount, frameHeight, frameWidth, 3), np.dtype('float32'))
return buf
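# Contract sketch (assuming a decodable video path): computeRGB returns an
# array of shape (1, frameCount, frameHeight, frameWidth, 3) with values in
# [-1, 1], or an all-zero buffer when fewer than frameCount - 3 frames decode.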
def _process_dataset():
"""
多线程处理数据
线程数为FLAGS.num_threads
"""
# import pdb
# pdb.set_trace()
# list1 = [FLAGS.data_dir + "//" + class_fold + "//" + train_or_test + "//" + filename # filename
# for class_fold in
# os.listdir(FLAGS.data_dir) if 'zip' not in class_fold
# for filename in
# os.listdir(FLAGS.data_dir + "//" + class_fold + "//" + train_or_test)
# ]
# list2 = [FLAGS.data_dir + "//" + class_fold + "//" + train_or_test + "//" + filename # filename
# for class_fold in
# os.listdir(FLAGS.save_to) if 'zip' not in class_fold
# for filename in
# os.listdir(FLAGS.save_to + "//" + class_fold + "//" + train_or_test)
# ]
# filenames = [i for i in list1 if i[:-4] + '.npy' not in list2]
if train_or_test == 'train':
f = open(train_path)
if train_or_test == 'test':
f = open(test_path)
train_info = []
for line in f.readlines():
info = line.strip().split(',')
train_info.append(info[0])
f.close()
list2 = ["/" + class_fold + "/" + train_or_test + "/"+ filename
for class_fold in
os.listdir(FLAGS.save_to) if 'zip' not in class_fold
for filename in
os.listdir(FLAGS.save_to + "//" + class_fold + "//" + train_or_test)
]
#filenames = [i for i in train_info if i + '.npy' not in list2]
filenames = [FLAGS.data_dir + '/' + i.split('/')[-3] + '/' + i.split('/')[-2] + '/' + i.split('/')[-1] + '.mp4' for i
in train_info if i + '.npy' not in list2]
print(len(filenames))
time1 = time.time()
filename_chunk = np.array_split(filenames, FLAGS.num_threads)
threads = []
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Launch a thread for each batch.
print("Launching %s threads." % FLAGS.num_threads)
for thread_index in range(FLAGS.num_threads):
args = (thread_index, filename_chunk[thread_index], FLAGS.save_to)
t = threading.Thread(target=_process_video_files, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print("%s: Finished processing all %d videos in data set '%s'." %
(datetime.now(), len(filenames), FLAGS.train_or_test))
duration = time.time() - time1
print("Time total:%.2f, Per Video: %2f" %(duration, duration /len(filenames)))
def main(unused_argv):
f = open(_CLASS_NAMES, 'r')#, encoding= 'utf-8')
classes = [cls[:2] for cls in f.readlines() if cls[0] != '\n' ] #cls[:2]
for cls in classes:
path = FLAGS.save_to + '//' + cls + '//' + train_or_test
if not tf.gfile.IsDirectory(path):
tf.gfile.MakeDirs(path)
_process_dataset()
if __name__ == '__main__':
app.run()
# main()
|
test_vacuum.py
|
from base import pipeline, clean_db
from collections import namedtuple
import getpass
import os
import psycopg2
import psycopg2.extensions
import random
import threading
import time
def test_concurrent_vacuum_full(pipeline, clean_db):
pipeline.create_stream('test_vacuum_stream', x='int')
pipeline.create_cv(
'test_vacuum_full',
'SELECT x::int, COUNT(*) FROM test_vacuum_stream GROUP BY x')
stop = False
def insert():
while not stop:
            values = [(random.randint(0, 1000000),) for _ in range(1000)]
pipeline.insert('test_vacuum_stream', ('x',), values)
time.sleep(0.01)
    threads = [threading.Thread(target=insert) for _ in range(4)]
    for t in threads:
        t.start()
# Insert data for a little bit so we have enough work to do while
# vacuuming.
time.sleep(20)
conn = psycopg2.connect('dbname=postgres user=%s host=localhost port=%s' %
(getpass.getuser(), pipeline.port))
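    # VACUUM cannot run inside a transaction block, so the connection is
    # switched to autocommit before issuing VACUUM FULL.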
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
cur = conn.cursor()
cur.execute('VACUUM FULL test_vacuum_full')
conn.close()
# Now kill the insert threads.
stop = True
    for t in threads:
        t.join()
|
8c.py
|
import multiprocessing
from queue import Empty
a = list(enumerate([183677, 186720, 176916, 186554, 113034,
193701, 131768, 142185, 131518, 105202]))
b = [0 for i in range(len(a))]
def fatorial(n):
fat = n
for i in range(n-1, 1, -1):
fat = fat * i
return fat
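# Work-pool pattern: each worker process drains the shared input queue and
# emits (index, result) pairs, so results can be written back to their
# original positions regardless of which worker finishes first.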
def main(inq, outq, process):
    while True:
        try:
            # empty() is racy across processes, so use a timed get instead
            entry = inq.get(timeout=1)
        except Empty:
            break
        print('starting process ' + str(process) + ' input ' + str(entry))
        outq.put((entry[0], fatorial(entry[1])))
        print('finished process ' + str(process) + ' input ' + str(entry))
if __name__ == '__main__':
processes = []
in_queue = multiprocessing.Queue()
out_queue = multiprocessing.Queue()
for entry in a:
in_queue.put(entry)
for i in range(4):
p = multiprocessing.Process(target=main, args=(in_queue, out_queue, i))
p.start()
processes.append(p)
for i in range(len(a)):
q = out_queue.get()
b[q[0]] = q[1]
for p in processes:
p.join()
print(b)
|
1_disc_golf_range.py
|
from threading import Semaphore,Lock, Thread
from time import sleep
import random
#initializing semaphores
send_cart = Semaphore(0)
mutex = Semaphore(1)
cart = Semaphore(0)
mutex_ballinc = Semaphore(1)
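# send_cart / cart form a two-semaphore rendezvous: a frolfer releases
# send_cart when the stash runs low, then blocks on cart until the cart
# thread has refilled the stash and signalled back.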
rng = random.Random() # used to generate random number
# initializing the variables, in case the user does not give any input
stash = 1            # number of discs in the central stash
bucket = 1           # number of discs in the bucket carried by each frolfer
frolfer = 1          # number of frolfers playing
discs_on_field = 0   # discs currently on the field
def disc_golf_range(frolfer_id):
global stash, discs_on_field
    rng.seed(100)        # seed so the sleep intervals are reproducible
    prng = rng.random()  # random sleep interval used between buckets
while(True):
mutex.acquire() # lock acquired to make changes in the stash value
print('Frolfer',frolfer_id,'calling for bucket')
if stash >= bucket: # initially the frolfer calls for bucket
stash = stash-bucket
print('Frolfer',frolfer_id,'got',bucket,'discs; ','Stash = ',stash)
        else:
            send_cart.release()  # signal the cart to gather discs from the field
            cart.acquire()       # wait until the cart has refilled the stash
            stash = stash - bucket
            print('Frolfer', frolfer_id, 'got', bucket, 'discs;', 'Stash =', stash)
        mutex.release()  # lock released so that others can access the stash
for i in range(bucket): # frolfer starts throwing
mutex_ballinc.acquire() # lock acquired to count discs on the field
print('Frolfer',frolfer_id,'threw disc ',i)
discs_on_field +=1
mutex_ballinc.release() # lock released so that other frolfer can throw discs
            sleep(prng)  # random sleep before the next bucket
def collect_discs(): # function to fill cart to refill central stash
global stash, discs_on_field
while(True):
        send_cart.acquire()  # block until a frolfer requests a refill
print('#############################################')
print('Stash = ', stash ,';Cart entering field')
stash += discs_on_field
print('Cart done, gathered ',discs_on_field,' discs ',' Stash = ',stash)
discs_on_field = 0
print('#############################################')
cart.release() # lock released for next thread to execute
if __name__ == '__main__':
# user inputs
stash= int(input('Enter the number of discs in the Stash: '))
bucket=int(input('Enter the number of discs in the bucket(for each Frolfer): '))
frolfer=int(input('Enter the number of Frolfer playing : '))
    # creating cart thread
cart_thread = Thread(target= collect_discs)
cart_thread.start() #thread starts
for i in range (frolfer):
frolfer_thread = Thread(target=disc_golf_range,args=[i]) # creating frolfer thread
        frolfer_thread.start()  # one thread is started per frolfer
|
docker_runner.py
|
import os
import logging
import pdb
import time
import random
from multiprocessing import Process
import numpy as np
from client import MilvusClient
import utils
import parser
from runner import Runner
logger = logging.getLogger("milvus_benchmark.docker")
class DockerRunner(Runner):
"""run docker mode"""
def __init__(self, image):
super(DockerRunner, self).__init__()
self.image = image
def run(self, definition, run_type=None):
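        # Dispatch on run_type: "performance", "insert_performance",
        # "search_performance", "accuracy", or "stability"; each branch starts
        # a server container, runs its operations, then removes the container.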
if run_type == "performance":
for op_type, op_value in definition.items():
# run docker mode
run_count = op_value["run_count"]
run_params = op_value["params"]
container = None
if op_type == "insert":
if not run_params:
logger.debug("No run params")
continue
for index, param in enumerate(run_params):
logger.info("Definition param: %s" % str(param))
table_name = param["table_name"]
volume_name = param["db_path_prefix"]
print(table_name)
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
for k, v in param.items():
if k.startswith("server."):
# Update server config
utils.modify_config(k, v, type="server", db_slave=None)
container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None)
time.sleep(2)
milvus = MilvusClient(table_name)
                        # Check whether the table exists
if milvus.exists_table():
milvus.delete()
time.sleep(10)
milvus.create_table(table_name, dimension, index_file_size, metric_type)
# debug
# milvus.create_index("ivf_sq8", 16384)
res = self.do_insert(milvus, table_name, data_type, dimension, table_size, param["ni_per"])
logger.info(res)
# wait for file merge
time.sleep(table_size * dimension / 5000000)
# Clear up
utils.remove_container(container)
elif op_type == "query":
for index, param in enumerate(run_params):
logger.info("Definition param: %s" % str(param))
table_name = param["dataset"]
volume_name = param["db_path_prefix"]
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
for k, v in param.items():
if k.startswith("server."):
utils.modify_config(k, v, type="server")
container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None)
time.sleep(2)
milvus = MilvusClient(table_name)
logger.debug(milvus.show_tables())
                        # Check whether the table exists
                        if not milvus.exists_table():
                            logger.warning("Table %s does not exist, continuing with next params ..." % table_name)
                            continue
# parse index info
index_types = param["index.index_types"]
nlists = param["index.nlists"]
# parse top-k, nq, nprobe
top_ks, nqs, nprobes = parser.search_params_parser(param)
for index_type in index_types:
for nlist in nlists:
result = milvus.describe_index()
logger.info(result)
# milvus.drop_index()
# milvus.create_index(index_type, nlist)
result = milvus.describe_index()
logger.info(result)
logger.info(milvus.count())
# preload index
milvus.preload_table()
logger.info("Start warm up query")
res = self.do_query(milvus, table_name, [1], [1], 1, 1)
logger.info("End warm up query")
# Run query test
for nprobe in nprobes:
logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe))
res = self.do_query(milvus, table_name, top_ks, nqs, nprobe, run_count)
headers = ["Nq/Top-k"]
headers.extend([str(top_k) for top_k in top_ks])
utils.print_table(headers, nqs, res)
utils.remove_container(container)
elif run_type == "insert_performance":
for op_type, op_value in definition.items():
# run docker mode
run_count = op_value["run_count"]
run_params = op_value["params"]
container = None
if not run_params:
logger.debug("No run params")
continue
for index, param in enumerate(run_params):
logger.info("Definition param: %s" % str(param))
table_name = param["table_name"]
volume_name = param["db_path_prefix"]
print(table_name)
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
for k, v in param.items():
if k.startswith("server."):
# Update server config
utils.modify_config(k, v, type="server", db_slave=None)
container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None)
time.sleep(2)
milvus = MilvusClient(table_name)
                    # Check whether the table exists
if milvus.exists_table():
milvus.delete()
time.sleep(10)
milvus.create_table(table_name, dimension, index_file_size, metric_type)
# debug
# milvus.create_index("ivf_sq8", 16384)
res = self.do_insert(milvus, table_name, data_type, dimension, table_size, param["ni_per"])
logger.info(res)
# wait for file merge
time.sleep(table_size * dimension / 5000000)
# Clear up
utils.remove_container(container)
elif run_type == "search_performance":
for op_type, op_value in definition.items():
# run docker mode
run_count = op_value["run_count"]
run_params = op_value["params"]
container = None
for index, param in enumerate(run_params):
logger.info("Definition param: %s" % str(param))
table_name = param["dataset"]
volume_name = param["db_path_prefix"]
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
for k, v in param.items():
if k.startswith("server."):
utils.modify_config(k, v, type="server")
container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None)
time.sleep(2)
milvus = MilvusClient(table_name)
logger.debug(milvus.show_tables())
                    # Check whether the table exists
                    if not milvus.exists_table():
                        logger.warning("Table %s does not exist, continuing with next params ..." % table_name)
                        continue
# parse index info
index_types = param["index.index_types"]
nlists = param["index.nlists"]
# parse top-k, nq, nprobe
top_ks, nqs, nprobes = parser.search_params_parser(param)
for index_type in index_types:
for nlist in nlists:
result = milvus.describe_index()
logger.info(result)
# milvus.drop_index()
# milvus.create_index(index_type, nlist)
result = milvus.describe_index()
logger.info(result)
logger.info(milvus.count())
# preload index
milvus.preload_table()
logger.info("Start warm up query")
res = self.do_query(milvus, table_name, [1], [1], 1, 1)
logger.info("End warm up query")
# Run query test
for nprobe in nprobes:
logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe))
res = self.do_query(milvus, table_name, top_ks, nqs, nprobe, run_count)
headers = ["Nq/Top-k"]
headers.extend([str(top_k) for top_k in top_ks])
utils.print_table(headers, nqs, res)
utils.remove_container(container)
elif run_type == "accuracy":
"""
{
"dataset": "random_50m_1024_512",
"index.index_types": ["flat", ivf_flat", "ivf_sq8"],
"index.nlists": [16384],
"nprobes": [1, 32, 128],
"nqs": [100],
"top_ks": [1, 64],
"server.use_blas_threshold": 1100,
"server.cpu_cache_capacity": 256
}
"""
for op_type, op_value in definition.items():
if op_type != "query":
logger.warning("invalid operation: %s in accuracy test, only support query operation" % op_type)
break
run_count = op_value["run_count"]
run_params = op_value["params"]
container = None
for index, param in enumerate(run_params):
logger.info("Definition param: %s" % str(param))
table_name = param["dataset"]
sift_acc = False
if "sift_acc" in param:
sift_acc = param["sift_acc"]
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
for k, v in param.items():
if k.startswith("server."):
utils.modify_config(k, v, type="server")
volume_name = param["db_path_prefix"]
container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None)
time.sleep(2)
milvus = MilvusClient(table_name)
                    # Check whether the table exists
                    if not milvus.exists_table():
                        logger.warning("Table %s does not exist, continuing with next params ..." % table_name)
                        continue
# parse index info
index_types = param["index.index_types"]
nlists = param["index.nlists"]
# parse top-k, nq, nprobe
top_ks, nqs, nprobes = parser.search_params_parser(param)
if sift_acc is True:
# preload groundtruth data
true_ids_all = self.get_groundtruth_ids(table_size)
acc_dict = {}
for index_type in index_types:
for nlist in nlists:
result = milvus.describe_index()
logger.info(result)
milvus.create_index(index_type, nlist)
# preload index
milvus.preload_table()
# Run query test
for nprobe in nprobes:
logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe))
for top_k in top_ks:
for nq in nqs:
result_ids = []
id_prefix = "%s_index_%s_nlist_%s_metric_type_%s_nprobe_%s_top_k_%s_nq_%s" % \
(table_name, index_type, nlist, metric_type, nprobe, top_k, nq)
if sift_acc is False:
self.do_query_acc(milvus, table_name, top_k, nq, nprobe, id_prefix)
if index_type != "flat":
# Compute accuracy
base_name = "%s_index_flat_nlist_%s_metric_type_%s_nprobe_%s_top_k_%s_nq_%s" % \
(table_name, nlist, metric_type, nprobe, top_k, nq)
avg_acc = self.compute_accuracy(base_name, id_prefix)
logger.info("Query: <%s> accuracy: %s" % (id_prefix, avg_acc))
else:
result_ids, result_distances = self.do_query_ids(milvus, table_name, top_k, nq, nprobe)
debug_file_ids = "0.5.3_result_ids"
debug_file_distances = "0.5.3_result_distances"
with open(debug_file_ids, "w+") as fd:
total = 0
for index, item in enumerate(result_ids):
true_item = true_ids_all[:nq, :top_k].tolist()[index]
tmp = set(item).intersection(set(true_item))
total = total + len(tmp)
fd.write("query: N-%d, intersection: %d, total: %d\n" % (index, len(tmp), total))
fd.write("%s\n" % str(item))
fd.write("%s\n" % str(true_item))
acc_value = self.get_recall_value(true_ids_all[:nq, :top_k].tolist(), result_ids)
logger.info("Query: <%s> accuracy: %s" % (id_prefix, acc_value))
# # print accuracy table
# headers = [table_name]
# headers.extend([str(top_k) for top_k in top_ks])
# utils.print_table(headers, nqs, res)
# remove container, and run next definition
logger.info("remove container, and run next definition")
utils.remove_container(container)
elif run_type == "stability":
for op_type, op_value in definition.items():
if op_type != "query":
logger.warning("invalid operation: %s in accuracy test, only support query operation" % op_type)
break
run_count = op_value["run_count"]
run_params = op_value["params"]
container = None
for index, param in enumerate(run_params):
logger.info("Definition param: %s" % str(param))
table_name = param["dataset"]
index_type = param["index_type"]
volume_name = param["db_path_prefix"]
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
# set default test time
if "during_time" not in param:
during_time = 100 # seconds
else:
during_time = int(param["during_time"]) * 60
# set default query process num
if "query_process_num" not in param:
query_process_num = 10
else:
query_process_num = int(param["query_process_num"])
for k, v in param.items():
if k.startswith("server."):
utils.modify_config(k, v, type="server")
container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None)
time.sleep(2)
milvus = MilvusClient(table_name)
                    # Check whether the table exists
                    if not milvus.exists_table():
                        logger.warning("Table %s does not exist, continuing with next params ..." % table_name)
                        continue
start_time = time.time()
insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(10000)]
i = 0
while time.time() < start_time + during_time:
i = i + 1
processes = []
# do query
# for i in range(query_process_num):
# milvus_instance = MilvusClient(table_name)
# top_k = random.choice([x for x in range(1, 100)])
# nq = random.choice([x for x in range(1, 100)])
# nprobe = random.choice([x for x in range(1, 1000)])
# # logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe))
# p = Process(target=self.do_query, args=(milvus_instance, table_name, [top_k], [nq], [nprobe], run_count, ))
# processes.append(p)
# p.start()
# time.sleep(0.1)
# for p in processes:
# p.join()
milvus_instance = MilvusClient(table_name)
top_ks = random.sample([x for x in range(1, 100)], 3)
nqs = random.sample([x for x in range(1, 1000)], 3)
nprobe = random.choice([x for x in range(1, 500)])
res = self.do_query(milvus, table_name, top_ks, nqs, nprobe, run_count)
if i % 10 == 0:
status, res = milvus_instance.insert(insert_vectors, ids=[x for x in range(len(insert_vectors))])
if not status.OK():
logger.error(status)
# status = milvus_instance.drop_index()
# if not status.OK():
# logger.error(status)
# index_type = random.choice(["flat", "ivf_flat", "ivf_sq8"])
milvus_instance.create_index(index_type, 16384)
result = milvus.describe_index()
logger.info(result)
# milvus_instance.create_index("ivf_sq8", 16384)
utils.remove_container(container)
else:
logger.warning("Run type: %s not supported" % run_type)
|
engine.py
|
"""
"""
import logging
import sys
import smtplib
import os
from abc import ABC
from datetime import datetime
from email.message import EmailMessage
from queue import Empty, Queue
from threading import Thread
from typing import Any, Sequence, Type, Dict, List, Optional
from howtrader.event import Event, EventEngine
from .app import BaseApp
from .event import (
EVENT_TICK,
EVENT_TIMER,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION,
EVENT_ACCOUNT,
EVENT_CONTRACT,
EVENT_LOG
)
from .gateway import BaseGateway
from .object import (
CancelRequest,
QueryRequest,
LogData,
OrderRequest,
SubscribeRequest,
HistoryRequest,
OrderData,
BarData,
TickData,
TradeData,
PositionData,
AccountData,
ContractData,
Exchange
)
from .setting import SETTINGS
from .utility import get_folder_path, TRADER_DIR
class MainEngine:
"""
Acts as the core of VN Trader.
"""
def __init__(self, event_engine: EventEngine = None):
""""""
if event_engine:
self.event_engine: EventEngine = event_engine
else:
self.event_engine = EventEngine()
self.event_engine.start()
self.gateways: Dict[str, BaseGateway] = {}
self.engines: Dict[str, BaseEngine] = {}
self.apps: Dict[str, BaseApp] = {}
self.exchanges: List[Exchange] = []
os.chdir(TRADER_DIR) # Change working directory
self.init_engines() # Initialize function engines
def add_engine(self, engine_class: Any) -> "BaseEngine":
"""
Add function engine.
"""
engine = engine_class(self, self.event_engine)
self.engines[engine.engine_name] = engine
return engine
def add_gateway(self, gateway_class: Type[BaseGateway]) -> BaseGateway:
"""
Add gateway.
"""
gateway = gateway_class(self.event_engine)
self.gateways[gateway.gateway_name] = gateway
# Add gateway supported exchanges into engine
for exchange in gateway.exchanges:
if exchange not in self.exchanges:
self.exchanges.append(exchange)
return gateway
def add_app(self, app_class: Type[BaseApp]) -> "BaseEngine":
"""
Add app.
"""
app = app_class()
self.apps[app.app_name] = app
engine = self.add_engine(app.engine_class)
return engine
def init_engines(self) -> None:
"""
Init all engines.
"""
self.add_engine(LogEngine)
self.add_engine(OmsEngine)
self.add_engine(EmailEngine)
def write_log(self, msg: str, source: str = "") -> None:
"""
Put log event with specific message.
"""
log = LogData(msg=msg, gateway_name=source)
event = Event(EVENT_LOG, log)
self.event_engine.put(event)
def get_gateway(self, gateway_name: str) -> BaseGateway:
"""
Return gateway object by name.
"""
gateway = self.gateways.get(gateway_name, None)
if not gateway:
self.write_log(f"找不到底层接口:{gateway_name}")
return gateway
def get_engine(self, engine_name: str) -> "BaseEngine":
"""
Return engine object by name.
"""
engine = self.engines.get(engine_name, None)
if not engine:
self.write_log(f"找不到引擎:{engine_name}")
return engine
def get_default_setting(self, gateway_name: str) -> Optional[Dict[str, Any]]:
"""
Get default setting dict of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.get_default_setting()
return None
def get_all_gateway_names(self) -> List[str]:
"""
Get all names of gateway added in main engine.
"""
return list(self.gateways.keys())
def get_all_apps(self) -> List[BaseApp]:
"""
Get all app objects.
"""
return list(self.apps.values())
def get_all_exchanges(self) -> List[Exchange]:
"""
Get all exchanges.
"""
return self.exchanges
def connect(self, setting: dict, gateway_name: str) -> None:
"""
Start connection of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.connect(setting)
def subscribe(self, req: SubscribeRequest, gateway_name: str) -> None:
"""
Subscribe tick data update of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.subscribe(req)
def send_order(self, req: OrderRequest, gateway_name: str) -> str:
"""
Send new order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_order(req)
else:
return ""
def cancel_order(self, req: CancelRequest, gateway_name: str) -> None:
"""
Send cancel order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.cancel_order(req)
def query_order(self, req: QueryRequest, gateway_name: str) -> None:
"""
Send query order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway and hasattr(gateway, 'query_order'):
gateway.query_order(req)
def send_orders(self, reqs: Sequence[OrderRequest], gateway_name: str) -> List[str]:
"""
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_orders(reqs)
else:
return ["" for req in reqs]
def cancel_orders(self, reqs: Sequence[CancelRequest], gateway_name: str) -> None:
"""
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.cancel_orders(reqs)
def query_history(self, req: HistoryRequest, gateway_name: str) -> Optional[List[BarData]]:
"""
Send cancel order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.query_history(req)
else:
return None
def query_position(self):
"""
query the account and position
"""
for gateway in self.gateways.values():
gateway.query_position()
def close(self) -> None:
"""
Make sure every gateway and app is closed properly before
programme exit.
"""
# Stop event engine first to prevent new timer event.
self.event_engine.stop()
for engine in self.engines.values():
engine.close()
for gateway in self.gateways.values():
gateway.close()
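# Minimal usage sketch (BinanceGateway is a hypothetical gateway class):
#   main_engine = MainEngine()
#   main_engine.add_gateway(BinanceGateway)
#   main_engine.connect(setting, "BINANCE")
#   ...
#   main_engine.close()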
class BaseEngine(ABC):
"""
    Abstract class for implementing a function engine.
"""
def __init__(
self,
main_engine: MainEngine,
event_engine: EventEngine,
engine_name: str,
):
""""""
self.main_engine = main_engine
self.event_engine = event_engine
self.engine_name = engine_name
def close(self):
""""""
pass
class LogEngine(BaseEngine):
"""
Processes log event and output with logging module.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(LogEngine, self).__init__(main_engine, event_engine, "log")
if not SETTINGS["log.active"]:
return
self.level: int = SETTINGS["log.level"]
self.logger: logging.Logger = logging.getLogger("VN Trader")
self.logger.setLevel(self.level)
self.formatter = logging.Formatter(
"%(asctime)s %(levelname)s: %(message)s"
)
self.add_null_handler()
if SETTINGS["log.console"]:
self.add_console_handler()
if SETTINGS["log.file"]:
self.add_file_handler()
self.register_event()
def add_null_handler(self) -> None:
"""
Add null handler for logger.
"""
null_handler = logging.NullHandler()
self.logger.addHandler(null_handler)
def add_console_handler(self) -> None:
"""
Add console output of log.
"""
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(self.level)
console_handler.setFormatter(self.formatter)
self.logger.addHandler(console_handler)
def add_file_handler(self) -> None:
"""
Add file output of log.
"""
today_date = datetime.now().strftime("%Y%m%d")
filename = f"vt_{today_date}.log"
log_path = get_folder_path("log")
file_path = log_path.joinpath(filename)
file_handler = logging.FileHandler(
file_path, mode="a", encoding="utf8"
)
file_handler.setLevel(self.level)
file_handler.setFormatter(self.formatter)
self.logger.addHandler(file_handler)
def register_event(self) -> None:
""""""
self.event_engine.register(EVENT_LOG, self.process_log_event)
def process_log_event(self, event: Event) -> None:
"""
Process log event.
"""
log = event.data
self.logger.log(log.level, log.msg)
class OmsEngine(BaseEngine):
"""
Provides order management system function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(OmsEngine, self).__init__(main_engine, event_engine, "oms")
self.ticks: Dict[str, TickData] = {}
self.orders: Dict[str, OrderData] = {}
self.trades: Dict[str, TradeData] = {}
self.positions: Dict[str, PositionData] = {}
self.accounts: Dict[str, AccountData] = {}
self.contracts: Dict[str, ContractData] = {}
self.active_orders: Dict[str, OrderData] = {}
self.add_function()
self.register_event()
self.order_update_interval = 0 # for counting the timer.
self.position_update_interval = 0
def add_function(self) -> None:
"""Add query function to main engine."""
self.main_engine.get_tick = self.get_tick
self.main_engine.get_order = self.get_order
self.main_engine.get_trade = self.get_trade
self.main_engine.get_position = self.get_position
self.main_engine.get_account = self.get_account
self.main_engine.get_contract = self.get_contract
self.main_engine.get_all_ticks = self.get_all_ticks
self.main_engine.get_all_orders = self.get_all_orders
self.main_engine.get_all_trades = self.get_all_trades
self.main_engine.get_all_positions = self.get_all_positions
self.main_engine.get_all_accounts = self.get_all_accounts
self.main_engine.get_all_contracts = self.get_all_contracts
self.main_engine.get_all_active_orders = self.get_all_active_orders
def register_event(self) -> None:
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
self.event_engine.register(EVENT_ACCOUNT, self.process_account_event)
self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)
self.event_engine.register(EVENT_TIMER, self.process_timer)
def process_tick_event(self, event: Event) -> None:
""""""
tick = event.data
self.ticks[tick.vt_symbol] = tick
def process_order_event(self, event: Event) -> None:
""""""
order = event.data
self.orders[order.vt_orderid] = order
# If order is active, then update data in dict.
if order.is_active():
self.active_orders[order.vt_orderid] = order
        # Otherwise, pop the inactive order from the dict
elif order.vt_orderid in self.active_orders:
self.active_orders.pop(order.vt_orderid)
def process_trade_event(self, event: Event) -> None:
""""""
trade = event.data
self.trades[trade.vt_tradeid] = trade
def process_position_event(self, event: Event) -> None:
""""""
position = event.data
self.positions[position.vt_positionid] = position
def process_account_event(self, event: Event) -> None:
""""""
account = event.data
self.accounts[account.vt_accountid] = account
def process_contract_event(self, event: Event) -> None:
""""""
contract = event.data
self.contracts[contract.vt_symbol] = contract
def process_timer(self, event: Event) -> None:
"""
update the orders, positions by timer, for we may be disconnected from server update push.
"""
self.order_update_interval += 1
self.position_update_interval += 1
if self.order_update_interval >= SETTINGS.get('order_update_interval', 120):
self.order_update_interval = 0
orders = self.get_all_active_orders()
for order in orders:
if order.datetime and (datetime.now(order.datetime.tzinfo) - order.datetime).seconds > SETTINGS.get('order_update_timer', 120):
req = order.create_query_request()
self.main_engine.query_order(req, order.gateway_name)
if self.position_update_interval >= SETTINGS.get('position_update_interval', 120):
self.main_engine.query_position()
self.position_update_interval = 0
def get_tick(self, vt_symbol: str) -> Optional[TickData]:
"""
Get latest market tick data by vt_symbol.
"""
return self.ticks.get(vt_symbol, None)
def get_order(self, vt_orderid: str) -> Optional[OrderData]:
"""
Get latest order data by vt_orderid.
"""
return self.orders.get(vt_orderid, None)
def get_trade(self, vt_tradeid: str) -> Optional[TradeData]:
"""
Get trade data by vt_tradeid.
"""
return self.trades.get(vt_tradeid, None)
def get_position(self, vt_positionid: str) -> Optional[PositionData]:
"""
Get latest position data by vt_positionid.
"""
return self.positions.get(vt_positionid, None)
def get_account(self, vt_accountid: str) -> Optional[AccountData]:
"""
Get latest account data by vt_accountid.
"""
return self.accounts.get(vt_accountid, None)
def get_contract(self, vt_symbol: str) -> Optional[ContractData]:
"""
Get contract data by vt_symbol.
"""
return self.contracts.get(vt_symbol, None)
def get_all_ticks(self) -> List[TickData]:
"""
Get all tick data.
"""
return list(self.ticks.values())
def get_all_orders(self) -> List[OrderData]:
"""
Get all order data.
"""
return list(self.orders.values())
def get_all_trades(self) -> List[TradeData]:
"""
Get all trade data.
"""
return list(self.trades.values())
def get_all_positions(self) -> List[PositionData]:
"""
Get all position data.
"""
return list(self.positions.values())
def get_all_accounts(self) -> List[AccountData]:
"""
Get all account data.
"""
return list(self.accounts.values())
def get_all_contracts(self) -> List[ContractData]:
"""
Get all contract data.
"""
return list(self.contracts.values())
def get_all_active_orders(self, vt_symbol: str = "") -> List[OrderData]:
"""
Get all active orders by vt_symbol.
If vt_symbol is empty, return all active orders.
"""
if not vt_symbol:
return list(self.active_orders.values())
else:
active_orders = [
order
for order in self.active_orders.values()
if order.vt_symbol == vt_symbol
]
return active_orders
class EmailEngine(BaseEngine):
"""
Provides email sending function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(EmailEngine, self).__init__(main_engine, event_engine, "email")
self.thread: Thread = Thread(target=self.run)
self.queue: Queue = Queue()
self.active: bool = False
self.main_engine.send_email = self.send_email
def send_email(self, subject: str, content: str, receiver: str = "") -> None:
""""""
# Start email engine when sending first email.
if not self.active:
self.start()
# Use default receiver if not specified.
if not receiver:
receiver = SETTINGS["email.receiver"]
msg = EmailMessage()
msg["From"] = SETTINGS["email.sender"]
msg["To"] = receiver
msg["Subject"] = subject
msg.set_content(content)
self.queue.put(msg)
def run(self) -> None:
""""""
while self.active:
try:
msg = self.queue.get(block=True, timeout=1)
with smtplib.SMTP_SSL(
SETTINGS["email.server"], SETTINGS["email.port"]
) as smtp:
smtp.login(
SETTINGS["email.username"], SETTINGS["email.password"]
)
smtp.send_message(msg)
self.write_log(f"发送邮件成功 - {msg}")
except Empty:
pass
except Exception:
et, ev, tb = sys.exc_info()
self.write_log(f"发送邮件失败 - {et.__name__}: {ev}")
sys.excepthook(et, ev, tb)
def start(self) -> None:
""""""
self.active = True
self.thread.start()
def close(self) -> None:
""""""
if not self.active:
return
self.active = False
self.thread.join()
def write_log(self, msg: str) -> None:
"""
Put log event with specific message.
"""
log = LogData(msg=msg, gateway_name="EMAIL")
event = Event(EVENT_LOG, log)
self.event_engine.put(event)
|
papers.py
|
import os
import ctypes
import time
import json
import tempfile
import requests
from threading import Thread, Event
from screeninfo import get_monitors
from flytrap import *
# module state (previously commented out; the functions below require these globals)
version = 'v0.1.6'
activeParams = { "group": "", "username": "", "count": 0 }
displayed = []
paper_count = 0
displays = get_monitors()
primary_display = False
for m in displays:
    if primary_display == False or m.width > primary_display.width:
        primary_display = m
# reuse an existing flypaper temp directory if present, otherwise create one
tempdir = tempfile.gettempdir()
flypaperdir = ''
for d in os.listdir(tempdir):
    full = os.path.join(tempdir, d)
    if os.path.isdir(full) and d.startswith('flypaper'):
        flypaperdir = full
if len(flypaperdir) == 0:
    flypaperdir = tempfile.mkdtemp(prefix='flypaper')
# queue of paper metadata dicts awaiting display; downstream code expects dicts
# with 'id' and 'filename' keys, so cached files on disk are not re-queued
papers = []
def logPaper(paperId):
print("Log Paper ID", paperId)
displayed.append(paperId)
def logParams(group, username):
if group != activeParams['group'] or username != activeParams['username']:
print("Clearing displayed")
displayed.clear()
activeParams['group'] = group
activeParams['username'] = username
def fetchPaper(group, username, progress_callback):
# print("fetch paper")
if len(papers) > 1:
# print("Already has paper in queue")
return
logParams(group, username)
# If we have almost viewed all matching papers, clear
if len(displayed) >= activeParams['count']:
displayed.clear()
# statusMessage.setText('Searching for FlyPaper')
progress_callback.emit({ "papers": papers, "status": "Searching for FlyPaper" })
query = {'limit': 1, 'search': '', 'skip': json.dumps(displayed) }
if group == 'Featured':
query['search'] = 'featured'
if group == 'Liked by':
query['search'] = 'liked:' + username
if group == 'Created by':
query['search'] = username
response = requests.get('https://flypaper.theflyingfabio.com/api/paper/random', params=query)
data = response.json()
if data['count'] == 0:
progress_callback.emit({ "papers": papers, "status": "No Flypaper Found" })
activeParams['count'] = data['count']
# print("Paper Count", activeParams['count'])
for paper in data['papers']:
if paper['id']:
savePaper(paper, group, username, progress_callback)
return { "fetched": True }
def savePaper(paper, group, username, progress_callback):
# statusMessage.setText('Downloading FlyPaper')
progress_callback.emit({ "papers": papers, "status": "Downloading FlyPaper" })
id = paper['id']
filename = paper['filename']
url = "https://flypaper.theflyingfabio.com/render/" + str(id)
query = { 'w': primary_display.width }
response = requests.get(url, stream=True, params=query, verify=False)
block_size = 1024 #1 Kibibyte
file_path = os.path.join(flypaperdir, filename)
with open(file_path, 'wb') as file:
for data in response.iter_content(block_size):
file.write(data)
# insert at the start of the array
# print("save the paper")
papers.append(paper)
progress_callback.emit({ "papers": papers, "status": "" })
# statusMessage.setText('')
# if len(papers) < 1:
# fetchPaper(group, username, statusMessage)
# def threadedFetch(group, username):
# fetchThread = Thread(target = fetchPaper, args = (group, username))
# fetchThread.setDaemon(True)
# fetchThread.start()
def swapPaper(group, username, progress_callback):
# statusMessage.setText('Swapping')
progress_callback.emit({ "papers": papers, "status": "Swapping" })
# print("swap the papers from ", papers)
if (len(papers) >= 1):
paper = papers.pop()
logPaper(paper['id'])
path = os.path.join(flypaperdir, paper['filename'])
ctypes.windll.user32.SystemParametersInfoW(20, 0, path , 0)
currentPaper = paper
# statusMessage.setText('')
progress_callback.emit({ "papers": papers, "status": "", "fetch": True })
if len(papers) < 1:
return { "fetch": True }
def getScheduleInSeconds(schedule):
seconds = {
'Every 2 hours': 60 * 120,
'Every hour': 60 * 60,
'Every 30 mins': 60 * 30,
'Every 10 mins': 60 * 10,
'Every 5 mins': 60 * 5,
'Every minute': 60,
'Every 30 seconds': 30,
'Every 10 seconds': 10,
}
return seconds.get(schedule, 60)
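# e.g. getScheduleInSeconds('Every 5 mins') -> 300; unknown labels fall back to 60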
def scheduledPaperSwap(schedule, group, username, exit_event, progress_callback):
start_time = time.time()
seconds = getScheduleInSeconds(schedule)
# print("start scheduler")
# print("tick")
swapPaper(group, username, progress_callback)
# print("sleeping for ", seconds)
# statusMessage.setText('')
progress_callback.emit({ "papers": papers, "status": "" })
time.sleep(seconds - ((time.time() - start_time) % seconds))
# print("awake", exit_event.is_set())
if not exit_event.is_set():
# threadedSwap(schedule, group, username, exit_event)
return { "swap": True }
# def threadedSwap(schedule, group, username, exit_event):
# fetchThread = Thread(target = scheduledPaperSwap, args = (schedule, group, username, exit_event))
# fetchThread.setDaemon(True)
# fetchThread.start()
|
train.py
|
#!/usr/bin/env python
import os
import json
import torch
import numpy as np
import queue
import pprint
import random
import argparse
import importlib
import threading
import traceback
from tqdm import tqdm
from utils import stdout_to_tqdm
from config import system_configs
from nnet.py_factory import NetworkFactory
from torch.multiprocessing import Process, Queue, Pool
from db.datasets import datasets
from tensorboardX import SummaryWriter ###
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def parse_args():
parser = argparse.ArgumentParser(description="Train CornerNet")
parser.add_argument("cfg_file", help="config file", type=str)
parser.add_argument("--iter", dest="start_iter",
help="train at iteration i",
default=0, type=int)
parser.add_argument("--threads", dest="threads", default=4, type=int)
args = parser.parse_args()
return args
def prefetch_data(db, queue, sample_data, data_aug):
ind = 0
print("start prefetching data...")
np.random.seed(os.getpid())
while True:
try:
data, ind = sample_data(db, ind, data_aug=data_aug)
queue.put(data)
except Exception as e:
traceback.print_exc()
raise e
def pin_memory(data_queue, pinned_data_queue, sema):
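    # sema starts out acquired by the main thread; once the main thread
    # releases it, the non-blocking acquire below succeeds and the loop exits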
while True:
data = data_queue.get()
data["xs"] = [x.pin_memory() for x in data["xs"]]
data["ys"] = [y.pin_memory() for y in data["ys"]]
pinned_data_queue.put(data)
if sema.acquire(blocking=False):
return
def init_parallel_jobs(dbs, queue, fn, data_aug):
tasks = [Process(target=prefetch_data, args=(db, queue, fn, data_aug)) for db in dbs]
for task in tasks:
task.daemon = True
task.start()
return tasks
def train(training_dbs, validation_db, start_iter=0):
learning_rate = system_configs.learning_rate
max_iteration = system_configs.max_iter
pretrained_model = system_configs.pretrain
snapshot = system_configs.snapshot
val_iter = system_configs.val_iter
display = system_configs.display
decay_rate = system_configs.decay_rate
stepsize = system_configs.stepsize
logger = SummaryWriter('./logs/')
# getting the size of each database
training_size = len(training_dbs[0].db_inds)
validation_size = len(validation_db.db_inds)
# queues storing data for training
training_queue = Queue(system_configs.prefetch_size)
validation_queue = Queue(5)
# queues storing pinned data for training
pinned_training_queue = queue.Queue(system_configs.prefetch_size)
pinned_validation_queue = queue.Queue(5)
# load data sampling function
data_file = "sample.{}".format(training_dbs[0].data)
sample_data = importlib.import_module(data_file).sample_data
# allocating resources for parallel reading
training_tasks = init_parallel_jobs(training_dbs, training_queue, sample_data, True)
if val_iter:
validation_tasks = init_parallel_jobs([validation_db], validation_queue, sample_data, False)
training_pin_semaphore = threading.Semaphore()
validation_pin_semaphore = threading.Semaphore()
training_pin_semaphore.acquire()
validation_pin_semaphore.acquire()
training_pin_args = (training_queue, pinned_training_queue, training_pin_semaphore)
training_pin_thread = threading.Thread(target=pin_memory, args=training_pin_args)
training_pin_thread.daemon = True
training_pin_thread.start()
validation_pin_args = (validation_queue, pinned_validation_queue, validation_pin_semaphore)
validation_pin_thread = threading.Thread(target=pin_memory, args=validation_pin_args)
validation_pin_thread.daemon = True
validation_pin_thread.start()
print("building model...")
nnet = NetworkFactory(training_dbs[0])
if pretrained_model is not None:
if not os.path.exists(pretrained_model):
raise ValueError("pretrained model does not exist")
print("loading from pretrained model")
nnet.load_pretrained_params(pretrained_model)
if start_iter:
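        # replay the step-wise LR decay that would have occurred up to start_iter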
learning_rate /= (decay_rate ** (start_iter // stepsize))
nnet.load_params(start_iter)
nnet.set_lr(learning_rate)
print("training starts from iteration {} with learning_rate {}".format(start_iter + 1, learning_rate))
else:
nnet.set_lr(learning_rate)
print("training start...")
nnet.cuda()
nnet.train_mode()
with stdout_to_tqdm() as save_stdout:
for iteration in tqdm(range(start_iter + 1, max_iteration + 1), file=save_stdout, ncols=80):
training = pinned_training_queue.get(block=True)
training_loss = nnet.train(**training)
if display and iteration % display == 0:
logger.add_scalars('losses', {'train': training_loss}, iteration) ###
print("training loss at iteration {}: {}".format(iteration, training_loss.item()))
del training_loss
if val_iter and validation_db.db_inds.size and iteration % val_iter == 0:
nnet.eval_mode()
validation = pinned_validation_queue.get(block=True)
validation_loss = nnet.validate(**validation)
logger.add_scalars('losses', {'val': validation_loss}, iteration) ###
logger.add_scalar('lr', learning_rate, iteration) ###
print("validation loss at iteration {}: {}".format(iteration, validation_loss.item()))
nnet.train_mode()
if iteration % snapshot == 0:
nnet.save_params(iteration)
if iteration % stepsize == 0:
learning_rate /= decay_rate
nnet.set_lr(learning_rate)
# sending signal to kill the thread
training_pin_semaphore.release()
validation_pin_semaphore.release()
# terminating data fetching processes
for training_task in training_tasks:
training_task.terminate()
    if val_iter:
        for validation_task in validation_tasks:
            validation_task.terminate()
if __name__ == "__main__":
args = parse_args()
cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
with open(cfg_file, "r") as f:
configs = json.load(f)
configs["system"]["snapshot_name"] = args.cfg_file
system_configs.update_config(configs["system"])
train_split = system_configs.train_split
val_split = system_configs.val_split
print("loading all datasets...")
dataset = system_configs.dataset
# threads = max(torch.cuda.device_count() * 2, 4)
threads = args.threads
print("using {} threads".format(threads))
training_dbs = [datasets[dataset](configs["db"], train_split) for _ in range(threads)]
validation_db = datasets[dataset](configs["db"], val_split)
print("system config...")
pprint.pprint(system_configs.full)
print("db config...")
pprint.pprint(training_dbs[0].configs)
print("len of db: {}".format(len(training_dbs[0].db_inds)))
train(training_dbs, validation_db, args.start_iter)
|
parallel_runner.py
|
from envs import REGISTRY as env_REGISTRY
from functools import partial
from components.episode_buffer import EpisodeBatch
from multiprocessing import Pipe, Process
import numpy as np
import torch as th
# Based (very) heavily on SubprocVecEnv from OpenAI Baselines
# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/subproc_vec_env.py
class ParallelRunner:
def __init__(self, args, logger):
self.args = args
self.logger = logger
self.batch_size = self.args.batch_size_run
# Make subprocesses for the envs
self.parent_conns, self.worker_conns = zip(*[Pipe() for _ in range(self.batch_size)])
env_fn = env_REGISTRY[self.args.env]
self.ps = [Process(target=env_worker, args=(worker_conn, CloudpickleWrapper(partial(env_fn, **self.args.env_args))))
for worker_conn in self.worker_conns]
for p in self.ps:
p.daemon = True
p.start()
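        # all workers run identical envs, so the shared env info is queried
        # from the first worker only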
self.parent_conns[0].send(("get_env_info", None))
self.env_info = self.parent_conns[0].recv()
self.episode_limit = self.env_info["episode_limit"]
self.t = 0
self.t_env = 0
self.train_returns = []
self.test_returns = []
self.train_stats = {}
self.test_stats = {}
self.log_train_stats_t = -100000
def setup(self, scheme, groups, preprocess, mac):
self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, self.episode_limit + 1,
preprocess=preprocess, device=self.args.device)
self.mac = mac
self.scheme = scheme
self.groups = groups
self.preprocess = preprocess
def get_env_info(self):
return self.env_info
def save_replay(self):
pass
def close_env(self):
for parent_conn in self.parent_conns:
parent_conn.send(("close", None))
def reset(self):
self.batch = self.new_batch()
# Reset the envs
for parent_conn in self.parent_conns:
parent_conn.send(("reset", None))
pre_transition_data = {
"state": [],
"avail_actions": [],
"obs": []
}
# Get the obs, state and avail_actions back
for parent_conn in self.parent_conns:
data = parent_conn.recv()
pre_transition_data["state"].append(data["state"])
pre_transition_data["avail_actions"].append(data["avail_actions"])
pre_transition_data["obs"].append(data["obs"])
self.batch.update(pre_transition_data, ts=0)
self.t = 0
self.env_steps_this_run = 0
def run(self, test_mode=False):
self.reset()
all_terminated = False
episode_returns = [0 for _ in range(self.batch_size)]
episode_lengths = [0 for _ in range(self.batch_size)]
self.mac.init_hidden(batch_size=self.batch_size)
terminated = [False for _ in range(self.batch_size)]
envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
final_env_infos = [] # may store extra stats like battle won. this is filled in ORDER OF TERMINATION
while True:
# Pass the entire batch of experiences up till now to the agents
# Receive the actions for each agent at this timestep in a batch for each un-terminated env
actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated, test_mode=test_mode)
cpu_actions = actions.to("cpu").numpy()
# Update the actions taken
actions_chosen = {
"actions": actions.unsqueeze(1)
}
self.batch.update(actions_chosen, bs=envs_not_terminated, ts=self.t, mark_filled=False)
# Send actions to each env
action_idx = 0
for idx, parent_conn in enumerate(self.parent_conns):
if idx in envs_not_terminated: # We produced actions for this env
if not terminated[idx]: # Only send the actions to the env if it hasn't terminated
parent_conn.send(("step", cpu_actions[action_idx]))
action_idx += 1 # actions is not a list over every env
# Update envs_not_terminated
envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
all_terminated = all(terminated)
if all_terminated:
break
# Post step data we will insert for the current timestep
post_transition_data = {
"reward": [],
"terminated": []
}
# Data for the next step we will insert in order to select an action
pre_transition_data = {
"state": [],
"avail_actions": [],
"obs": []
}
# Receive data back for each unterminated env
for idx, parent_conn in enumerate(self.parent_conns):
if not terminated[idx]:
data = parent_conn.recv()
# Remaining data for this current timestep
post_transition_data["reward"].append((data["reward"],))
episode_returns[idx] += data["reward"]
episode_lengths[idx] += 1
if not test_mode:
self.env_steps_this_run += 1
env_terminated = False
if data["terminated"]:
final_env_infos.append(data["info"])
if data["terminated"] and not data["info"].get("episode_limit", False):
env_terminated = True
terminated[idx] = data["terminated"]
post_transition_data["terminated"].append((env_terminated,))
# Data for the next timestep needed to select an action
pre_transition_data["state"].append(data["state"])
pre_transition_data["avail_actions"].append(data["avail_actions"])
pre_transition_data["obs"].append(data["obs"])
# Add post_transiton data into the batch
self.batch.update(post_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=False)
# Move onto the next timestep
self.t += 1
# Add the pre-transition data
self.batch.update(pre_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=True)
if not test_mode:
self.t_env += self.env_steps_this_run
# Get stats back for each env
for parent_conn in self.parent_conns:
parent_conn.send(("get_stats",None))
env_stats = []
for parent_conn in self.parent_conns:
env_stat = parent_conn.recv()
env_stats.append(env_stat)
cur_stats = self.test_stats if test_mode else self.train_stats
cur_returns = self.test_returns if test_mode else self.train_returns
log_prefix = "test_" if test_mode else ""
infos = [cur_stats] + final_env_infos
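        # merge stats: for every key seen in the running stats or any
        # terminated-episode info, sum its values (missing keys count as 0)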
cur_stats.update({k: sum(d.get(k, 0) for d in infos) for k in set.union(*[set(d) for d in infos])})
cur_stats["n_episodes"] = self.batch_size + cur_stats.get("n_episodes", 0)
cur_stats["ep_length"] = sum(episode_lengths) + cur_stats.get("ep_length", 0)
cur_returns.extend(episode_returns)
n_test_runs = max(1, self.args.test_nepisode // self.batch_size) * self.batch_size
if test_mode and (len(self.test_returns) == n_test_runs):
self._log(cur_returns, cur_stats, log_prefix)
elif self.t_env - self.log_train_stats_t >= self.args.runner_log_interval:
self._log(cur_returns, cur_stats, log_prefix)
if hasattr(self.mac.action_selector, "epsilon"):
self.logger.log_stat("epsilon", self.mac.action_selector.epsilon, self.t_env)
self.log_train_stats_t = self.t_env
return self.batch
def _log(self, returns, stats, prefix):
self.logger.log_stat(prefix + "return_mean", np.mean(returns), self.t_env)
self.logger.log_stat(prefix + "return_std", np.std(returns), self.t_env)
returns.clear()
for k, v in stats.items():
if k != "n_episodes":
self.logger.log_stat(prefix + k + "_mean" , v/stats["n_episodes"], self.t_env)
stats.clear()
def env_worker(remote, env_fn):
# Make environment
env = env_fn.x()
while True:
cmd, data = remote.recv()
if cmd == "step":
actions = data
# Take a step in the environment
reward, terminated, env_info = env.step(actions)
# Return the observations, avail_actions and state to make the next action
state = env.get_state()
avail_actions = env.get_avail_actions()
obs = env.get_obs()
remote.send({
# Data for the next timestep needed to pick an action
"state": state,
"avail_actions": avail_actions,
"obs": obs,
# Rest of the data for the current timestep
"reward": reward,
"terminated": terminated,
"info": env_info
})
elif cmd == "reset":
env.reset()
remote.send({
"state": env.get_state(),
"avail_actions": env.get_avail_actions(),
"obs": env.get_obs()
})
elif cmd == "close":
env.close()
remote.close()
break
elif cmd == "get_env_info":
remote.send(env.get_env_info())
elif cmd == "get_stats":
remote.send(env.get_stats())
else:
raise NotImplementedError
class CloudpickleWrapper():
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
|
mock_request_demo.py
|
# -*- coding: utf-8 -*-
import threading
import requests
url = 'http://127.0.0.1:5000'
def req():
response = requests.get(url)
return response
if __name__ == '__main__':
    threads = []
    for i in range(20):
        t = threading.Thread(target=req)
        t.start()
        threads.append(t)
    # join only after all threads have started so the 20 requests overlap
    for t in threads:
        t.join()
|
uploadWorker.py
|
import threading
from queue import Queue, Empty
from typing import Any
from common_functions import send
import socket
class Worker():
_run_thread: threading.Thread
_uploading_info: Queue
_sock: socket.socket
_information_info: Queue
def __init__(self, s: socket.socket):
self._uploading_info = Queue()
self._sock = s
self._information_info = Queue()
self._run_thread = threading.Thread(target=self.runner)
self._run_thread.start()
    def upload(self, items: Any):
        self._uploading_info.put(items)
def stop(self):
self._information_info.put("Kill")
self._run_thread.join()
    def runner(self):
        while True:
            if not self._information_info.empty():
                break
            try:
                # block briefly instead of busy-spinning on empty()
                items = self._uploading_info.get(timeout=0.1)
            except Empty:
                continue
            send("data", items, self._sock)
|
async.py
|
"""
raven.contrib.async
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from Queue import Queue
from raven.base import Client
from threading import Thread, Lock
import atexit
import os
SENTRY_WAIT_SECONDS = 10
class AsyncWorker(object):
_terminator = object()
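    # unique sentinel object; the worker thread exits when it dequeues it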
def __init__(self):
self._queue = Queue(-1)
self._lock = Lock()
self._thread = None
self.start()
def main_thread_terminated(self):
size = self._queue.qsize()
if size:
print "Sentry attempts to send %s error messages" % size
print "Waiting up to %s seconds" % SENTRY_WAIT_SECONDS
if os.name == 'nt':
print "Press Ctrl-Break to quit"
else:
print "Press Ctrl-C to quit"
self.stop(timeout=SENTRY_WAIT_SECONDS)
def start(self):
"""
Starts the task thread.
"""
self._lock.acquire()
try:
if not self._thread:
self._thread = Thread(target=self._target)
self._thread.setDaemon(True)
self._thread.start()
finally:
self._lock.release()
atexit.register(self.main_thread_terminated)
def stop(self, timeout=None):
"""
Stops the task thread. Synchronous!
"""
self._lock.acquire()
try:
if self._thread:
self._queue.put_nowait(self._terminator)
self._thread.join(timeout=timeout)
self._thread = None
finally:
self._lock.release()
def queue(self, callback, kwargs):
self._queue.put_nowait((callback, kwargs))
def _target(self):
while 1:
record = self._queue.get()
if record is self._terminator:
break
callback, kwargs = record
callback(**kwargs)
class AsyncClient(Client):
"""
This client uses a single background thread to dispatch errors.
"""
def __init__(self, worker=None, *args, **kwargs):
self.worker = worker or AsyncWorker()
super(AsyncClient, self).__init__(*args, **kwargs)
def send_sync(self, **kwargs):
super(AsyncClient, self).send(**kwargs)
def send(self, **kwargs):
self.worker.queue(self.send_sync, kwargs)
class SentryWorker(object):
"""
A WSGI middleware which provides ``environ['raven.worker']``
that can be used by clients to process asynchronous tasks.
>>> from raven.base import Client
>>> application = SentryWorker(application)
"""
def __init__(self, application):
self.application = application
self.worker = AsyncWorker()
def __call__(self, environ, start_response):
environ['raven.worker'] = self.worker
for event in self.application(environ, start_response):
yield event
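if __name__ == '__main__':
    # Minimal demo sketch (not part of the original raven module): AsyncWorker
    # simply runs queued callbacks on its background thread, so it can be
    # exercised without a Sentry server.
    def _demo(message=None):
        print message

    _worker = AsyncWorker()
    _worker.queue(_demo, {'message': 'hello from the worker thread'})
    _worker.stop(timeout=1)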
|
repair_manager.py
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017 beyond-blockchain.org.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import hashlib
import time
import threading
import queue
import os
import sys
sys.path.extend(["../../", os.path.abspath(os.path.dirname(__file__))])
from bbc1.core.data_handler import DataHandler
from bbc1.core.bbc_stats import BBcStats
import bbclib
from bbc1.core.message_key_types import PayloadType, KeyType, InfraMessageCategory
from bbc1.core import logger
class RepairManager:
"""Data repair manager for forged transaction/asset"""
REQUEST_REPAIR_TRANSACTION = 0
REQUEST_REPAIR_ASSET_FILE = 1
REQUEST_TO_SEND_TRANSACTION_DATA = 2
RESPONSE_TRANSACTION_DATA = 3
REQUEST_TO_SEND_ASSET_FILE = 4
RESPONSE_ASSET_FILE = 5
def __init__(self, network=None, domain_id=None, workingdir=".", loglevel="all", logname=None):
if network is not None:
self.network = network
self.core = network.core
self.stats = network.core.stats
self.data_handler = network.domains[domain_id]['data']
else:
self.stats = BBcStats()
self.repair_log = os.path.join(workingdir, domain_id.hex(), "repair_log.json")
self.logger = logger.get_logger(key="repair_manager", level=loglevel, logname=logname)
self.domain_id = domain_id
self.queue = queue.Queue()
self.requesting_list = dict()
self.loop_flag = True
th_nw_loop = threading.Thread(target=self._manager_loop)
th_nw_loop.setDaemon(True)
th_nw_loop.start()
def _output_log(self, repair_info):
"""Output log in json format"""
with open(self.repair_log, "a") as f:
f.write(json.dumps(repair_info)+"\n")
def exit_loop(self):
"""Exit the manager loop"""
self.loop_flag = False
self.put_message()
def _manager_loop(self):
"""Main loop"""
while self.loop_flag:
msg = self.queue.get()
if msg is None:
continue
if msg[KeyType.command] == RepairManager.REQUEST_REPAIR_TRANSACTION:
self._repair_transaction_data(msg[KeyType.transaction_id])
elif msg[KeyType.command] == RepairManager.REQUEST_REPAIR_ASSET_FILE:
self._repair_asset_file(msg[KeyType.asset_group_id], msg[KeyType.asset_id])
elif msg[KeyType.command] == RepairManager.REQUEST_TO_SEND_TRANSACTION_DATA:
self._send_transaction_data(msg)
elif msg[KeyType.command] == RepairManager.RESPONSE_TRANSACTION_DATA:
self._receive_transaction_data_from_others(msg)
elif msg[KeyType.command] == RepairManager.REQUEST_TO_SEND_ASSET_FILE:
self._send_asset_file(msg)
elif msg[KeyType.command] == RepairManager.RESPONSE_ASSET_FILE:
self._receive_asset_file_from_others(msg)
def put_message(self, msg=None):
"""append a message to the queue"""
self.queue.put(msg)
def _repair_transaction_data(self, transaction_id):
"""Repair forged transaction_data or asset_file by getting legitimate one from other nodes
Args:
transaction_id (bytes): target transaction_id
"""
#print("_repair_transaction_data:")
self.stats.update_stats_increment("transaction", "repair_request", 1)
forged_asset_files = set()
if len(self.data_handler.db_adaptors) > 1:
valid_txobj = None
db_nums_with_invalid_data = list()
for idx in range(1, len(self.data_handler.db_adaptors)):
result_txobj, result_asset_files = self.data_handler.search_transaction(transaction_id=transaction_id, db_num=idx)
txobj_is_valid, valid_assets, invalid_assets = bbclib.validate_transaction_object(result_txobj[0],
result_asset_files)
if txobj_is_valid and valid_txobj is None:
valid_txobj = result_txobj[0]
if not txobj_is_valid:
db_nums_with_invalid_data.append(idx)
if len(invalid_assets) > 0:
for ent in invalid_assets:
forged_asset_files.add(ent)
if valid_txobj is None:
self.stats.update_stats_increment("transaction", "fail_to_repair_in_local", 1)
self.logger.fatal("Failed to repair transaction locally (transaction_id=%s in domain=%s)" %
(transaction_id.hex(), self.domain_id.hex()))
else:
for i in db_nums_with_invalid_data:
self.data_handler.restore_transaction_data(db_num=i, transaction_id=transaction_id, txobj=valid_txobj)
self.stats.update_stats_increment("transaction", "success_repair", 1)
self._output_log({"transaction_id": transaction_id.hex(), "request_at": int(time.time()),
"repaired_by": "locally", "repaired_at": int(time.time())})
if len(forged_asset_files) > 0:
for asgid, ast in forged_asset_files:
self._repair_asset_file(asset_group_id=asgid, asset_id=ast, need_check=False)
if self.data_handler.replication_strategy == DataHandler.REPLICATION_EXT:
return
random_nonce = bbclib.get_random_value(4)
while random_nonce in self.requesting_list:
random_nonce = bbclib.get_random_value(4)
self.requesting_list[random_nonce] = {
"transaction_id": transaction_id.hex(),
"request_at": int(time.time())
}
msg = {
KeyType.domain_id: self.domain_id,
KeyType.infra_msg_type: InfraMessageCategory.CATEGORY_DATA,
KeyType.infra_command: DataHandler.REPAIR_TRANSACTION_DATA,
KeyType.command: RepairManager.REQUEST_TO_SEND_TRANSACTION_DATA,
KeyType.transaction_id: transaction_id,
KeyType.nonce: random_nonce,
}
self.network.broadcast_message_in_network(domain_id=self.domain_id,
payload_type=PayloadType.Type_any, msg=msg)
return
def _repair_asset_file(self, asset_group_id, asset_id, need_check=True):
"""Repair forged asset_file by getting legitimate one from other nodes
Args:
asset_group_id (bytes): asset_group_id of the asset
asset_id (bytes): asset_id of the asset
need_check (bool): If True, check the digest of the asset file
"""
#print("_repair_asset_file:")
if self.data_handler.use_external_storage:
return
if need_check:
asset_file = self.data_handler.get_in_storage(asset_group_id, asset_id)
if asset_file is not None and asset_id == hashlib.sha256(asset_file).digest():
return
random_nonce = bbclib.get_random_value(4)
while random_nonce in self.requesting_list:
random_nonce = bbclib.get_random_value(4)
self.requesting_list[random_nonce] = {
"asset_group_id": asset_group_id.hex(),
"request_at": int(time.time())
}
if asset_id is not None:
self.requesting_list[random_nonce]["asset_id"] = asset_id.hex()
msg = {
KeyType.domain_id: self.domain_id,
KeyType.infra_msg_type: InfraMessageCategory.CATEGORY_DATA,
KeyType.infra_command: DataHandler.REPAIR_TRANSACTION_DATA,
KeyType.command: RepairManager.REQUEST_TO_SEND_ASSET_FILE,
KeyType.asset_group_id: asset_group_id,
KeyType.asset_id: asset_id,
KeyType.nonce: random_nonce,
}
self.network.broadcast_message_in_network(domain_id=self.domain_id,
payload_type=PayloadType.Type_any, msg=msg)
def _send_transaction_data(self, dat):
"""Send transaction data if having valid one"""
#print("_send_transaction_data::")
transaction_id = dat[KeyType.transaction_id]
for idx in range(len(self.data_handler.db_adaptors)):
result_txobj, result_asset_files = self.data_handler.search_transaction(transaction_id=transaction_id, db_num=idx)
txobj_is_valid, valid_assets, invalid_assets = bbclib.validate_transaction_object(result_txobj[transaction_id])
if txobj_is_valid:
dat[KeyType.command] = RepairManager.RESPONSE_TRANSACTION_DATA
dat[KeyType.transaction_data] = bbclib.serialize(result_txobj[transaction_id])
dat[KeyType.destination_node_id] = dat[KeyType.source_node_id]
self.network.send_message_in_network(None, domain_id=self.domain_id, msg=dat)
return
def _receive_transaction_data_from_others(self, dat):
"""Receive transaction data from other core_nodes and check its validity
Args:
dat (dict): received message
"""
#print("_receive_transaction_data_from_others:")
if KeyType.transaction_data not in dat or KeyType.transaction_id not in dat or KeyType.nonce not in dat:
return
if dat[KeyType.nonce] not in self.requesting_list:
return
asset_files = dict()
if KeyType.all_asset_files in dat:
asset_files = dat[KeyType.all_asset_files]
txobj, fmt_type = bbclib.deserialize(dat[KeyType.transaction_data])
if txobj.transaction_data is None:
return
txobj_is_valid, valid_assets, invalid_assets = bbclib.validate_transaction_object(txobj, asset_files)
if txobj_is_valid:
self.stats.update_stats_increment("transaction", "success_repair", 1)
for idx in range(len(self.data_handler.db_adaptors)):
self.data_handler.restore_transaction_data(db_num=idx, transaction_id=txobj.transaction_id, txobj=txobj)
add_info = {
"repaired_by": dat[KeyType.source_node_id].hex(),
"repaired_at": int(time.time())
}
self.requesting_list[dat[KeyType.nonce]].update(add_info)
self._output_log(self.requesting_list[dat[KeyType.nonce]])
del self.requesting_list[dat[KeyType.nonce]]
def _send_asset_file(self, dat):
"""Send the asset file if having valid one
Args:
dat (dict): received message
"""
#print("_send_asset_file::")
asset_group_id = dat[KeyType.asset_group_id]
asset_id = dat[KeyType.asset_id]
asset_file = self.data_handler.get_in_storage(asset_group_id, asset_id)
if asset_file is None:
return
result_txobj, result_asset_files = self.data_handler.search_transaction(asset_group_id=asset_group_id,
asset_id=asset_id)
txobj = next(iter(result_txobj.values()))
txobj_is_valid, valid_assets, invalid_assets = bbclib.validate_transaction_object(txobj, result_asset_files)
if (asset_group_id, asset_id) in valid_assets:
dat[KeyType.command] = RepairManager.RESPONSE_ASSET_FILE
dat[KeyType.asset_group_id] = asset_group_id
dat[KeyType.asset_id] = asset_id
dat[KeyType.asset_file] = asset_file
dat[KeyType.destination_node_id] = dat[KeyType.source_node_id]
self.network.send_message_in_network(None, domain_id=self.domain_id, msg=dat)
def _receive_asset_file_from_others(self, dat):
"""Receive asset file from other core_nodes and check its validity
Args:
dat (dict): received message
"""
#print("_receive_asset_file_from_others:")
if KeyType.nonce not in dat or dat[KeyType.nonce] not in self.requesting_list:
return
if KeyType.asset_group_id not in dat or KeyType.asset_id not in dat or KeyType.asset_file not in dat:
return
asset_group_id = dat[KeyType.asset_group_id]
asset_id = dat[KeyType.asset_id]
asset_file = dat[KeyType.asset_file]
if asset_file is None:
return
asset_files = {asset_id: asset_file}
result_txobj, result_asset_files = self.data_handler.search_transaction(asset_group_id=asset_group_id,
asset_id=asset_id)
txobj = next(iter(result_txobj.values()))
txobj_is_valid, valid_assets, invalid_assets = bbclib.validate_transaction_object(txobj, asset_files)
if (asset_group_id, asset_id) in valid_assets:
self.data_handler.store_in_storage(asset_group_id, asset_id, asset_file, do_overwrite=True)
add_info = {
"repaired_by": dat[KeyType.source_node_id].hex(),
"repaired_at": int(time.time())
}
self.requesting_list[dat[KeyType.nonce]].update(add_info)
self._output_log(self.requesting_list[dat[KeyType.nonce]])
del self.requesting_list[dat[KeyType.nonce]]
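# Hypothetical usage sketch (names from this module; the network plumbing is
# omitted, so this is illustrative only): a caller requests repair of a
# single transaction by enqueueing a command message for the manager loop.
#
#   manager = RepairManager(network=bbc_network, domain_id=domain_id)
#   manager.put_message({
#       KeyType.command: RepairManager.REQUEST_REPAIR_TRANSACTION,
#       KeyType.transaction_id: transaction_id,
#   })
#   ...
#   manager.exit_loop()  # stop the daemon loop on shutdown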
|
cache.py
|
import collections
import contextlib
import datetime
import functools
import json
import logging
import os
import re
import threading
import time
import sqlalchemy as sa
from sqlalchemy.engine.base import Transaction as _sa_Transaction
from sgapi import TransportError
from sgevents import EventLog
from . import fields
from .api3.create import Api3CreateOperation
from .entity import EntityType
from .events import EventProcessor
from .exceptions import EntityMissing
from .logs import log_globals
from .scanner import Scanner
from .schema import Schema
from .utils import log_exceptions, get_shotgun, try_call_except_traceback
from .control import ControlServer, ControlClient
log = logging.getLogger(__name__)
class Cache(collections.Mapping):
"""The master cache model from which all operations tend to start.
:param db: SQLAlchemy engine to use for cache.
:param schema: The :class:`~.Schema` to cache.
"""
def __init__(self, db=None, schema=None, config=None):
if (config and (db or schema)) or ((db or schema) and not (db and schema)):
raise ValueError('provide either config, or db and schema')
self.config = config
if config:
db = sa.create_engine(config['SQLA_URL'], echo=bool(config['SQLA_ECHO']))
schema = Schema.from_yaml(config['SCHEMA'])
self.db = db
self.metadata = sa.MetaData(bind=db)
self.schema = schema
self.shotgun = get_shotgun('sgapi', config=config)
self.control_clients = {}
# Build model objects from the schema; these will not be complete
# until we reflect the database below.
self._entity_types = {}
for name, entity_schema in schema.iteritems():
self._entity_types[name] = EntityType(self, name, entity_schema)
# Reflect the database and issue any required DDL.
self.metadata.reflect()
for entity in self._entity_types.itervalues():
entity._construct_schema()
@contextlib.contextmanager
def db_connect(self, con=None):
# If given a connection, use that.
if con is not None:
yield con
else:
with self.db.connect() as con:
yield con
@contextlib.contextmanager
def db_begin(self, con=None):
# If we have a "connection" that came from Engine.begin(),
# then just pass it through.
if con is not None and isinstance(con, _sa_Transaction):
yield con
# If we have a "real" connection, start a transaction.
elif con is not None:
with con.begin():
yield con
# Yield a combo connection/transaction.
else:
with self.db.begin() as con:
yield con
def __getitem__(self, key):
try:
return self._entity_types[key]
except KeyError as e:
raise EntityMissing(e.args[0])
def __iter__(self):
return iter(self._entity_types)
def __len__(self):
return len(self._entity_types)
def _clear(self):
with self.db_begin() as con:
for entity_type in self._entity_types.itervalues():
entity_type._clear(con)
def filter_cacheable_data(self, type_name, data=None):
if isinstance(type_name, dict):
data = type_name.copy()
type_name = data.pop('type')
cacheable_data = {}
if 'id' in data:
cacheable_data['id'] = data.pop('id')
data.pop('type', None)
entity_type = self[type_name]
for field_name, value in data.iteritems():
field = entity_type.get(field_name)
if field and field.is_cached():
cacheable_data[field_name] = value
return cacheable_data
def filter_cacheable_entity(self, entity):
type_ = entity['type']
cacheable = self.filter_cacheable_data(entity)
cacheable['type'] = type_
return cacheable
def fetch_via_find(self, type_name, filters, async=False):
entity_type = self[type_name]
entities = self.shotgun.find(type_name, filters,
fields=[key for key, field in entity_type.fields.iteritems() if field.is_cached()],
async=async,
)
for found in entities:
cacheable = self.filter_cacheable_entity(found)
if cacheable:
self.create_or_update(type_name, cacheable, create_with_id=True)
return entities
    def fetch_partial_entities(self, template_entities):
        # Split up by type.
        ids_by_type = {}
        for e in template_entities:
            ids_by_type.setdefault(e['type'], []).append(e['id'])
        # Do the updates.
        futures = []
        for type_name, ids in ids_by_type.iteritems():
            # We need to fetch all of the data from the real server; bleh.
            entity_type = self[type_name]
            future = self.fetch_via_find(type_name, [('id', 'in', ids)], async=True)
            futures.append((entity_type, ids, future))
        # Cache everything as it comes back.
        for entity_type, ids, future in futures:
            fetched_entities = future.result()
            if len(fetched_entities) != len(ids):
                log.error('Only found %d of %d provided %s' % (len(fetched_entities), len(ids), entity_type.type_name))
def create_or_update(self, type_name, data, create_with_id=False, source_event=None, **kwargs):
"""Create or update an entity, with an API eerily similar to ``python_api3``.
This is a wrapper around :class:`.Api3CreateOperation`.
:param str type_name: The name of the type of entity to create/update.
:param dict data: The key-value data for that entity.
:param bool create_with_id: Should ``id`` be allowed within the ``data`` param?
If not, then the entity must already exist, and this is an ``update``.
If so, then the entity will be updated if it exists, or will be
created if not (and it is assumed that ``data`` represents a complete
view of that entity).
:param \**kwargs: Options to pass to :meth:`.Api3CreateOperation.run`.
:return: The :class:`~.Api3CreateOperation`, which can be inspected to
see if the entity existed or not.
::
>>> res = cache.create_or_update('Task', data)
>>> res.entity_exists
False
"""
request = {
'type': type_name,
'fields': [{'field_name': k, 'value': v} for k, v in data.iteritems()],
'return_fields': ['id'],
}
op = Api3CreateOperation(request, create_with_id=create_with_id, source_event=source_event)
op.run(self, **kwargs)
return op
def retire(self, type_name, entity_id, **kwargs):
return self._set_active(type_name, entity_id, False, **kwargs)
def revive(self, type_name, entity_id, **kwargs):
return self._set_active(type_name, entity_id, True, **kwargs)
def _set_active(self, type_name, entity_id, state, extra=None, source_event=None, con=None, strict=True):
entity_type = self[type_name]
data = self.filter_cacheable_data(type_name, extra) if extra else {}
data['_active'] = bool(state)
data['_cache_updated_at'] = datetime.datetime.utcnow() # TODO: isn't this automatic?
if source_event:
data['_last_log_event_id'] = source_event.id
with self.db_begin(con) as con:
res = con.execute(entity_type.table.update().where(entity_type.table.c.id == entity_id), **data)
if strict and not res.rowcount:
raise ValueError('cannot %s un-cached %s %d' % ('revive' if state else 'retire', type_name, entity_id))
return bool(res.rowcount)
def get_last_event(self):
"""Get tuple of ``(last_id, last_time)`` stored in the cache.
This is optionally used to seed :meth:`watch` and :meth:`scan`.
"""
last_id = None
last_time = None
for entity_type in self._entity_types.itervalues():
row = sa.select([
sa.func.max(entity_type.table.c._last_log_event_id),
sa.func.max(entity_type.table.c._cache_updated_at),
]).execute().fetchone()
# We can max(None, 1), so this is ok...
last_id = max(last_id, row[0])
# ... but datetime does not compare against None directly.
last_time = max(last_time, row[1]) if (last_time and row[1]) else (last_time or row[1])
return last_id, last_time
def watch(self, last_id=None, last_time=None, auto_last_id=False, idle_delay=5.0, async=False):
"""Watch the Shotgun event log, and process events.
:param int last_id: Last seen event ID to start processing at.
:param datetime.datetime last_time: Last seen cache time to start processing at.
:param bool auto_last_id: Should we use :meth:`get_last_event`
to determine where to start processing?
:param float idle_delay: Seconds between polls of the event log.
:param bool async: Should this be run in a thread?
:returns: ``threading.Thread`` if ``async`` is true.
"""
if async:
thread = threading.Thread(target=functools.partial(try_call_except_traceback, self.watch, last_id, last_time, auto_last_id, idle_delay))
thread.daemon = True
thread.start()
return thread
if auto_last_id:
last_id, last_time = self.get_last_event()
# Ask for the updated_at of every entity that we care about.
# This is used in the handling of "change" events.
extra_fields = []
for entity_name in self:
extra_fields.append('entity.%s.updated_at' % entity_name)
self.event_log = EventLog(shotgun=self.shotgun, last_id=last_id, last_time=last_time, extra_fields=extra_fields)
self.event_processor = EventProcessor(self)
io_error_count = 0
error_count = 0
while True:
try:
for event in self.event_log.iter_events_forever(idle_delay=idle_delay):
io_error_count = error_count = 0
log_globals.meta = {'event': event['id']}
try:
log.info(event.summary)
handler = self.event_processor.get_handler(event)
if not handler:
continue
with self.db.begin() as con:
handler(con)
except:
log.exception('Error during event %d:\n%s' % (event['id'], event.dumps(pretty=True)))
finally:
log_globals.meta = {}
except IOError as e:
io_error_count += 1
log.log(
logging.ERROR if io_error_count % 60 == 2 else logging.WARNING,
'No connection to Shotgun for ~%d minutes; sleeping for 10s' % (io_error_count / 6),
exc_info=True,
)
time.sleep(10)
except:
# NOTE: The event log may have corrupted its ID tracking, and
# be in an infinite loop. Only send emails about the first few,
# because inboxes can fill up.
error_count += 1
if error_count <= 10 or error_count % 60 == 1:
if error_count >= 10:
log.exception('Error %d during event iteration; silencing for ~10 minutes; sleeping for 10s' % error_count)
else:
log.exception('Error %d during event iteration; sleeping for 10s' % error_count)
else:
log.warning('Error %d during event iteration; sleeping for 10s' % error_count, exc_info=True)
time.sleep(10)
else:
log.warning('EventLog.iter_events_forever() returned; sleeping for 10s')
time.sleep(10)
def scan(self, interval=None, last_time=None, auto_last_time=False, async=False, **kwargs):
"""Periodically scan Shotgun for updated entities.
:param float interval: Seconds between scans; ``None`` implies a single scan.
:param datetime.datetime last_time: When to scan for updates since; ``None``
implies a complete scan of Shotgun.
:param bool auto_last_time: Should we use :meth:`get_last_event` to
determine when to scan since?
:param bool async: Should this be run in a thread?
:returns: ``threading.Thread`` if ``async`` is true.
"""
if async:
thread = threading.Thread(target=functools.partial(try_call_except_traceback, self.scan, interval, last_time, auto_last_time, **kwargs))
thread.daemon = True
thread.start()
return thread
if auto_last_time:
last_id, last_time = self.get_last_event()
self.scanner = Scanner(self, last_time=last_time, config=self.config, **kwargs)
while True:
try:
self.scanner.scan(interval)
if not interval:
break # only need to get through one
except:
log.exception('error during scan; sleeping for 30s')
time.sleep(30)
def get_control_path(self, name):
return os.path.join(self.config['DATA_ROOT'], 'control', name)
def build_control_server(self, name):
return ControlServer(self.get_control_path(name), name=name)
def get_control_client(self, name):
try:
return self.control_clients[name]
except KeyError:
client = ControlClient(self.get_control_path(name))
self.control_clients[name] = client
return client
def send_control_message(self, service, type, **kw):
client = self.get_control_client(service)
client.send(type, **kw)
def send_and_recv_control_message(self, service, type, **kw):
client = self.get_control_client(service)
return client.send_and_recv(type, **kw)
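# Hypothetical usage sketch (Python 2, matching this module; the config keys
# shown are the ones read in __init__ and the paths are illustrative):
#
#   cache = Cache(config={
#       'SQLA_URL': 'sqlite:///cache.db',
#       'SQLA_ECHO': '',
#       'SCHEMA': 'schema.yml',
#       'DATA_ROOT': '/var/tmp/sgcache',
#   })
#   cache.scan(async=True)          # one-off background scan in a thread
#   cache.watch(auto_last_id=True)  # then follow the event log forever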
|
t.py
|
from flask import Flask, request
app = Flask(__name__)
#import threading
#import recod
@app.route('/')
def hello_world():
#x = threading.Thread(target=recod.freeze_support, args=None)
#x.start()
with open("d.htm") as f:
return f.read()
#@app.route('/gotcha',methods=['POST'])
#def gotcha():
#return request.form.get('png')
#x = threading.Thread(target=recod.freeze_support, args=None)
#x.start()
|
song_line.py
|
# -*- coding: utf-8 -*-
from Scripts.elements import *
from Scripts.song_manage import SongManage
from Scripts.music_storage import MusicStorage
from Scripts.music_interface import MusicInterface
class SongLine(SongManage, MusicInterface):
def __init__(self):
Main.SONG_LINE_CANVAS = Canvas(Main.ROOT, width=Main.SETTINGS.width, height=200, bg=themes[Main.SETTINGS.theme]["second_color"], bd=0, highlightthickness=0)
Main.SONG_LINE_CANVAS.pack()
self.song_id_now = ""
self.time_line_now = None
def song_time_thread(self):
song_id_now = Main.SONG_PLAY_NOW["song_id"]
self.song_duration = [int(i) for i in Main.SONG_PLAY_NOW["time"].split(":")] # song time
self.sec_song_duration = (60*self.song_duration[0] + self.song_duration[1]+1)
self.time_line_bbox = Main.SONG_LINE_CANVAS.bbox(self.time_line)
self.num_for_time_line = 160 / self.sec_song_duration
if Main.SONG_TIME_NOW == "00:00":
# if play new song #
self.num_for_time_line_now = 0 # default time line
while Main.PLAYER_SETTINGS["play"] and song_id_now == Main.SONG_PLAY_NOW["song_id"]:
# after song #
if int(Main.PLAYER.get_time()) >= self.sec_song_duration:
if Main.PLAYER_SETTINGS["cycle"]:
self.behind_after_music(0)
return
elif Main.PLAYER_SETTINGS["random_song"]:
self.play_random_song()
return
self.behind_after_music(1)
return
elif int(Main.PLAYER.get_time()) <= self.sec_song_duration:
self.update_time()
time_sleep(1)
def loading_song(self, error=None):
Main.SONG_LINE_CANVAS.delete("all")
text = languages[error][Main.SETTINGS.language] if error else languages["Загрузка"][Main.SETTINGS.language]+"..."
Main.SONG_LINE_CANVAS.create_text(30, 40, text=text, fill=themes[Main.SETTINGS.theme]["text_color"], anchor=W, font="Verdana 12")
draw_just_lines()
Main.ROOT.update()
def update_time(self):
song_time_now = int(Main.PLAYER.get_time())
Main.SONG_TIME_NOW = time.strftime("%M:%S", time.gmtime(song_time_now))
self.num_for_time_line_now = self.num_for_time_line*song_time_now
Main.SONG_LINE_CANVAS.delete(self.time)
Main.SONG_LINE_CANVAS.delete(self.time_line_now)
self.time = Main.SONG_LINE_CANVAS.create_text(self.x_time, 42, text=Main.SONG_TIME_NOW, fill=themes[Main.SETTINGS.theme]["text_second_color"], anchor=W, font="Verdana 10")
self.time_line_now = Main.SONG_LINE_CANVAS.create_line(Main.SONG_LINE_CANVAS.bbox(self.time)[2]+8, Main.SONG_LINE_CANVAS.bbox(self.time)[3]-7, self.time_line_bbox[0]+self.num_for_time_line_now+5, Main.SONG_LINE_CANVAS.bbox(self.time)[3]-7, width=4, fill="black")
Main.SONG_LINE_CANVAS.tag_bind(self.time_line_now, "<Motion>", self.draw_time_under_mouse)
Main.SONG_LINE_CANVAS.tag_bind(self.time_line_now, "<Leave>", self.del_time_under_mouse)
Main.SONG_LINE_CANVAS.tag_bind(self.time_line_now, "<Button-1>", self.set_time)
def set_time(self, event):
Main.PLAYER.set_time(float(int(self.set_song_sec)))
self.update_time()
def draw_time_under_mouse(self, event):
self.cursor_x = event.x - self.start_song_line
self.set_song_sec = (self.cursor_x / self.num_for_time_line) - 3
if self.set_song_sec > self.sec_song_duration:
self.song_sec = time.strftime("%M:%S", time.gmtime(self.sec_song_duration-1))
elif self.set_song_sec < 0.0:
self.song_sec = "00:00"
else:
self.song_sec = time.strftime("%M:%S", time.gmtime(self.set_song_sec))
self.del_time_under_mouse(None)
self.time_under_cursor = Main.SONG_LINE_CANVAS.create_text(event.x, 29, text=self.song_sec, fill=themes[Main.SETTINGS.theme]["text_color"], font="Verdana 10")
    def del_time_under_mouse(self, event):
        try:
            Main.SONG_LINE_CANVAS.delete(self.time_under_cursor)
        except AttributeError:
            pass  # no time label has been drawn under the cursor yet
def draw_music_line(self, change_settings=False):
clear_ram()
Main.SONG_LINE_CANVAS.delete("all")
if Main.SONG_PLAY_NOW["song_id"] is None:
return
song_name = Main.SONG_PLAY_NOW["name"][:40]+'...' if len(Main.SONG_PLAY_NOW["name"]) > 40 else Main.SONG_PLAY_NOW["name"]
song_author = Main.SONG_PLAY_NOW["author"][:40]+'...' if len(Main.SONG_PLAY_NOW["author"]) > 40 else Main.SONG_PLAY_NOW["author"]
# Song info #
self.song_name = Main.SONG_LINE_CANVAS.create_text(30, 32, text=song_name, fill=themes[Main.SETTINGS.theme]["text_color"], anchor=W, font="Verdana 12")
self.song_author = Main.SONG_LINE_CANVAS.create_text(30, 52, text=song_author, fill=themes[Main.SETTINGS.theme]["text_second_color"], anchor=W, font="Verdana 12")
# time now #
self.x_time = Main.SONG_LINE_CANVAS.bbox(self.song_name)[2]+23 if Main.SONG_LINE_CANVAS.bbox(self.song_name)[2] > Main.SONG_LINE_CANVAS.bbox(self.song_author)[2] else Main.SONG_LINE_CANVAS.bbox(self.song_author)[2]+23
self.time = Main.SONG_LINE_CANVAS.create_text(self.x_time, 42, text=Main.SONG_TIME_NOW, fill=themes[Main.SETTINGS.theme]["text_second_color"], anchor=W, font="Verdana 10")
# time line #
self.time_line = Main.SONG_LINE_CANVAS.create_line(Main.SONG_LINE_CANVAS.bbox(self.time)[2]+8, Main.SONG_LINE_CANVAS.bbox(self.time)[3]-7, Main.SONG_LINE_CANVAS.bbox(self.time)[2]+167, Main.SONG_LINE_CANVAS.bbox(self.time)[3]-7, width=4, fill=themes[Main.SETTINGS.theme]["text_second_color"])
try:
self.time_line_now = Main.SONG_LINE_CANVAS.create_line(Main.SONG_LINE_CANVAS.bbox(self.time)[2]+8, Main.SONG_LINE_CANVAS.bbox(self.time)[3]-7, self.time_line_bbox[0]+self.num_for_time_line_now+5, Main.SONG_LINE_CANVAS.bbox(self.time)[3]-7, width=4, fill="black")
except:
self.time_line_now = Main.SONG_LINE_CANVAS.create_line(Main.SONG_LINE_CANVAS.bbox(self.time_line)[2], Main.SONG_LINE_CANVAS.bbox(self.time_line)[3]-7, 0, Main.SONG_LINE_CANVAS.bbox(self.time_line)[3]-7, width=4, fill="black")
self.start_song_line = Main.SONG_LINE_CANVAS.bbox(self.time_line)[0]
Main.SONG_LINE_CANVAS.tag_bind(self.time_line, "<Motion>", self.draw_time_under_mouse)
Main.SONG_LINE_CANVAS.tag_bind(self.time_line, "<Leave>", self.del_time_under_mouse)
Main.SONG_LINE_CANVAS.tag_bind(self.time_line, "<Button-1>", self.set_time)
Main.SONG_LINE_CANVAS.tag_bind(self.time_line_now, "<Motion>", self.draw_time_under_mouse)
Main.SONG_LINE_CANVAS.tag_bind(self.time_line_now, "<Leave>", self.del_time_under_mouse)
Main.SONG_LINE_CANVAS.tag_bind(self.time_line_now, "<Button-1>", self.set_time)
if self.song_id_now != Main.SONG_PLAY_NOW["song_id"]:
Main.SONG_LINE_CANVAS.delete(self.time_line_now)
self.song_id_now = Main.SONG_PLAY_NOW["song_id"]
# song time #
self.song_time = Main.SONG_LINE_CANVAS.create_text(Main.SONG_LINE_CANVAS.bbox(self.time_line)[2]+7, Main.SONG_LINE_CANVAS.bbox(self.time_line)[1]+4, text=Main.SONG_PLAY_NOW["time"], fill=themes[Main.SETTINGS.theme]["text_second_color"], anchor=W, font="Verdana 10")
# Button "behind song" #
self.behind_song_button = Main.SONG_LINE_CANVAS.create_window(Main.SONG_LINE_CANVAS.bbox(self.song_time)[2]+15, Main.SONG_LINE_CANVAS.bbox(self.song_time)[1]+8, anchor=W, window=Button(image=MyImage.BEHIND_SONG, command=lambda: self.behind_after_music(-1), width=15, height=18, bd=0, bg=themes[Main.SETTINGS.theme]["second_color"], activebackground=themes[Main.SETTINGS.theme]["second_color"], relief=RIDGE))
# Button "play/stop" #
if Main.PLAYER_SETTINGS["play"]:
self.play_button = Button(image=MyImage.PAUSE, command=lambda: self.click_play(), width=16, height=23, bd=0, bg=themes[Main.SETTINGS.theme]["second_color"], activebackground=themes[Main.SETTINGS.theme]["second_color"], relief=RIDGE)
else:
self.play_button = Button(image=MyImage.PLAY, command=lambda: self.click_play(), width=16, height=23, bd=0, bg=themes[Main.SETTINGS.theme]["second_color"], activebackground=themes[Main.SETTINGS.theme]["second_color"], relief=RIDGE)
self.play_button_draw = Main.SONG_LINE_CANVAS.create_window(Main.SONG_LINE_CANVAS.bbox(self.behind_song_button)[2]+8, Main.SONG_LINE_CANVAS.bbox(self.song_time)[1]+8, anchor=W, window=self.play_button)
# Button "after song" #
self.after_song_button = Main.SONG_LINE_CANVAS.create_window(Main.SONG_LINE_CANVAS.bbox(self.play_button_draw)[2]+9, Main.SONG_LINE_CANVAS.bbox(self.song_time)[1]+8, anchor=W, window=Button(image=MyImage.AFTER_SONG, command=lambda: self.behind_after_music(1), width=15, height=18, bd=0, bg=themes[Main.SETTINGS.theme]["second_color"], activebackground=themes[Main.SETTINGS.theme]["second_color"], relief=RIDGE))
# Button "cycle" #
if Main.PLAYER_SETTINGS["cycle"]:
self.cycle_button = Button(image=MyImage.CYCLE_CLICK, command=lambda: self.cycle_song(), width=18, height=18, bd=0, bg=themes[Main.SETTINGS.theme]["second_color"], activebackground=themes[Main.SETTINGS.theme]["second_color"], relief=RIDGE)
else:
self.cycle_button = Button(image=MyImage.CYCLE, command=lambda: self.cycle_song(), width=18, height=18, bd=0, bg=themes[Main.SETTINGS.theme]["second_color"], activebackground=themes[Main.SETTINGS.theme]["second_color"], relief=RIDGE)
self.cycle_button_draw = Main.SONG_LINE_CANVAS.create_window(Main.SONG_LINE_CANVAS.bbox(self.after_song_button)[2]+13, Main.SONG_LINE_CANVAS.bbox(self.song_time)[1]+10, anchor=W, window=self.cycle_button)
# Button "random song" #
if Main.PLAYER_SETTINGS["random_song"]:
self.rand_song_button = Button(image=MyImage.RANDOM_SONG_CLICK, command=lambda: self.random_song(), width=18, height=18, bd=0, bg=themes[Main.SETTINGS.theme]["second_color"], activebackground=themes[Main.SETTINGS.theme]["second_color"], relief=RIDGE)
else:
self.rand_song_button = Button(image=MyImage.RANDOM_SONG, command=lambda: self.random_song(), width=18, height=18, bd=0, bg=themes[Main.SETTINGS.theme]["second_color"], activebackground=themes[Main.SETTINGS.theme]["second_color"], relief=RIDGE)
self.rand_song_button_draw = Main.SONG_LINE_CANVAS.create_window(Main.SONG_LINE_CANVAS.bbox(self.cycle_button_draw)[2]+9, Main.SONG_LINE_CANVAS.bbox(self.song_time)[1]+9, anchor=W, window=self.rand_song_button)
# Button "more music" #
self.more_music_button = Main.SONG_LINE_CANVAS.create_window(Main.SONG_LINE_CANVAS.bbox(self.rand_song_button_draw)[2]+9, Main.SONG_LINE_CANVAS.bbox(self.song_time)[1]+8, anchor=W, window=Button(image=MyImage.MORE_MUSIC, command=lambda: self.music_interface("Сейчас_играет", Main.LIST_OF_PLAY), width=20, height=18, bd=0, bg=themes[Main.SETTINGS.theme]["second_color"], activebackground=themes[Main.SETTINGS.theme]["second_color"], relief=RIDGE))
# Button "more info" #
self.more_info_button = Main.SONG_LINE_CANVAS.create_window(Main.SONG_LINE_CANVAS.bbox(self.more_music_button)[2]+7, Main.SONG_LINE_CANVAS.bbox(self.song_time)[1]+8, anchor=W, window=Button(image=MyImage.MORE_INFO, command=lambda: (Main.MORE_INFO_INTERFACE.song_info_draw(Main.PAST_SONG["class"].song_data) if Main.PAST_SONG["lib_now"] != "Settings" else None), width=17, height=19, bd=0, bg=themes[Main.SETTINGS.theme]["second_color"], activebackground=themes[Main.SETTINGS.theme]["second_color"], relief=RIDGE))
draw_just_lines()
if Main.PLAYER_SETTINGS["play"] and not change_settings:
Thread(target=Main.SONG_LINE.song_time_thread, daemon=True).start()
|
crypto.py
|
import functools
import json
import os
import subprocess
import tempfile
from mediaman import config
from mediaman.core import logtools
from mediaman.core import models
from mediaman.core import settings
from mediaman.middleware import simple
logger = logtools.new_logger("mediaman.middleware.crypto")
def init(func):
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
self.init_metadata()
return func(self, *args, **kwargs)
return wrapped
# <openssl> enc -<e/d> -<cipher> -kfile <keypath> -md <digest> -in <inpath> -out <outpath>
DEFAULT_CIPHER = "aes-256-cbc"
DEFAULT_DIGEST = "sha256"
CRYPTO_KEY_ENV_VAR = "MM_CRYPTO_KEY"
DEFAULT_KEY_PATH = os.path.expanduser("~/.mediaman/key")
KEYPATH = config.load(CRYPTO_KEY_ENV_VAR, default=DEFAULT_KEY_PATH)
OPENSSL_PREFERRED_BINS = [
"/usr/local/Cellar/libressl/3.2.2/bin",
"/usr/local/Cellar/libressl/2.9.2/bin",
"/usr/bin",
]
ERROR_MULTIPLE_REMOTE_FILES = "\
[!] Multiple crypt files found for service ({})! \
This must be resolved manually. Exiting..."
TEST_SESH = {} # name: salt
def form_path_prepend():
global OPENSSL_PREFERRED_BINS
return ":".join(OPENSSL_PREFERRED_BINS + [config.load_safe("PATH")])
def form_subprocess_environ():
new_env = os.environ.copy()
new_env["PATH"] = form_path_prepend()
return new_env
def encrypt(request, keypath, cipher, digest):
tempfile_ref = tempfile.NamedTemporaryFile(mode="wb+", delete=True)
args = [
"openssl", "enc", "-e",
"-in", str(request.path),
"-out", str(tempfile_ref.name),
"-kfile", keypath, f"-{cipher}", "-md", digest,
]
logger.info(f"encrypting: {args}")
logger.info(f"Encrypting file...")
subprocess.check_output(args, stderr=subprocess.PIPE, env=form_subprocess_environ())
tempfile_ref.seek(0)
return tempfile_ref
def decrypt(source, destination, keypath, cipher, digest):
args = [
"openssl", "enc", "-d",
"-in", str(source),
"-out", str(destination),
"-kfile", keypath, f"-{cipher}", "-md", digest,
]
logger.info(f"decrypting: {args}")
logger.info(f"Decrypting file...")
try:
subprocess.check_output(args, stderr=subprocess.PIPE, env=form_subprocess_environ())
except subprocess.CalledProcessError as exc:
err_text = exc.stderr.decode("utf-8")
logger.debug(exc)
if err_text.startswith("bad decrypt"):
logger.fatal(f"Decryption failed -- encryption key is incorrect.")
else:
logger.fatal(f"Decryption failed -- generic error: {err_text}")
raise
def decrypt_stream(source, keypath, cipher, digest):
import threading
def pump_input(pipe, source):
# https://stackoverflow.com/questions/32322034/writing-large-amount-of-data-to-stdin
with pipe:
for bytez in source:
pipe.write(bytez)
pipe.flush()
def deal_with_stdout(process, sink):
for bytez in process.stdout:
sink.write(bytez)
sink.flush()
args = [
"openssl", "enc", "-d",
"-kfile", keypath, f"-{cipher}", "-md", digest,
"-bufsize", "1048576",
]
logger.info(f"decrypting: {args}")
logger.info(f"Decrypting stream...")
try:
process = subprocess.Popen(
args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=form_subprocess_environ())
# threading.Thread(target=deal_with_stdout, args=[process, sink]).start()
# for bytez in source:
# process.stdin.write(bytez)
# process.stdin.flush()
threading.Thread(target=pump_input, args=[process.stdin, source]).start()
# import sys
# sink = sys.stdout.buffer
# for bytez in process.stdout:
# # sink.write(bytez)
# # sink.flush()
while True:
bytez = process.stdout.read()
# logger.debug(bytez)
if not bytez:
return
yield bytez
# for bytez in process.stdout:
# logger.debug(bytez)
# yield bytez
# yield from process.stdout
except subprocess.CalledProcessError as exc:
err_text = exc.stderr.decode("utf-8")
logger.debug(exc)
if err_text.startswith("bad decrypt"):
logger.fatal(f"Decryption failed -- encryption key is incorrect.")
else:
logger.fatal(f"Decryption failed -- generic error: {err_text}")
raise
def create_metadata(data=None):
if data is None:
data = {}
return {
"version": settings.VERSION,
"data": data,
}
class EncryptionMiddlewareService(simple.SimpleMiddleware):
MIDDLEWARE_FILENAME = "crypt"
def __init__(self, service):
super().__init__(service)
logger.info(f"EncryptionMiddlewareService init for {service}")
self.metadata_id = None
self.metadata = None
def init_metadata(self):
if self.metadata is not None:
return
# TODO: implement
file_list = self.service.search_by_name(EncryptionMiddlewareService.MIDDLEWARE_FILENAME)
files = file_list.results()
if len(files) > 1:
raise RuntimeError(ERROR_MULTIPLE_REMOTE_FILES.format(self.service))
if not files:
logger.debug(f"creating metadata")
self.metadata = create_metadata()
self.update_metadata()
else:
logger.debug(f"loading metadata")
self.load_metadata(files[0])
def update_metadata(self):
with tempfile.NamedTemporaryFile("w+", delete=True) as tempfile_ref:
tempfile_ref.write(json.dumps(self.metadata))
tempfile_ref.seek(0)
request = models.Request(
id=EncryptionMiddlewareService.MIDDLEWARE_FILENAME,
path=tempfile_ref.name,
)
receipt = self.service.upload(request)
logger.info(f"update_metadata receipt: {receipt}")
self.metadata_id = receipt.id()
def load_metadata_json(self, metadata_file):
logger.info(f"load_metadata_json file: {metadata_file}")
self.metadata_id = metadata_file.id()
with tempfile.NamedTemporaryFile("w+", delete=True) as tempfile_ref:
request = models.Request(
id=self.metadata_id,
path=tempfile_ref.name,
)
self.service.download(request)
tempfile_ref.seek(0)
return json.loads(tempfile_ref.read())
def load_metadata(self, metadata_file):
self.metadata = self.load_metadata_json(metadata_file)
logger.debug(f"Loaded metadata: {self.metadata}")
if "version" not in self.metadata:
logger.critical(f"'version' field missing from metadata! This is an outdated or unversioned meta file. You will need to fix it by running `mm <service> refresh`.")
raise RuntimeError("Unversioned metadata")
version = self.metadata["version"]
if version > settings.VERSION:
logger.critical(f"Metadata version ({version}) exceeds software version ({settings.VERSION}). You need to update your software to parse this metadata file.")
raise RuntimeError("Outdated software")
if version < settings.VERSION:
logger.critical(f"Metadata version ({version}) is below software version ({settings.VERSION}). You need to update it by running `mm <service> refresh`.")
raise RuntimeError("Outdated metadata")
    def track_cipher(self, key, cipher, digest):
        self.metadata["data"][key] = {"cipher": cipher, "digest": digest}
self.update_metadata()
def upload(self, request):
# TODO: remove metadata when file is deleted
keypath = KEYPATH
cipher = DEFAULT_CIPHER
digest = DEFAULT_DIGEST
with encrypt(request, keypath, cipher, digest) as encrypted_tempfile:
request.path = encrypted_tempfile.name
receipt = self.service.upload(request)
self.track_cipher(receipt.id(), cipher, digest) # IMPORTANT -- must track by sid!
return receipt
def download(self, request):
if request.id not in self.metadata["data"]:
logger.info(f"Downloading unencrypted file: {request}")
return self.service.download(request)
params = self.metadata["data"][request.id]
keypath = KEYPATH
cipher = params["cipher"]
digest = params["digest"]
logger.info(f"Downloading encrypted file: {request}")
with tempfile.NamedTemporaryFile("wb+", delete=True) as tempfile_ref:
temp_request = models.Request(
id=request.id,
path=tempfile_ref.name,
)
receipt = self.service.download(temp_request)
tempfile_ref.seek(0)
decrypt(tempfile_ref.name, request.path, keypath, cipher, digest)
return receipt
def stream(self, request):
if request.id not in self.metadata["data"]:
logger.info(f"Streaming unencrypted file: {request}")
return self.service.stream(request)
# global TEST_SESH
# if request.id not in TEST_SESH:
# TEST_SESH[request.id] = {"ebuff": None, "pbuff": None}
params = self.metadata["data"][request.id]
keypath = KEYPATH
cipher = params["cipher"]
digest = params["digest"]
logger.info(f"Streaming encrypted file: {request}")
temp_request = models.Request(
id=request.id,
)
stream = self.service.stream(temp_request)
return decrypt_stream(stream, keypath, cipher, digest)
def stream_range(self, request, offset, length):
if request.id not in self.metadata["data"]:
logger.info(f"Streaming unencrypted file: {request}")
return self.service.stream_range(request, offset, length)
logger.info(f"Streaming encrypted file: {request}")
temp_request = models.Request(
id=request.id,
)
if offset < 16:
# raise RuntimeError("Not supported!")
return self.stream_range_continuous(temp_request, offset, length)
return self.stream_range_discontinuous(temp_request, offset, length)
def stream_range_continuous(self, request, offset, length):
params = self.metadata["data"][request.id]
keypath = KEYPATH
cipher = params["cipher"]
digest = params["digest"]
import math
BLOCK_SIZE = 16
blocks_needed = int(math.ceil((offset + length) / BLOCK_SIZE))
skip = 0
count = (blocks_needed + 2) * BLOCK_SIZE
stream = self.service.stream_range(request, skip, count)
out_stream = decrypt_stream(stream, keypath, cipher, digest)
remaining = length
first = True
for bytez in out_stream:
# print(f"\tremaining={remaining}")
if first:
# print(f"\ttrimming {bytez}")
bytez = bytez[(offset % 16):]
# print(f"\tto {bytez}")
first = False
if len(bytez) >= remaining:
yield bytez[:remaining]
remaining -= remaining
break
yield bytez
remaining -= len(bytez)
def stream_range_discontinuous(self, request, offset, length):
params = self.metadata["data"][request.id]
keypath = KEYPATH
cipher = params["cipher"]
digest = params["digest"]
import itertools
import math
logger.debug(f"Getting CBC salt...")
BLOCK_SIZE = 16
start_block = offset // BLOCK_SIZE
blocks_needed = int(math.ceil((offset + length) / BLOCK_SIZE) - start_block)
global TEST_SESH
try:
salt = TEST_SESH[request.id]
except KeyError:
salt = [next(self.service.stream_range(request, 0, 16))]
TEST_SESH[request.id] = salt
skip = (start_block) * BLOCK_SIZE
count = (blocks_needed + 2) * BLOCK_SIZE
data = self.service.stream_range(request, skip, count)
stream = itertools.chain(salt, data)
out_stream = decrypt_stream(stream, keypath, cipher, digest)
remaining = length
first = True
for bytez in out_stream:
# print(f"\tremaining={remaining}")
if first:
# print(f"\ttrimming {bytez}")
bytez = bytez[(offset % 16) + 16:]
# print(f"\tto {bytez}")
first = False
if len(bytez) >= remaining:
# print(f"\tend")
yield bytez[:remaining]
remaining -= remaining
break
yield bytez
remaining -= len(bytez)
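# Worked sketch of the block arithmetic used by stream_range_* above
# (assumption: a 16-byte block cipher in CBC mode; the numbers are
# illustrative, not taken from a real request):
#
#   import math
#   offset, length, BLOCK = 100, 50, 16
#   start_block = offset // BLOCK                                    # 6
#   blocks_needed = int(math.ceil((offset + length) / BLOCK)) - start_block  # 4
#   skip = start_block * BLOCK    # 96: bytes to skip in the remote file
#   count = (blocks_needed + 2) * BLOCK  # 96: +2 blocks of slack for CBC/padding
#   trim = offset % BLOCK         # 4: bytes dropped from the first decrypted block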
|
fts.py
|
"""
fts.py
------
Script to transfer file(s) to or from a host that is behind a UNIX gateway.
File transfer to a host that is behind a UNIX gateway requires
authentication with the gateway prior to accessing the host itself.
Arguments can be passed directly when calling the script (both the short
and long forms are supported, e.g. -h or --help). If not, the user will be
prompted for the required information (e.g. Unix gate to use, gate username,
remote host, file(s) to be transferred, etc.). The script uses several CSV
files that hold information about the gateway hosts, groups of servers to
access, non-MS hosts, and MS client information (client instance, hostname,
FTP password, client ID).
"""
import json
import argparse
import logging
import logging.config
import math
import getpass
import csv
import ftplib
import contextlib
import sys
import threading
import datetime
import time
from pprint import pprint
from pathlib import Path
# global variables and constants
VERSION_NO = '1.0'
BUILD_DATE = '2019/06/19'
BUILD_TIME = '14:55:35'
equal_sign_line = '=' * 72
dash_line = '-' * 47
t = lambda: f'{datetime.datetime.now():%c}'
curr_dir = Path()
LOG_DIR = curr_dir / 'logs'
CONFIG_DIR = curr_dir / 'config'
JSON_CONFIG = 'fts.json'
# FileHandler (asctime) will include the date and time stamps
FORMATTER = logging.Formatter(
fmt='%(asctime)s - %(levelname)s - %(message)s', datefmt='%I:%M:%S %p')
# fmt='%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %I:%M:%S %p')
LOG_FILE = LOG_DIR / 'fts.log'
class Error(Exception):
"""Base class for exceptions"""
pass
class EmptyInputError(Error):
"""Exception raised if user just pressed enter when prompted to input something"""
pass
class GatewayConnectionError(Error):
"""
Attributes:
logger (logging.Logger object) - Object that handles the FileHandler and StreamHandler
gate (str): Unix gate
location (str): Unix gate location
"""
def __init__(self, logger, gate, location):
self.logger = logger
self.logger.info(dash_line)
self.logger.error(
f'Error connecting to the {location.title()} Gate ({gate})')
self.logger.warning('Possible causes:')
self.logger.warning(
'1. Incorrect IDLDAP.net credentials (username and/or password)')
self.logger.warning('2. Not connected to the company\'s network')
self.logger.warning('3. "VIP Access" sign-in request was denied or timed out')
self.logger.info(dash_line)
raise TerminateTheScript(self.logger)
class RemoteHostConnectionError(Error):
"""Exception raised if unable to login to remote host most likely due to incorrect credentials
Attributes:
logger (logging.Logger object) - Object that handles the FileHandler and StreamHandler
remote_user (str): Remote host username
remote_host (str): Remote host
"""
def __init__(self, logger, remote_user, remote_host):
self.logger = logger
self.remote_user = remote_user
self.remote_host = remote_host
self.logger.error(
f'Host login incorrect! {self.remote_user}@{self.remote_host}')
self.logger.warning(
f'Please double check your credentials (username and/or password)')
raise TerminateTheScript(self.logger)
class WeGotOurselvesAQuitter(Error):
"""Exception raised if user wants to quit the script prematurely
Attribute:
logger(logging.Logger object) - Object that handles the FileHandler and StreamHandler
"""
def __init__(self, logger):
self.logger = logger
print()
self.logger.info(dash_line)
self.logger.warning(
'Ladies and gentlemen, we got ourselves a quitter!!!')
self.logger.warning(
'Quitter!! quitter! quitter... *fades in the background*')
raise TerminateTheScript(self.logger)
class ConfigDoesNotExistError(Error):
"""Exception raised if one of the configuration directories and/or files does not exist
Attributes:
logger (logging.Logger object) - Object that handles the FileHandler and StreamHandler
config (str) - Missing configuration (directory or file)
"""
def __init__(self, logger, config):
self.config = config
self.logger = logger
self.logger.error(f'{self.config} does not exist!')
raise TerminateTheScript(self.logger)
class UploadFileDoesNotExistError(Error):
"""Exception raised if the file to be uploaded does not exist
Attributes:
logger(logging.Logger object) - Object that handles the FileHandler and StreamHandler
file(Path object)
"""
def __init__(self, logger, file, curr_dir):
self.file = file
self.curr_dir = curr_dir
self.logger = logger
self.logger.error(f'{self.file} does not exist in {self.curr_dir}')
self.logger.info(dash_line)
raise TerminateTheScript(self.logger)
class TerminateTheScript(Error):
"""Exception raised from the other custom exceptions to prematurely end the script
Attribute:
logger(logging.Logger object) - object that handles the FileHandler and StreamHandler
"""
def __init__(self, logger):
self.logger = logger
self.logger.warning('Terminating script...')
self.logger.info('End of program')
self.logger.info(f'Logged everything in {LOG_FILE}')
self.logger.info('Thank you for using the script!')
self.logger.info(f'END - {t()}')
self.logger.info(equal_sign_line)
sys.exit()
class FtpConnection():
# next_f = None
upload_size = 0
def __init__(self, gateway, gate_location, gate_user, gate_pwd, server_grp, ms_instance, action, files, remote_host, remote_user, remote_pwd, remote_dir, logger):
self.gateway = gateway
self.gate_location = gate_location
self.gate_user = gate_user
self.gate_pwd = gate_pwd
self.server_grp = server_grp
self.ms_instance = ms_instance
self.action = action
self.files = files
self.remote_host = remote_host
self.remote_user = remote_user
self.remote_pwd = remote_pwd
self.remote_dir = remote_dir
self.logger = logger
self.host = f'MS host' if self.server_grp == 'ms' else f'non-MS host'
def _progress_bar(self, file_name, file_size, action):
"""
Class method to calculate the size of file being transferred
and calling the _update_bar function to update and display
the progress bar
"""
runs = file_size
this_file = Path(file_name)
current_filesize = 0
counter = 0
while current_filesize <= file_size:
if action == 'download':
current_filesize = this_file.stat().st_size
else:
current_filesize = self.upload_size
# print(current_filesize)
self._update_progress_bar(runs, current_filesize + 1)
if current_filesize >= file_size:
time.sleep(1)
break
def _update_progress_bar(self, total, progress):
"""
Class method to update and display the progress bar in the console.
Original source: https://stackoverflow.com/a/15860757/1391441
"""
barLength, status = 20, ""
progress = float(progress) / float(total)
if progress >= 1.:
progress, status = 1, "\r\n"
block = int(round(barLength * progress))
text = f'\r[{"#" * block + "-" * (barLength - block)}] {round(progress * 100, 0):.0f}% {status}'
sys.stdout.write(text)
sys.stdout.flush()
def connect_and_transfer(self):
self.logger.info(
f'Connecting to the {self.gate_location.title()} Gate ({self.gateway})...')
try:
with ftplib.FTP(host=self.gateway) as ftp:
self.logger.info(f'Connection established!')
welcome = ftp.getwelcome()
if welcome:
self.logger.info(
f'A message from the server:\n{welcome}')
self.logger.info(
'Please approve the push notification (sign-in request) in your "VIP Access" mobile app...')
# update Class attribute with the ftp object
self.ftp = ftp
# login to unix gate
self._login_to_gate()
except ftplib.all_errors as e:
raise GatewayConnectionError(
self.logger, self.gateway, self.gate_location)
def _login_to_gate(self):
try:
self.ftp.login(user=self.gate_user, passwd=self.gate_pwd)
self.logger.info(f'User {self.gate_user} logged in')
self._login_to_remote_host()
except ftplib.all_errors:
raise GatewayConnectionError(
self.logger, self.gateway, self.gate_location)
def _login_to_remote_host(self):
# login to the chosen host (MS or non-MS)
try:
self.logger.info(f'Logging in to the {self.host}...')
self.ftp.sendcmd(f'USER {self.remote_user}@{self.remote_host}')
self.ftp.sendcmd(f'PASS {self.remote_pwd}')
self.logger.info(
f'Logged in: {self.remote_user}@{self.remote_host}')
self._transfer_files()
self.ftp.close()
self.logger.info('FTP connection closed')
self.logger.info('Disconnected from server')
except ftplib.all_errors:
raise RemoteHostConnectionError(
self.logger, self.remote_user, self.remote_host)
def _update_remote_filesize(self, x):
"""
Class method to update the instance variable (upload_size) to calculate every block of data transferred
"""
        # Calling ftp.size() repeatedly from the _progress_bar function returns
        # inconsistent results, so the workaround is an instance variable that
        # is updated for every block of data that is uploaded.
self.upload_size += 8192
def _transfer_files(self):
if self.server_grp == 'ms':
self.logger.info(
f'By default, transferring files to/from {self.remote_dir}')
try:
self.logger.info(f'Currently in $HOME ({self.ftp.pwd()})')
changed_dir = False
if self.remote_dir != 'home':
self.logger.info(f'Changing directory to: {self.remote_dir}')
self.ftp.cwd(self.remote_dir)
changed_dir = True
self.logger.info('Switching to Binary mode.')
self.ftp.sendcmd('TYPE I')
for next_file in self.files:
downloaded = False
uploaded = False
self.logger.info(dash_line)
self.logger.info(f'Starting {self.action} of {next_file}...')
if self.action == 'download':
with open(next_file, 'wb') as new_file:
file_size = self.ftp.size(next_file)
# prepare the thread to display the progress bar for file transfer
thread_d = threading.Thread(target=self._progress_bar, args=(next_file, file_size, self.action))
thread_d.start()
self.ftp.retrbinary(cmd=f'RETR {next_file}', callback=new_file.write)
thread_d.join()
downloaded = True
else:
with open(next_file, 'rb') as new_file:
file_size = Path(next_file).stat().st_size
self.upload_size = 0
# prepare the thread to display the progress bar for file transfer
thread_u = threading.Thread(target=self._progress_bar, args=(next_file, file_size, self.action))
thread_u.start()
self.ftp.storbinary(f'STOR {next_file}', new_file, callback=self._update_remote_filesize)
thread_u.join()
uploaded = True
if downloaded or uploaded:
self.logger.info(
f'File transfer successful, transferred {self.ftp.size(next_file)} bytes')
self.logger.info(dash_line)
except ftplib.all_errors:
if not changed_dir:
self.logger.error(
f'{self.remote_dir} does not exist in the remote host!')
if self.action == 'download' and not downloaded:
self.logger.error(
f'{next_file} does not exist in {self.remote_dir} !')
# in case of download failure, delete the local file
local_file = Path(next_file)
if local_file.exists():
self.logger.info('Download failed. Deleting local copy...')
local_file.unlink()
self.logger.info(dash_line)
self.ftp.close()
self.logger.info('FTP connection closed')
self.logger.info('Disconnected from server')
raise TerminateTheScript(self.logger)
def set_console_handler(level):
"""Function to set the StreamHandler log handler
Argument:
        level (int): Minimum logging level for messages written to the console
Returns:
console_handler (StreamHandler object): log handler that writes to the console
"""
console_handler = logging.StreamHandler()
console_handler.setFormatter(FORMATTER)
console_handler.setLevel(level)
return console_handler
def set_file_handler():
"""Function to set the FileHandler log handler
Returns:
file_handler (FileHandler object): log handler that writes to the log file
"""
file_handler = logging.FileHandler(LOG_FILE)
file_handler.setFormatter(FORMATTER)
return file_handler
def get_logger(name, level=logging.DEBUG):
"""Function to create the log handler
Returns:
A logging.Logger object with FileHandler and StreamHandler objects
"""
# create a custom logger
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# add handlers to the logger object
logger.addHandler(set_console_handler(level))
logger.addHandler(set_file_handler())
logger.propagate = False
return logger
def load_json_config(logger, file):
"""
Function that retrieves values from the JSON configuration file
Arguments:
logger(logging.Logger object) - object that handles the FileHandler and StreamHandler
file(str) - JSON configuration file
Returns:
A tuple of dictionaries that contain information from JSON configuration file
"""
logger.info(f'Loading configuration from the JSON file ({JSON_CONFIG})...')
json_file = Path(file)
if not json_file.exists():
raise ConfigDoesNotExistError(logger, json_file)
with open(json_file) as f:
data = json.load(f)
# user credentials for Unix gate
json_gate_details = data['fts_config']['gateway']
# user credentials of different non-MS hosts
json_nonms_details = data['fts_config']['nonms']
# csv-related configuration
json_csv_details = data['fts_config']['csv']
return (json_gate_details, json_nonms_details, json_csv_details)
def check_config(logger, csv_files):
"""Function that checks for existence of required directories (LOG_DIR and CONFIG_DIR),
CSV files and other configuration files
Arguments:
logger(logging.Logger object) - object that handles the FileHandler and StreamHandler
csv_files (list) - List of CSV files under CONFIG_DIR
"""
logger.info('Checking configurations...')
if not LOG_DIR.exists():
raise ConfigDoesNotExistError(logger, f'{LOG_DIR} directory')
if not CONFIG_DIR.exists():
raise ConfigDoesNotExistError(logger, f'{CONFIG_DIR} directory')
for file in csv_files:
csv_file = CONFIG_DIR / file
if not csv_file.exists():
raise ConfigDoesNotExistError(logger, csv_file)
logger.info('Configurations validated')
def ask_user(logger, prompt, header=None, response_type='int', main_dict=None, menu_dict=None, echo=True, quit=True, column=4):
"""
Function to ask user for information that wasn't passed as argument when calling the program.
Parameters:
prompt (str): The user will be prompted by this question
        header (str): Title text displayed above the prompt; pass it for a 1-question prompt
                      or the first question of a multi-question prompt, None for succeeding questions
response_type (str): Default response is of integer type; can also be string and list (e.g. args.file)
main_dict (dict): Main dictionary to validate user input
menu_dict (dict): Dictionary for values to be displayed in the user menu.
User choice will then be looked up against main dictionary
echo (bool): Determine if user input will be displayed; True by default, False for password prompts
quit (bool): Determine if user prompt will have the [Q/q]uit option
column (int): Number of columns when displaying options to choose from; default 4
Returns:
A tuple of the key and value pair from main dictionary
"""
menu_choice = None
prompt = f'{prompt} ([Q/q] to quit): ' if quit else f'{prompt}: '
while True:
try:
# try until user inputs a valid selection from menu
if header:
print(f'{equal_sign_line}\n{header}\n{len(header) * "-"}\n')
if main_dict or menu_dict:
d = menu_dict if menu_dict else main_dict
# determine the length of the dictionary for right justification in the user menu
right_j = int(math.log10(len(d))) + 1
counter = 0
                # display the user menu
for key, value in d.items():
end_with = '\n' if counter == column else '\t'
if counter == column:
counter = -1
print(f'{str(key).rjust(right_j)} : {value}', end=end_with)
counter += 1
if not echo:
# for passwords, do not echo user input
answer = getpass.getpass(prompt=prompt)
else:
answer = input(f'{prompt}')
if not answer:
raise EmptyInputError
if quit and answer.lower() == 'q':
raise WeGotOurselvesAQuitter(logger)
# depends on response_type parameter if expecting
# an answer that is an integer or string
if response_type == 'str':
answer = str(answer)
            elif response_type == 'list':
                answer = answer.split()
else:
answer = int(answer)
# if no dictionary is passed (e.g. Username), or for the 'MS Client ID'
# just return that user input
if (not main_dict and not menu_dict) or header == 'MS Client ID':
main_value = answer
if menu_dict:
                # user input will be validated against the menu dictionary first
menu_choice = menu_dict[answer]
answer = menu_choice
if (main_dict or menu_dict) and not header == 'MS Client ID':
# value will then be validated against main dictionary
main_value = main_dict[answer]
except (ValueError, KeyError):
print('\n!!! Invalid selection...\n')
except EmptyInputError:
print('\n!!! Can\'t be empty. Please enter a valid value...\n')
else:
print()
return (main_value, menu_choice)
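# Illustrative ask_user() call (dictionary contents hypothetical): display a
# numbered menu built from main_dict and return the validated value.
#   action = {1: 'download', 2: 'upload'}
#   value, _ = ask_user(logger, prompt='Your choice', header='Action', main_dict=action)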
def parse_csv(filename, logger, sort=False):
    """
    Function to parse a CSV file and store its information into a dictionary
    Parameters:
        filename (str): CSV filename to be parsed
        logger (logging.Logger): object that handles the FileHandler and StreamHandler
        sort (bool): If values need to be sorted prior to storing into menu dictionary
Returns:
A tuple of 2 dictionaries: one holds the main values while the other for the user menu
"""
# global config_dir
csv_file = CONFIG_DIR / f'{filename}'
csv_file = open(csv_file, newline='')
reader = csv.reader(csv_file)
# parse the csv's header
header_list = next(reader)
# need separate dictionaries for 1) the menu in the user prompt, 2) the main dictionary
menu_dict = {}
main_dict = {}
counter = 1
# if sort is True, temporarily put all instances in a list,
# then sort later so menu will be in alphabetical order
temp_list = []
for row in reader:
val = row[0].lower()
if sort:
# sample dictionary for storing client environment information
# key = instance ID
# value = a tuple of these values in order (instance, hostname, FTP password, 5-char client ID)
# {
# 'instance1' : ('id1', 'nipon01.internal.net', 'abc123', 'XRADI'),
# 'instance2' : ('id2', 'hague01.internal.net', '59K>oSgs', 'MSXYZ')
# }
temp_list.append(val)
main_dict[row[0]] = tuple([row[0], row[1], row[2], row[3]])
else:
menu_dict[counter] = val
main_dict[val] = row[1]
counter += 1
if sort:
# it's not expected that the values in MS_client_accounts.csv are sorted,
# so sort the (temp) list of instances prior to creating the user prompt menu dictionary
temp_list.sort()
counter = 1
for item in temp_list:
menu_dict[counter] = item
counter += 1
    csv_file.close()
    logger.info(f'{csv_file.name} successfully loaded')
# return both dictionaries as tuple
return (main_dict, menu_dict)
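# Illustrative CSV rows assumed by parse_csv() (values hypothetical). With
# sort=False, column 1 becomes the menu entry and column 2 the main value:
#   gate1,gate1.internal.net  ->  menu_dict[1] = 'gate1'; main_dict['gate1'] = 'gate1.internal.net'
# With sort=True (MS_client_accounts.csv), all four columns are kept as a tuple:
#   baldis1,nipon01.internal.net,abc123,ALDIS
#   ->  main_dict['baldis1'] = ('baldis1', 'nipon01.internal.net', 'abc123', 'ALDIS')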
def check_if_existing(logger, files):
"""Function which checks if file(s) to be uploaded exist locally
    Arguments:
logger (logging.Logger object) - Object that handles the FileHandler and StreamHandler
files (list): File(s) to be uploaded
"""
current_dir = Path().absolute()
    for item in files:
        x = Path(item)
        if not x.exists():
            raise UploadFileDoesNotExistError(logger, x, current_dir)
def validate_or_ask_arg(logger, **kwargs):
"""
    Function to validate the command line argument passed. If it was not passed as an argument,
    or other information is needed from the user that is not part of the command line arguments,
    then call ask_user() to prompt the user for it
Arguments:
logger (logging.Logger object) - Object that handles the FileHandler and StreamHandler
kwargs (dict) - keyword arguments
Returns:
The value validated from the main dictionary
"""
arg = kwargs.get('arg', None)
header = kwargs.get('header', None)
prompt = kwargs.get('prompt', None)
response = kwargs.get('response_type', 'int')
main_dict = kwargs.get('main_dict', None)
menu_dict = kwargs.get('menu_dict', None)
valid_dict = kwargs.get('valid_dict', None)
echo = kwargs.get('echo', True)
quit = kwargs.get('quit', True)
other_value = None
input_str = 'accepted'
valid_arg = False
arg_temp = None
if arg:
if valid_dict:
arg = arg.lower()
if arg in valid_dict.keys():
unix_gate = valid_dict[arg]
other_value = arg
valid_arg = True
            elif arg in valid_dict.values():
                for key, value in valid_dict.items():
                    if value == arg:
                        # arg is already the gateway host, so keep it as the value to return
                        unix_gate = arg
                        other_value = key
                        valid_arg = True
if valid_arg:
logger.info(f'{header.title()} {input_str}')
# no need to prompt user since argument passed is already valid
return unix_gate, other_value
else:
logger.warning(
f'{header.title()} passed ({arg}) is invalid!')
# Nullify arg's value so it will be caught by the next condition
arg = None
if not arg:
logger.info(f'User prompted for {header}')
header = header.title() if header else header
arg, other_value = ask_user(logger, prompt=prompt, header=header, response_type=response,
main_dict=main_dict, menu_dict=menu_dict, echo=echo, quit=quit)
input_str = 'selected' if main_dict or menu_dict else 'entered'
arg_temp = arg if echo else '*' * len(arg)
if type(arg) is list:
# specifically for args.file since it's of type list
arg_temp = ', '.join(arg)
logger.info(f'{header} ({arg_temp}) {input_str}')
if other_value and header.lower() != 'server group':
return arg, other_value
return arg
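# Illustrative validate_or_ask_arg() call (argument values hypothetical): use the
# command line value when it is valid, otherwise fall back to an ask_user() prompt.
#   action = validate_or_ask_arg(logger, arg=args.action, header='Action',
#                                prompt='\n\nYour choice', main_dict={1: 'download', 2: 'upload'})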
def main():
"""
Main function where command line arguments are validated, user is asked of other necessary details,
and FtpConnection object is created. It establishes the FTP connection and performs file transfer
"""
global curr_dir, config_dir
prog_desc = 'purpose: transfer file to/from a host that is behind a UNIX gateway. file(s) will be transferred in Binary mode.'
choice_prompt = '\n\nYour choice'
username_prompt = 'Login'
passcode_prompt = 'Enter IDLDAP.net Password'
required_str = 'All required arguments passed'
action = {1: 'download', 2: 'upload'}
parser = argparse.ArgumentParser(description=prog_desc, add_help=False)
parser.add_argument('-g', '--gateway', help='UNIX gateway to be used')
parser.add_argument(
'-u', '--username', help='Gateway username. will override the value from JSON file if this argument is passed')
parser.add_argument(
'-p', '--passcode', help='IDLDAP.net password. if -u argument is passed, user will be prompted')
parser.add_argument('-s', '--server', choices=[
'ms', 'nonms'], help='transfer file(s) to either a Managed Services host or a non-MS host')
parser.add_argument('-i', '--instance',
help='Managed Services (only) client instance')
parser.add_argument(
'-a', '--action', choices=['download', 'upload'], help='download or upload')
parser.add_argument('-f', '--file', nargs='*',
help='file(s) to be transferred; separated by spaces')
parser.add_argument('-v', '--verbose', help=f'explain what is being done. though everything is logged in {LOG_FILE}',
action='store_const', const=logging.DEBUG, dest='loglevel', default=logging.ERROR)
parser.add_argument(
'-h', '--help', help='show this help message and exit', action='help')
args = parser.parse_args()
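    # Example invocation (script name and argument values hypothetical):
    #   python fts.py -g gate1 -u jdoe -s ms -i baldis1 -a upload -f report.txt -v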
# create a logger object (that has both FileHandler and StreamHandler)
logger = get_logger(__name__, args.loglevel)
logger.info(equal_sign_line)
# logger.info(f'SCRIPT LOG - Start')
logger.info(f'START - {t()}')
logger.info(f'File Transfer Script {__file__} [ Version {VERSION_NO} Build: {BUILD_DATE} at: {BUILD_TIME} ]')
# obtain information from JSON file
json_gate_details, json_nonms_details, json_csv_details = load_json_config(logger, JSON_CONFIG)
csv_dir = json_csv_details['csv_dir']
csv_files = json_csv_details['csv_files']
csv_list = [value for x in range(len(csv_files)) for key, value in csv_files[x].items()]
# check if all required CSV files exist
check_config(logger, csv_list)
# assign each CSV file to their respective variables
gateway_csv, ms_client_csv, non_ms_servers_csv, server_group_csv = csv_list
# =========================================================================
# parse CSV files and load into dictionaries
# UNIX gateway information
gateway_hosts, gateways_menu = parse_csv(gateway_csv, logger)
# server group option: MS or non-MS
server_groups, server_menu = parse_csv(server_group_csv, logger)
# non-MS host options
non_ms_hosts_options, non_ms_hosts_menu = parse_csv(non_ms_servers_csv, logger)
# MS clients' environment information
client_accounts, instance_menu = parse_csv(ms_client_csv, logger, sort=True)
# =========================================================================
# determine which parameters were and were not passed when calling the program
# then check for validity
# a lambda to filter out the arguments passed using filter()
j = lambda k : list(filter(lambda l: bool(l), k))
# some lambda for concatenating some logging prompt/text
x = lambda a, b : f'{a} user ({b}) taken from the JSON file ({JSON_CONFIG})'
y = lambda a, b : f'{a} password ({len(b) * "*"}) taken from the JSON file ({JSON_CONFIG})'
z = lambda a : f'{a} missing from the JSON file ({JSON_CONFIG})!!'
# if there is at least 1 argument passed
if len(j([args.gateway, args.username, args.passcode, args.server, args.action, args.file])):
logger.info('Checking for validity of arguments passed...')
unix_gate, gateway_location = validate_or_ask_arg(
logger, arg=args.gateway, header='Unix gate', prompt=choice_prompt, main_dict=gateway_hosts, menu_dict=gateways_menu, valid_dict=gateway_hosts)
if args.username:
gate_username = validate_or_ask_arg(
logger, arg=args.username, header='Gateway username', prompt=username_prompt, response_type='str', quit=False)
# if user passed the Unix gate username, then user also needs to enter gate password so nullify json_gate_pwd
json_gate_pwd = None
else:
for key, value in json_gate_details.items():
if key == 'username':
json_gate_user = value
elif key == 'password':
json_gate_pwd = value
logger.info(x('Unix gate', json_gate_user))
gate_username = json_gate_user
if args.passcode or not json_gate_pwd:
gate_passcode = validate_or_ask_arg(
logger, arg=args.passcode, header='IDLDAP.net password', prompt=passcode_prompt, response_type='str', quit=False, echo=False)
elif json_gate_pwd:
logger.info(y('Unix gate', json_gate_pwd))
gate_passcode = json_gate_pwd
if args.server == 'nonms':
if args.instance:
logger.warning(
f'Non-MS connection doesn\'t need an instance ({args.instance}) parameter')
args.instance = None
# all necessary arguments for non-MS connection passed
if len(j([args.gateway, args.username, args.passcode, args.action, args.file])) == 5:
logger.info(required_str)
elif args.server == 'ms':
# all necessary arguments for MS connection passed
if len(j([args.gateway, args.username, args.passcode, args.instance, args.action, args.file])) == 6:
logger.info(required_str)
server_group = validate_or_ask_arg(
logger, arg=args.server, header='Server group', prompt=choice_prompt, main_dict=server_groups, menu_dict=server_menu)
if server_group == 'nonms':
        # user wants to transfer file(s) to a non-MS host
remote_host_fqdn, remote_host = validate_or_ask_arg(
logger, header='Non-MS Host', prompt='\n\nTransfer files to/from', main_dict=non_ms_hosts_options, menu_dict=non_ms_hosts_menu)
# initialize; if credentials for this non-MS host do not exist in the JSON file
remote_user = None
remote_pwd = None
for key, value in json_nonms_details.items():
if key == remote_host:
remote_user = value.get('username', None)
if remote_user:
logger.info(x('Non-MS host', remote_user))
remote_pwd = value.get('password', None)
if remote_pwd:
logger.info(y('Non-MS host', remote_pwd))
else:
logger.warning(z(f'Password for {remote_host_fqdn}'))
remote_pwd, temp_val = ask_user(logger, prompt=f"{remote_user}@{remote_host_fqdn}'s password", response_type='str', echo=False, quit=False)
print()
break
if not remote_user:
# non-MS host's credentials are not in the JSON file, so ask user
logger.info(
'User prompted to enter credentials for remote host and file location')
remote_user, temp_val = ask_user(
logger, header=f'Credentials for {remote_host_fqdn}', prompt=username_prompt, response_type='str', quit=False)
remote_pwd, temp_val = ask_user(
logger, prompt=f"{remote_user}@{remote_host_fqdn}'s password", response_type='str', echo=False, quit=False)
remote_dir, temp_val = ask_user(logger,
prompt="Path (absolute) on remote host ('[h/H]ome' for home directory)", response_type='str', quit=False)
if remote_dir.lower() == 'home':
remote_dir = 'home'
else:
        # user wants to transfer file(s) to a MS host
if args.instance:
# if --instance argument passed, then look up for the values in the client_accounts dictionary
try:
remote_user, remote_host_fqdn, remote_pwd, clientID = client_accounts[args.instance]
except KeyError:
logger.warning(
f'MS instance passed as an argument ({args.instance}) is unrecognized!')
args.instance = None
if not args.instance:
# if user invoked --ms but without --instance argument passed,
# or if user provided an unrecognized --instance, then ask user for it
logger.info('User prompted to select a MS client ID from the list...')
# extract all the unique MS Client IDs from the dictionary and display for user menu
# e.g. ALDIS, JCTRL, etc.
temp_list = list(set([ value[3] for key, value in client_accounts.items() ]))
temp_list.sort()
MS_clientID_menu = {}
counter = 1
for id in temp_list:
MS_clientID_menu[counter] = id
counter += 1
temp_val, clientID = ask_user(logger, prompt=choice_prompt, header='MS Client ID', main_dict=client_accounts, menu_dict=MS_clientID_menu)
# extract only the instances for the chosen MS client ID and display for user menu
# e.g. baldis1, baldis2, paldis1, paldis2
MS_client_menu = {}
counter = 1
for key,value in client_accounts.items():
if value[3] == clientID:
MS_client_menu[counter] = key
counter += 1
# extract the remote_user, remote_host_fqdn, remote_pwd and clientID from client_accounts dictionary
(remote_user, remote_host_fqdn, remote_pwd, clientID), temp_val = ask_user(logger,
prompt=choice_prompt, header='Managed Services Instance', main_dict=client_accounts, menu_dict=MS_client_menu, column=7)
# set remote_dir to default directory
remote_dir = f'aiprod{clientID}/implementor/{gate_username}'
# host = f'MS host' if server_group == 'ms' else f'non-MS host'
logger.info(f'Credentials to use: {remote_user}@{remote_host_fqdn}')
# ask user of the action to take
action = validate_or_ask_arg(
logger, arg=args.action, header='Action', prompt=choice_prompt, main_dict=action)
# if not specified in the argument, ask user for file(s) to transfer then split and append to a list
files = validate_or_ask_arg(
logger, arg=args.file, prompt=f'Please specify filename(s) separated by a space', header=f'Files to {action}', response_type='list')
# prior to establishing FTP connection, check first if files exist locally;
# exit if one or more files is missing
if action == 'upload':
logger.info('Validating if upload file(s) exists...')
check_if_existing(logger, files)
logger.info('All file(s) confirmed to exist')
logger.info(equal_sign_line)
# create a FtpConnection object
FTP = FtpConnection(unix_gate, gateway_location, gate_username, gate_passcode,
server_group, args.instance, action, files, remote_host_fqdn, remote_user, remote_pwd, remote_dir, logger)
# establish connection with Unix gate, connect with chosen remote host
# and proceed to transfer files
# errors will be handled within the FtpConnection class
FTP.connect_and_transfer()
logger.info('End of program')
logger.info(f'Logged everything in {LOG_FILE}')
logger.info('Thank you for using the script!')
logger.info(f'END - {t()}')
logger.info(equal_sign_line)
if __name__ == '__main__':
# call the main function
main()
|
jstest.py
|
"""
unittest.TestCase for JavaScript tests.
"""
from __future__ import absolute_import
import os
import os.path
import shutil
import sys
import threading
from . import interface
from ... import config
from ... import core
from ... import utils
class JSTestCase(interface.TestCase):
"""
A jstest to execute.
"""
REGISTERED_NAME = "js_test"
# A wrapper for the thread class that lets us propagate exceptions.
class ExceptionThread(threading.Thread):
def __init__(self, my_target, my_args):
threading.Thread.__init__(self, target=my_target, args=my_args)
self.err = None
def run(self):
try:
threading.Thread.run(self)
except:
self.err = sys.exc_info()[1]
else:
self.err = None
def _get_exception(self):
return self.err
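    # Illustrative use of ExceptionThread (target hypothetical): exceptions raised
    # inside the thread are surfaced in the parent after join(), mirroring the
    # loop at the end of run_test() below.
    #   t = JSTestCase.ExceptionThread(my_target=some_callable, my_args=[0])
    #   t.start()
    #   t.join()
    #   if t._get_exception() is not None:
    #       raise t._get_exception()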
DEFAULT_CLIENT_NUM = 1
def __init__(self,
logger,
js_filename,
shell_executable=None,
shell_options=None,
test_kind="JSTest"):
"""Initializes the JSTestCase with the JS file to run."""
interface.TestCase.__init__(self, logger, test_kind, js_filename)
# Command line options override the YAML configuration.
self.shell_executable = utils.default_if_none(config.MONGO_EXECUTABLE, shell_executable)
self.js_filename = js_filename
self.shell_options = utils.default_if_none(shell_options, {}).copy()
self.num_clients = JSTestCase.DEFAULT_CLIENT_NUM
def configure(self, fixture, num_clients=DEFAULT_CLIENT_NUM, *args, **kwargs):
interface.TestCase.configure(self, fixture, *args, **kwargs)
if self.fixture.port is not None:
self.shell_options["port"] = self.fixture.port
global_vars = self.shell_options.get("global_vars", {}).copy()
data_dir = self._get_data_dir(global_vars)
# Set MongoRunner.dataPath if overridden at command line or not specified in YAML.
if config.DBPATH_PREFIX is not None or "MongoRunner.dataPath" not in global_vars:
# dataPath property is the dataDir property with a trailing slash.
data_path = os.path.join(data_dir, "")
else:
data_path = global_vars["MongoRunner.dataPath"]
global_vars["MongoRunner.dataDir"] = data_dir
global_vars["MongoRunner.dataPath"] = data_path
# Don't set the path to the executables when the user didn't specify them via the command
# line. The functions in the mongo shell for spawning processes have their own logic for
# determining the default path to use.
if config.MONGOD_EXECUTABLE is not None:
global_vars["MongoRunner.mongodPath"] = config.MONGOD_EXECUTABLE
if config.MONGOS_EXECUTABLE is not None:
global_vars["MongoRunner.mongosPath"] = config.MONGOS_EXECUTABLE
if self.shell_executable is not None:
global_vars["MongoRunner.mongoShellPath"] = self.shell_executable
test_data = global_vars.get("TestData", {}).copy()
test_data["minPort"] = core.network.PortAllocator.min_test_port(fixture.job_num)
test_data["maxPort"] = core.network.PortAllocator.max_test_port(fixture.job_num)
global_vars["TestData"] = test_data
self.shell_options["global_vars"] = global_vars
shutil.rmtree(data_dir, ignore_errors=True)
self.num_clients = num_clients
try:
os.makedirs(data_dir)
except os.error:
# Directory already exists.
pass
def _get_data_dir(self, global_vars):
"""
Returns the value that the mongo shell should set for the
MongoRunner.dataDir property.
"""
# Command line options override the YAML configuration.
data_dir_prefix = utils.default_if_none(config.DBPATH_PREFIX,
global_vars.get("MongoRunner.dataDir"))
data_dir_prefix = utils.default_if_none(data_dir_prefix, config.DEFAULT_DBPATH_PREFIX)
return os.path.join(data_dir_prefix,
"job%d" % (self.fixture.job_num),
config.MONGO_RUNNER_SUBDIR)
def run_test(self):
threads = []
try:
# Don't thread if there is only one client.
if self.num_clients == 1:
shell = self._make_process(self.logger)
self._execute(shell)
else:
# If there are multiple clients, make a new thread for each client.
for i in xrange(self.num_clients):
t = self.ExceptionThread(my_target=self._run_test_in_thread, my_args=[i])
t.start()
threads.append(t)
except self.failureException:
raise
except:
self.logger.exception("Encountered an error running jstest %s.", self.basename())
raise
finally:
for t in threads:
t.join()
for t in threads:
if t._get_exception() is not None:
raise t._get_exception()
def _make_process(self, logger=None, thread_id=0):
# Since _make_process() is called by each thread, we make a shallow copy of the mongo shell
# options to avoid modifying the shared options for the JSTestCase.
shell_options = self.shell_options.copy()
global_vars = shell_options["global_vars"].copy()
test_data = global_vars["TestData"].copy()
# We set a property on TestData to mark the main test when multiple clients are going to run
# concurrently in case there is logic within the test that must execute only once. We also
# set a property on TestData to indicate how many clients are going to run the test so they
# can avoid executing certain logic when there may be other operations running concurrently.
is_main_test = thread_id == 0
test_data["isMainTest"] = is_main_test
test_data["numTestClients"] = self.num_clients
global_vars["TestData"] = test_data
shell_options["global_vars"] = global_vars
# If logger is none, it means that it's not running in a thread and thus logger should be
# set to self.logger.
logger = utils.default_if_none(logger, self.logger)
return core.programs.mongo_shell_program(logger,
executable=self.shell_executable,
filename=self.js_filename,
**shell_options)
def _run_test_in_thread(self, thread_id):
# Make a logger for each thread. When this method gets called self.logger has been
# overridden with a TestLogger instance by the TestReport in the startTest() method.
logger = self.logger.new_test_thread_logger(self.test_kind, str(thread_id))
shell = self._make_process(logger, thread_id)
self._execute(shell)
|
ArUco detection.py
|
#Procedure:
#1: Define the origin and calibrate the object's position at the chosen origin
#2: Calibrate the depth (z) using linear regression (real measurement vs. obtained measurement)
#3: Calibrate the relation between the z variable and the x and y coordinates. Relate the (x,z) and (y,z) values.
import cv2 as cv
from threading import Thread
import numpy as np
from numpy.linalg import inv, det
import scipy as sci
from scipy.spatial.transform import Rotation as R
from imutils.video import WebcamVideoStream
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import serial
import collections
import glob
import sys
import time
import math
from numpy import cos as c
from numpy import sin as s
from vpython import *
#Calibration position: at the beginning of the video, the script collects the first 40 samples to compute the mean
#measurements and thus find the offset values. Once we have the offset, we can calculate the relative origin.
#Keep in mind that the marker's position is acquired with respect to the camera frame.
isReceive = False
isRun = True
gx = 0.0
gy = 0.0
gz = 0.0
ax = 0.0
ay = 0.0
az = 0.0
cx = 0.0
cy = 0.0
cz = 0.0
dt = 0.1
g = 9.81
q_ant = np.array([1.0, 0.0, 0.0, 0.0])
x_ant_ori = np.array([[0], [0], [0], [0], [0], [0]])
P_ant_ori = np.eye(6)*1000
x_ant_pos = np.array([[0], [0], [0], [0], [0], [0]])
P_ant_pos = np.eye(6)*1000
def EKF_Accel_Camera(x_ant, P_ant, accel, y, dt):
    # F ----> State transition matrix
    # V ----> Acceleration (control) input matrix
    # Q ----> Process noise covariance
    # R ----> Measurement noise covariance
F = np.array([[1, 0, 0, dt, 0, 0],
[0, 1, 0, 0, dt, 0],
[0, 0, 1, 0, 0, dt],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
V = np.array([[0.5*dt**2, 0, 0],
[0, 0.5*dt**2, 0],
[0, 0, 0.5*dt**2],
[dt, 0, 0],
[0, dt, 0],
[0, 0, dt]])
H = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0]])
Q = np.eye(6)
R = np.eye(3)
#Process Equations
x = F@x_ant + V@accel
#State covariance matrix
P = F@P_ant@F.T + Q
    #Kalman gain and innovation
K = P@H.T@inv(H@P@H.T + R)
z = y - H@x
#Update step
x_est = x + K@z
P_est = P - K@H@P
return x_est, P_est
def KF_Orientation(x_ant, P_ant, y, dt):
A = np.array([[1, 0, 0, dt, 0, 0],
[0, 1, 0, 0, dt, 0],
[0, 0, 1, 0, 0, dt],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
B = np.array([[]])
H = np.eye(6)
Q = np.array([[10, 0, 0, 0, 0, 0],
[0, 10, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
R = np.array([[.1, 0, 0, 0, 0, 0],
[0, .1, 0, 0, 0, 0],
[0, 0, .01, 0, 0, 0],
[0, 0, 0, .01, 0, 0],
[0, 0, 0, 0, .01, 0],
[0, 0, 0, 0, 0, .01]])
#Prediction
x_p = A@x_ant
P_p = A@P_ant@A.T + Q
    #Kalman gain and innovation
K = P_p@H.T@inv(H@P_p@H.T + R)
inov = y - H@x_p
#Update
x_est = x_p + K@inov
P_est = P_p - K@H@P_p
return x_est, P_est
def KF_Position(x_ant, P_ant, u, y, dt):
A = np.array([[1, 0, 0, dt, 0, 0],
[0, 1, 0, 0, dt, 0],
[0, 0, 1, 0, 0, dt],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
B = np.array([[dt**2/2, 0, 0],
[0, dt**2/2, 0],
[0, 0, dt**2/2],
[dt, 0, 0],
[0, dt, 0],
[0, 0, dt]])
H = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0]])
Q = np.array([[100, 0, 0, 0, 0, 0],
[0, 100, 0, 0, 0, 0],
[0, 0, 100, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
R = np.array([[100, 0, 0],
[0, 100, 0],
[0, 0, 100]])
#Prediction
x_p = A@x_ant + B@u
P_p = A@P_ant@A.T + Q
    #Kalman gain and innovation
K = P_p@H.T@inv(H@P_p@H.T + R)
inov = y - H@x_p
#Update
x_est = x_p + K@inov
P_est = P_p - K@H@P_p
return x_est, P_est
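# Illustrative single predict/update step of KF_Position (measurement values are
# hypothetical): fuse a camera position fix y with an IMU acceleration input u,
# then carry the estimate forward as the prior for the next frame.
#   y = np.array([[0.10], [0.20], [1.50]])  # camera position measurement (m)
#   u = np.array([[0.0], [0.0], [0.0]])     # gravity-compensated acceleration
#   x_est, P_est = KF_Position(x_ant_pos, P_ant_pos, u, y, dt)
#   x_ant_pos, P_ant_pos = x_est, P_est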
def quat_product(a, b):
k1 = a[0]*b[0] - a[1]*b[1] - a[2]*b[2] - a[3]*b[3]
k2 = a[0]*b[1] + a[1]*b[0] + a[2]*b[3] - a[3]*b[2]
k3 = a[0]*b[2] - a[1]*b[3] + a[2]*b[0] + a[3]*b[1]
k4 = a[0]*b[3] + a[1]*b[2] - a[2]*b[1] + a[3]*b[0]
k = np.array([k1, k2, k3, k4])
return k
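# Sanity check for quat_product (illustrative): multiplying by the identity
# quaternion [1, 0, 0, 0] leaves the other operand unchanged.
#   q = np.array([0.5, 0.5, 0.5, 0.5])
#   assert np.allclose(quat_product(np.array([1.0, 0.0, 0.0, 0.0]), q), q)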
def MadgwickUpdate_OnlyAccel(gx, gy, gz, ax, ay, az, cx, cy, cz, dt, q_ant):
q0 = q_ant[0]
q1 = q_ant[1]
q2 = q_ant[2]
q3 = q_ant[3]
beta = 0.5
# Rate of change of quaternion from gyroscope
qDot1 = 0.5 * (-q1 * gx - q2 * gy - q3 * gz)
qDot2 = 0.5 * (q0 * gx + q2 * gz - q3 * gy)
qDot3 = 0.5 * (q0 * gy - q1 * gz + q3 * gx)
qDot4 = 0.5 * (q0 * gz + q1 * gy - q2 * gx)
    # Compute feedback only if the accelerometer measurement is valid (avoids NaN in the normalisation)
    if not (ax == 0 and ay == 0 and az == 0):
# Normalise accelerometer measurement
recipNorm = 1/(math.sqrt(ax * ax + ay * ay + az * az))
ax *= recipNorm
ay *= recipNorm
az *= recipNorm
print('ax: {0}, ay: {1}, az: {2}'.format(ax, ay, az))
# Auxiliary variables to avoid repeated arithmetic
_2q0 = 2.0 * q0
_2q1 = 2.0 * q1
_2q2 = 2.0 * q2
_2q3 = 2.0 * q3
_4q0 = 4.0 * q0
_4q1 = 4.0 * q1
_4q2 = 4.0 * q2
_4q3 = 4.0 * q3
_8q1 = 8.0 * q1
_8q2 = 8.0 * q2
q0q0 = q0 * q0
q1q1 = q1 * q1
q2q2 = q2 * q2
q3q3 = q3 * q3
f1 = 2*(q1*q3 - q0*q2) - ax
f2 = 2*(q0*q1 + q2*q3) - ay
f3 = 2*(0.5 - q1q1 - q2q2) - az
f4 = 2*(0.5 - q2q2 - q3q3) - cx
f5 = 2*(q1*q2 - q0*q3) - cy
f6 = 2*(q0*q2 + q1*q3) - cz
print('cx: {0}, cy: {1}, cz: {2}'.format(cx, cy, cz))
        # Gradient descent algorithm corrective step
s0 = -_2q2*f1 + _2q1*f2 - _2q3*f5 + _2q2*f6
s1 = _2q3*f1 + _2q0*f2 - _4q1*f3 + _2q2*f5 + _2q3*f6
s2 = -_2q0*f1 + _2q3*f2 - _4q2*f3 - _4q2*f4 + _2q1*f5 + _2q0*f6
s3 = _2q1*f1 + _2q2*f2 - _4q3*f4 - _2q0*f5 + _2q1*f6
        recipNorm = 1 / (math.sqrt(s0 * s0 + s1 * s1 + s2 * s2 + s3 * s3))  # normalise step magnitude
s0 *= recipNorm
s1 *= recipNorm
s2 *= recipNorm
s3 *= recipNorm
# Apply feedback step
qDot1 -= beta * s0
qDot2 -= beta * s1
qDot3 -= beta * s2
qDot4 -= beta * s3
# Integrate rate of change of quaternion to yield quaternion
q0 += qDot1 * dt
q1 += qDot2 * dt
q2 += qDot3 * dt
q3 += qDot4 * dt
# Normalise quaternion
recipNorm = 1 / (math.sqrt(q0 * q0 + q1 * q1 + q2 * q2 + q3 * q3))
q0 *= recipNorm
q1 *= recipNorm
q2 *= recipNorm
q3 *= recipNorm
q = np.array([q0, q1, q2, q3])
    # print('q0: {0}, q1: {1}, q2: {2}, q3: {3}'.format(q[0], q[1], q[2], q[3]))
return q
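# Illustrative Madgwick update step (sensor values hypothetical): feed gyro rates
# (rad/s), the accelerometer vector and the auxiliary c-vector, then reuse the
# returned quaternion as q_ant on the next sample.
#   q_ant = MadgwickUpdate_OnlyAccel(gx, gy, gz, ax, ay, az, cx, cy, cz, dt, q_ant)
#   roll, pitch, yaw = computeAngles(q_ant[0], q_ant[1], q_ant[2], q_ant[3])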
def computeAngles(q0, q1, q2, q3):
roll = 180*math.atan2(q0*q1 + q2*q3, 0.5 - q1*q1 - q2*q2)/math.pi
pitch = 180*math.asin(-2.0 * (q1*q3 - q0*q2))/math.pi
yaw = 180*math.atan2(q1*q2 + q0*q3, 0.5 - q2*q2 - q3*q3)/math.pi
return roll, pitch, yaw
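# Sanity check for computeAngles (illustrative): the identity quaternion maps to
# zero roll, pitch and yaw.
#   assert computeAngles(1.0, 0.0, 0.0, 0.0) == (0.0, 0.0, 0.0)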
def getData():
time.sleep(1.0)
serialConnection.reset_input_buffer()
while(isRun):
global isReceive
global roll, pitch, yaw
global q0, q1, q2, q3
global accel
global ax, az, ay, gx, gy, gz
data = serialConnection.readline()
data = str(data, 'utf-8')
splitdata = data.split(',')
q0 = float(splitdata[0])
q1 = float(splitdata[1])
q2 = float(splitdata[2])
q3 = float(splitdata[3])
ax = float(splitdata[4])
ay = float(splitdata[5])
az = float(splitdata[6])
gx = float(splitdata[7])
gy = float(splitdata[8])
gz = float(splitdata[9])
accel = np.array([[ax], [ay], [az]])
roll, pitch, yaw = computeAngles(q0, q1, q2, q3)
isReceive = True
serialPort = 'COM5'
baudRate = 9600
try:
    serialConnection = serial.Serial(serialPort, baudRate)
except serial.SerialException:
    print('Cannot connect to the port')
    sys.exit(1)
thread = Thread(target=getData)
thread.start()
#Load the predefined dictionary
dictionary = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_50)
# Load previously saved calibration data
path = './camera_data/camera_calibration.npz'
npzfile = np.load(path)
#Camera Matrix
mtx = npzfile[npzfile.files[0]]
#Distortion Matrix
dist = npzfile[npzfile.files[1]]
print(mtx ,dist)
#Font setup
font = cv.FONT_HERSHEY_PLAIN
start_time = time.time()
#Create position data.txt
pos_data = open("pos_data.txt",'w')
pos_data.close()
#Create attitude data.txt
angle_data = open("angle_data.txt", 'w')
angle_data.close()
#Camera instance with thread
cap = WebcamVideoStream(src=0).start()
# cap = cv.VideoCapture(0)
#Create board object
# board = cv.aruco.GridBoard_create(2, 2, 0.037, 0.005, dictionary)
start_clock = time.perf_counter()
#Declare some important variables
rvecs = None
tvecs = None
pos_calib_obj = True
offset_xo = 0
offset_yo = 0
offset_zo = 0
frame_id = 0
roll_o = []
pitch_o = []
yaw_o = []
roll_c = []
pitch_c = []
yaw_c = []
pos_xo = []
pos_yo = []
pos_zo = []
roll_obj = 0
pitch_obj = 0
yaw_obj = 0
alfa = 0
beta = 0
gama = 0
#Initialise elapsed_time since it is read inside the loop before first being computed
elapsed_time = 0
while True:
img = cap.read()
# img = cv.rotate(img, cv.ROTATE_180)
#Convert frame to gray scale
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
#Frame count increment
frame_id += 1
#Set parameters for the marker tracking
parameters = cv.aruco.DetectorParameters_create()
parameters.minMarkerPerimeterRate = 0.1
# parameters.minDistanceToBorder = 6
# parameters.adaptiveThreshConstant = 25
parameters.cornerRefinementWinSize = 3
parameters.cornerRefinementMethod = cv.aruco.CORNER_REFINE_CONTOUR
# parameters.cornerRefinementMaxIterations = 10
parameters.cornerRefinementMinAccuracy = 0
#Detect the markers in the image
markerCorners, markerIDs, rejectedCandidates = cv.aruco.detectMarkers(gray, dictionary, parameters = parameters)
#Refine detection
# markerCorners, markerIDs, rejectedCandidates, recoveredIds = cv.aruco.refineDetectedMarkers(img, board, markerCorners,
# markerIDs, rejectedCandidates, mtx, dist)
#Open position file to write data position
pos_data = open("pos_data.txt", "a+")
#Open attitude file to write estimation
angle_data = open("angle_data.txt", "a+")
# print('ID:', markerIDs)
#Verify if there is some marker detected
if markerIDs is not None and len(markerIDs)==2:
for i in range(0, len(markerCorners)):
# print("Marker Detected")
#Compute board's pose
#Reference marker
if markerIDs[i]==10:
# pose, rvecs, tvecs = cv.aruco.estimatePoseBoard(markerCorners, markerIDs, board, mtx, dist, rvecs, tvecs)
rvec_ref, tvec_ref, _ = cv.aruco.estimatePoseSingleMarkers(markerCorners[i], 0.066, mtx, dist)
rvec_ref = np.reshape(rvec_ref, (3,1))
tvec_ref = np.reshape(tvec_ref, (3,1))
#Use Rodrigues formula to transform rotation vector into matrix
#Pose marker w.r.t camera reference frame
R_rc, _ = cv.Rodrigues(rvec_ref)
#Homogeneous Transformation Fixed Frame to Camera Frame
last_col = np.array([[0, 0, 0, 1]])
T_rc = np.concatenate((R_rc, tvec_ref), axis=1)
T_rc = np.concatenate((T_rc, last_col), axis=0)
                #Homogeneous Transformation Camera Frame to Fixed Frame
T_cr = np.linalg.inv(T_rc)
#Euler angles
# r_ref = sci.spatial.transform.Rotation.from_matrix(T_cr[0:3, 0:3])
# euler_ref = r_ref.as_euler('ZYX', degrees=True)
# phi_ref = euler_ref[0]
# theta_ref = euler_ref[1]
# psi_ref = euler_ref[2]
# print("phi:", phi_ref)
# print("theta:", theta_ref)
# print("psi:", psi_ref)
#Attitude estimation
# phi = 180*math.atan2(R_marker[2,1], R_marker[2,2])/math.pi
# theta = 180*math.atan2(-R_matrix[2, 0], math.sqrt(R_matrix[2,1]**2 + R_matrix[2,2]**2))/math.pi
# psi = 180*math.atan2(R_matrix[1,0], R_matrix[0,0])/math.pi
cv.aruco.drawAxis(img, mtx, dist, rvec_ref, tvec_ref, 0.033)
cv.aruco.drawDetectedMarkers(img, markerCorners)
#Object marker
if markerIDs[i]==4:
# pose, rvecs, tvecs = cv.aruco.estimatePoseBoard(markerCorners, markerIDs, board, mtx, dist, rvecs, tvecs)
rvec_obj, tvec_obj, _ = cv.aruco.estimatePoseSingleMarkers(markerCorners[i], 0.066, mtx, dist)
rvec_obj = np.reshape(rvec_obj, (3,1))
tvec_obj = np.reshape(tvec_obj, (3,1))
#Use Rodrigues formula to transform rotation vector into matrix
R_dc, _ = cv.Rodrigues(rvec_obj)
#Homogeneous Transformation Object Frame to Camera Frame
T_dc = np.concatenate((R_dc, tvec_obj), axis=1)
T_dc = np.concatenate((T_dc, last_col), axis=0)
#Homogeneous Transformation Object Frame to Fixed Frame
T_dr = T_cr@T_dc
#Getting quaternions from rotation matrix
r_obj = sci.spatial.transform.Rotation.from_matrix(T_dr[0:3, 0:3])
q_obj = r_obj.as_quat()
# # print('Quaternion:')
# # print(q)
roll_obj, pitch_obj, yaw_obj = computeAngles(q_obj[3], q_obj[1], q_obj[0], -q_obj[2])
roll_obj -= 8.2
pitch_obj += 7.2
# euler_obj = r_obj.as_euler('ZYX', degrees=True)
# phi_obj = euler_obj[0]
# theta_obj = euler_obj[1]
# psi_obj = euler_obj[2]
# print("phi:", phi_obj)
# print("theta:", theta_obj)
# print("psi:", psi_obj)
#Position correction
# xf_obj = 0.755*float(T_dr[0,3]) - 0.193
# yf_obj = 1.06*float(T_dr[1,3]) + 0.018
# zf_obj = 0.709*float(T_dr[2,3]) + 0.0687
xf_obj = 0.774*float(T_dr[0,3]) - 0.129
yf_obj = -0.349*float(T_dr[1,3])**2 + 1.09*float(T_dr[1,3]) + 0.129
zf_obj = 0.767*float(T_dr[2,3]) - 0.0694
# if pos_calib_obj:
# #Position correction
# pos_xo.append(xf_obj)
# pos_yo.append(yf_obj)
# pos_zo.append(zf_obj)
# if len(pos_xo) == 100 and len(pos_yo) == 100 and len(pos_zo)==100:
# # print('X: {0}, Y: {1}, Z: {2}'.format(np.average(pos_xo), np.average(pos_yo), np.average(pos_zo)))
# pos_xo=[]
# pos_yo=[]
# pos_zo=[]
cv.aruco.drawAxis(img, mtx, dist, rvec_obj, tvec_obj, 0.033)
print(tvec_obj)
cv.aruco.drawDetectedMarkers(img, markerCorners)
modulo = np.sqrt(q_obj[0]**2 + q_obj[1]**2 + q_obj[2]**2 + q_obj[3]**2)
#Print position values in frame
cv.putText(img, "X:"+str(np.round(float(xf_obj), 4)), (80,600), font, 1, (0,0,0), 2)
cv.putText(img, "Y:"+str(np.round(float(yf_obj), 4)), (180,600), font, 1, (0,0,0), 2)
cv.putText(img, "Z:"+str(np.round(float(zf_obj), 4)), (280,600), font, 1, (0,0,0), 2)
cv.putText(img, "Orientacao Estimada por Camera:", (10, 200), font, 1, (255, 255, 255), 2)
cv.putText(img, "Phi:"+str(np.round(float(roll_obj), 2)), (10,220), font, 1, (0,0,255), 2)
cv.putText(img, "Theta:"+str(np.round(float(pitch_obj), 2)), (10,240), font, 1, (0,255,0), 2)
cv.putText(img, "Psi:"+str(np.round(float(yaw_obj), 2)), (10,260), font, 1, (255,0,0), 2)
# cv.putText(img, "q0:"+str(np.round(float(q_obj[3]), 3)), (500,120), font, 1, (255,255,255), 2)
# cv.putText(img, "q1:"+str(np.round(float(q_obj[0]), 3)), (500,140), font, 1, (255,255,255), 2)
# cv.putText(img, "q2:"+str(np.round(float(q_obj[1]), 3)), (500,160), font, 1, (255,255,255), 2)
# cv.putText(img, "q3:"+str(np.round(float(q_obj[2]), 3)), (500,180), font, 1, (255,255,255), 2)
# cv.putText(img, "Modulo:" + str(np.round(float(modulo), 3)), (500, 210), font, 1, (255, 0, 0),2)
#Data sensor
if isReceive:
# cv.putText(img, "q0:"+str(np.round(float(q0), 3)), (500,320), font, 1, (0,0,0), 2)
# cv.putText(img, "q1:"+str(np.round(float(q1), 3)), (500,340), font, 1, (0,0,0), 2)
# cv.putText(img, "q2:"+str(np.round(float(q2), 3)), (500,360), font, 1, (0,0,0), 2)
# cv.putText(img, "q3:"+str(np.round(float(q3), 3)), (500,380), font, 1, (0,0,0), 2)
cv.putText(img, "Orientacao Estimada por IMU:", (10, 300), font, 1, (255,255,255), 2)
cv.putText(img, "Roll:"+str(np.round(float(roll), 3)), (10,320), font, 1, (0,0,255), 2)
cv.putText(img, "Pitch:"+str(np.round(float(pitch), 3)), (10,340), font, 1, (0,255,0), 2)
cv.putText(img, "Psi:"+str(np.round(float(yaw), 3)), (10,360), font, 1, (255,0,0), 2)
#########
roll_c.append(roll_obj)
pitch_c.append(pitch_obj)
yaw_c.append(yaw_obj)
if len(roll_c) == 100 and len(pitch_c) == 100 and len(yaw_c)==100:
print('Roll_C: {0}, Pitch_C: {1}, Yaw_C: {2}'.format(np.average(roll_c), np.average(pitch_c), np.average(yaw_c)))
roll_c=[]
pitch_c=[]
yaw_c=[]
######
roll_o.append(roll)
pitch_o.append(pitch)
yaw_o.append(yaw)
if len(roll_o) == 100 and len(pitch_o) == 100 and len(yaw_o)==100:
print('Roll: {0}, Pitch: {1}, Yaw: {2}'.format(np.average(roll_o), np.average(pitch_o), np.average(yaw_o)))
roll_o=[]
pitch_o=[]
yaw_o=[]
#Kalman Filter - Orientation Estimation
y_ori = np.array([[roll],[pitch],[yaw_obj],[gx],[gy],[gz]])
x_est, P_est = KF_Orientation(x_ant_ori, P_ant_ori, y_ori, dt)
x_ant_ori = x_est
P_ant_ori = P_est
roll_est = x_est[0]
pitch_est = x_est[1]
yaw_est = x_est[2]
cv.putText(img, "Orientacao Estimada por Filtro:", (10, 400), font, 1, (255,255,255), 2)
cv.putText(img, "Roll:"+str(np.round(float(roll_est), 3)), (10,420), font, 1, (0,0,255), 2)
cv.putText(img, "Pitch:"+str(np.round(float(pitch_est), 3)), (10,440), font, 1, (0,255,0), 2)
cv.putText(img, "Psi:"+str(np.round(float(yaw_est), 3)), (10,460), font, 1, (255,0,0), 2)
#Kalman Filter - Position Estimation
R_IMU = R.from_euler('zyx',[[float(yaw_est), float(pitch_est), float(roll_est)]], degrees=True).as_matrix()
y_pos = np.array([[xf_obj],[yf_obj],[zf_obj]])
u_pos = R_IMU@np.array([[ax],[ay],[az]]) - np.array([[0],[0],[9.81]])
# print(u_pos)
pos_est, P_pos = KF_Position(x_ant_pos, P_ant_pos, u_pos.reshape(3,1), y_pos, dt)
x_ant_pos = pos_est
P_ant_pos = P_pos
x_kf = pos_est[0]
y_kf = pos_est[1]
z_kf = pos_est[2]
cv.putText(img, "X:"+str(np.round(float(x_kf), 4)), (80,700), font, 1, (0,0,0), 2)
cv.putText(img, "Y:"+str(np.round(float(y_kf), 4)), (180,700), font, 1, (0,0,0), 2)
cv.putText(img, "Z:"+str(np.round(float(z_kf), 4)), (280,700), font, 1, (0,0,0), 2)
                    #Dynamic reference rotation matrix
if elapsed_time > 10:
if gama < np.pi/4:
gama += np.pi/800
elif beta < np.pi/6:
beta += np.pi/800
R_ref = np.array([[c(alfa)*c(gama) - s(alfa)*c(beta)*s(gama), s(alfa)*c(gama) + c(alfa)*c(beta)*s(gama), s(beta)*s(gama)],
[-c(alfa)*s(gama) - s(alfa)*c(beta)*c(gama), -s(alfa)*s(gama)+c(alfa)*c(beta)*c(gama), s(beta)*c(gama)],
[s(alfa)*s(beta), -c(alfa)*s(beta), c(beta)]])
R_ref = T_rc[0:3, 0:3]@R_ref
rvec_ref,_ = cv.Rodrigues(R_ref)
#Getting quaternions from rotation matrix
r_refe = sci.spatial.transform.Rotation.from_matrix(T_cr[0:3,0:3]@R_ref)
q_ref = r_refe.as_quat()
# # print('Quaternion:')
# # print(q)
roll_ref, pitch_ref, yaw_ref = computeAngles(q_ref[3], q_ref[1], q_ref[0], -q_ref[2])
cv.drawFrameAxes(img, mtx, dist, rvec_ref, np.array([[-0.2],[-0.1],[1.6]]), 0.066, )
cv.putText(img, "Orientacao Dinamica de Referencia:", (500, 200), font, 1, (255, 255, 255), 2)
cv.putText(img, "Phi:"+str(np.round(float(roll_ref), 2)), (500,220), font, 1, (0,0,255), 2)
cv.putText(img, "Theta:"+str(np.round(float(pitch_ref), 2)), (500,240), font, 1, (0,255,0), 2)
cv.putText(img, "Psi:"+str(np.round(float(yaw_ref), 2)), (500,260), font, 1, (255,0,0), 2)
#########################################################################################################
#Write position data in pos_data.txt
pos_data.write("{:.4f} , ".format(float(xf_obj)) + "{:.4f} , ".format(float(yf_obj)) + "{:.4f} , ".format(float(zf_obj)) + str(time.perf_counter()-start_clock) + "\n")
#Write attitude data in file
angle_data.write("{:.2f} , ".format(float(roll_ref)) + "{:.2f} , ".format(float(pitch_ref)) + "{:.2f} , ".format(float(yaw_ref))
+ "{:.2f} , ".format(float(roll_est)) + "{:.2f} , ".format(float(pitch_est)) + "{:.2f} , ".format(float(yaw_est)) + str(time.perf_counter()-start_clock) + "\n")
#Compute FPS
elapsed_time = time.perf_counter() - start_clock
print("Start time: {0} \n Elapsed Time: {1}".format(start_clock, elapsed_time))
fps = int(frame_id / elapsed_time)
#Print FPS on the screen
cv.putText(img, "FPS:" + str(fps), (10, 80), font, 1, (255,255,255), 1)
cv.putText(img, "Time Elapsed:" + str(round(elapsed_time,1)), (100, 80), font, 1, (255,255,255), 1)
cv.imshow('img', img)
key = cv.waitKey(10)
if key == ord('n') or key == ord('p'):
break
pos_data.close()
angle_data.close()
cap.stop()
# cap.release()
cv.destroyAllWindows()
isRun = False
thread.join()
serialConnection.close()
plt.style.use("fivethirtyeight")
fig, (ax, ax2, ax3) = plt.subplots(3, 1, figsize=(10,10), sharex=True)
rr_list, pr_list, yr_list, re_list, pe_list, ye_list, time_list = [], [], [], [], [], [], []  # avoid shadowing the time module
data = open("angle_data.txt", "r").read()
lines = data.split('\n')
for line in lines:
if len(line)>1:
rr, pr, yr, re, pe, ye, times = line.split(' , ')
rr_list.append(float(rr))
pr_list.append(float(pr))
yr_list.append(float(yr))
re_list.append(float(re))
pe_list.append(float(pe))
ye_list.append(float(ye))
        time_list.append(float(times))
ax.plot(time_list, rr_list, 'r--', alpha=0.6, label = r'$\phi_{ref} (t)$', linewidth = 1.5)
ax.plot(time_list, re_list, 'r', label=r'$\phi_{est}(t)$', linewidth=1)
ax2.plot(time_list, pr_list, 'g--', alpha=0.6, label = r'$\theta_{ref} (t)$', linewidth=1.5)
ax2.plot(time_list, pe_list, 'g', label=r'$\theta_{est}(t)$', linewidth=1)
ax3.plot(time_list, yr_list, 'b--', alpha=0.6, label = r'$\psi_{ref} (t)$', linewidth=1.5)
ax3.plot(time_list, ye_list, 'b', label=r'$\psi_{est}(t)$', linewidth=1)
ax3.set_xlabel('Time (s)')
ax.set_ylabel(r'$\phi$ (°)')
ax2.set_ylabel(r'$\theta$ (°)')
ax3.set_ylabel(r'$\psi$ (°)')
ax.set_title('Orientation')
ax.set_ylim([-90, 90])
ax2.set_ylim([-90, 90])
ax3.set_ylim([-90, 90])
ax.legend()
ax2.legend()
ax3.legend()
plt.tight_layout()
plt.show()
|
websocket_client_test.py
|
# encoding: UTF-8
import json
import ssl
import sys
import traceback
import socket
from datetime import datetime
from threading import Lock, Thread
from time import sleep
import websocket
from vnpy.gateway.gateway_test.my_log import MyLog
class WebsocketClient(object):
"""
Websocket API
After creating the client object, use start() to run worker and ping threads.
The worker thread connects websocket automatically.
Use stop to stop threads and disconnect websocket before destroying the client
object (especially when exiting the programme).
Default serialization format is json.
Callbacks to overrides:
* unpack_data
* on_connected
* on_disconnected
* on_packet
* on_error
    After start() is called, the ping thread will ping the server every ping_interval seconds, as configured via init().
If you want to send anything other than JSON, override send_packet.
"""
def __init__(self):
"""Constructor"""
self.host = None
self._ws_lock = Lock()
self._ws = None
self._worker_thread = None
self._ping_thread = None
self._active = False
self.proxy_host = None
self.proxy_port = None
self.ping_interval = 10 # seconds
self.header = {}
# For debugging
self._last_sent_text = None
self._last_received_text = None
self.log = MyLog()
def write_log(self, msg):
self.log.logger.info(msg)
def init(self, host: str, proxy_host: str = "", proxy_port: int = 0, ping_interval: int = 60, header: dict = None):
"""
:param ping_interval: unit: seconds, type: int
"""
self.host = host
self.ping_interval = ping_interval # seconds
if header:
self.header = header
if proxy_host and proxy_port:
self.proxy_host = proxy_host
self.proxy_port = proxy_port
def start(self):
"""
        Start the client. The on_connected callback is called after the
        websocket is connected successfully.
        Please don't send packets until the on_connected callback is called.
"""
self._active = True
self._worker_thread = Thread(target=self._run)
self._worker_thread.start()
self._ping_thread = Thread(target=self._run_ping)
self._ping_thread.start()
def stop(self):
"""
Stop the client.
"""
self._active = False
self._disconnect()
def join(self):
"""
Wait till all threads finish.
This function cannot be called from worker thread or callback function.
"""
self._ping_thread.join()
self._worker_thread.join()
def send_packet(self, packet: dict):
"""
Send a packet (dict data) to server
override this if you want to send non-json packet
"""
text = json.dumps(packet)
self._record_last_sent_text(text)
return self._send_text(text)
def _send_text(self, text: str):
"""
Send a text string to server.
"""
ws = self._ws
if ws:
ws.send(text, opcode=websocket.ABNF.OPCODE_TEXT)
def _send_binary(self, data: bytes):
"""
Send bytes data to server.
"""
ws = self._ws
if ws:
ws._send_binary(data)
def _reconnect(self):
""""""
if self._active:
self.write_log("_reconnect: 断开连接")
self._disconnect()
self.write_log("_reconnect: 重新连接")
self._connect()
def _create_connection(self, *args, **kwargs):
""""""
return websocket.create_connection(*args, **kwargs)
def _connect(self):
"""这里需要修改"""
self._ws = self._create_connection(
self.host,
sslopt={"cert_reqs": ssl.CERT_NONE},
http_proxy_host=self.proxy_host,
http_proxy_port=self.proxy_port,
header=self.header
)
self.on_connected()
def _disconnect(self):
"""
"""
with self._ws_lock:
            # TODO: this turned out to be where the bug was during debugging.
            # After OKEX kicks the connection, self._ws no longer exists,
            # but the original reconnect logic only reconnected when _ws existed.
            # The minimal change to the stock vnpy version is therefore to
            # always reset _ws to None on disconnect, whether or not it exists.
            if self._ws:
                self._ws.close()
            self._ws = None
def _run(self):
"""
Keep running till stop is called.
"""
try:
self._connect()
# todo: onDisconnect
while self._active:
try:
ws = self._ws
if ws:
text = ws.recv()
# ws object is closed when recv function is blocking
if not text:
self.write_log("_run: 收不到text")
self._reconnect()
continue
self._record_last_received_text(text)
try:
data = self.unpack_data(text)
except ValueError as e:
self.write_log("_run: 无法解压text")
print("websocket unable to parse data: " + text)
raise e
self.on_packet(data)
self.write_log("_run: 解压的data " + str(data))
# ws is closed before recv function is called
# For socket.error, see Issue #1608
except (websocket.WebSocketConnectionClosedException, socket.error):
self.write_log("_run: WebsocketConnectionClosedException和socket.error")
self._reconnect()
# other internal exception raised in on_packet
except Exception: # noqa
self.write_log("_run: socket.error other wrong")
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
self._reconnect()
except: # noqa
self.write_log("_run: _connect() except")
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
self._reconnect()
@staticmethod
def unpack_data(data: str):
"""
Default serialization format is json.
override this method if you want to use other serialization format.
"""
return json.loads(data)
def _run_ping(self):
""""""
while self._active:
try:
self.write_log("_run_ping: send ping")
self._ping()
except Exception: # noqa
self.write_log("_run_ping: wrong reconnect()")
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
self._reconnect()
for i in range(self.ping_interval):
if not self._active:
break
sleep(1)
def _ping(self):
""""""
ws = self._ws
if ws:
self.write_log("_ping: send ping")
ws.send("ping", websocket.ABNF.OPCODE_PING)
@staticmethod
def on_connected():
"""
Callback when websocket is connected successfully.
"""
pass
@staticmethod
def on_disconnected():
"""
Callback when websocket connection is lost.
"""
pass
@staticmethod
def on_packet(packet: dict):
"""
Callback when receiving data from server.
"""
pass
def on_error(self, exception_type: type, exception_value: Exception, tb):
"""
Callback when exception raised.
"""
sys.stderr.write(
self.exception_detail(exception_type, exception_value, tb)
)
return sys.excepthook(exception_type, exception_value, tb)
def exception_detail(
self, exception_type: type, exception_value: Exception, tb
):
"""
Print detailed exception information.
"""
text = "[{}]: Unhandled WebSocket Error:{}\n".format(
datetime.now().isoformat(), exception_type
)
text += "LastSentText:\n{}\n".format(self._last_sent_text)
text += "LastReceivedText:\n{}\n".format(self._last_received_text)
text += "Exception trace: \n"
text += "".join(
traceback.format_exception(exception_type, exception_value, tb)
)
return text
def _record_last_sent_text(self, text: str):
"""
Record last sent text for debug purpose.
"""
self._last_sent_text = text[:1000]
def _record_last_received_text(self, text: str):
"""
Record last received text for debug purpose.
"""
self._last_received_text = text[:1000]
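# Minimal usage sketch (illustrative; the endpoint URL and packet contents are
# assumptions, not part of this module):
#
#   class EchoClient(WebsocketClient):
#       def on_connected(self):
#           self.send_packet({"op": "subscribe"})
#       def on_packet(self, packet):
#           print("received:", packet)
#
#   client = EchoClient()
#   client.init("wss://example.com/ws", ping_interval=60)
#   client.start()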
|
sql_isolation_testcase.py
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pygresql.pg
import os
import subprocess
import re
import multiprocessing
import tempfile
import time
import sys
import socket
from optparse import OptionParser
import traceback
def is_digit(n):
try:
int(n)
return True
except ValueError:
return False
class SQLIsolationExecutor(object):
def __init__(self, dbname=''):
self.processes = {}
# The re.S flag makes the "." in the regex match newlines.
# When matched against a command in process_command(), all
# lines in the command are matched and sent as SQL query.
self.command_pattern = re.compile(r"^(-?\d+|[*])([&\\<\\>Uq]*?)\:(.*)", re.S)
if dbname:
self.dbname = dbname
else:
self.dbname = os.environ.get('PGDATABASE')
class SQLConnection(object):
def __init__(self, out_file, name, utility_mode, dbname):
self.name = name
self.utility_mode = utility_mode
self.out_file = out_file
self.dbname = dbname
parent_conn, child_conn = multiprocessing.Pipe(True)
self.p = multiprocessing.Process(target=self.session_process, args=(child_conn,))
self.pipe = parent_conn
self.has_open = False
self.p.start()
# Close "our" copy of the child's handle, so that if the child dies,
# recv() on the pipe will fail.
            child_conn.close()
def session_process(self, pipe):
sp = SQLIsolationExecutor.SQLSessionProcess(self.name,
self.utility_mode, pipe, self.dbname)
sp.do()
def query(self, command):
print >>self.out_file
self.out_file.flush()
if len(command.strip()) == 0:
return
if self.has_open:
raise Exception("Cannot query command while waiting for results")
self.pipe.send((command, False))
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print >>self.out_file, r.strip()
def fork(self, command, blocking):
print >>self.out_file, " <waiting ...>"
self.pipe.send((command, True))
if blocking:
time.sleep(0.5)
if self.pipe.poll(0):
p = self.pipe.recv()
raise Exception("Forked command is not blocking; got output: %s" % p.strip())
self.has_open = True
def join(self):
r = None
print >>self.out_file, " <... completed>"
if self.has_open:
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print >>self.out_file, r.strip()
self.has_open = False
def stop(self):
self.pipe.send(("", False))
self.p.join()
if self.has_open:
raise Exception("Should not finish test case while waiting for results")
def quit(self):
print >>self.out_file, "... <quitting>"
self.stop()
def terminate(self):
self.pipe.close()
self.p.terminate()
class SQLSessionProcess(object):
def __init__(self, name, utility_mode, pipe, dbname):
"""
Constructor
"""
self.name = name
self.utility_mode = utility_mode
self.pipe = pipe
self.dbname = dbname
if self.utility_mode:
(hostname, port) = self.get_utility_mode_port(name)
self.con = self.connectdb(given_dbname=self.dbname,
given_host=hostname,
given_port=port,
given_opt="-c gp_session_role=utility")
else:
self.con = self.connectdb(self.dbname)
def connectdb(self, given_dbname, given_host = None, given_port = None, given_opt = None):
con = None
retry = 1000
while retry:
try:
if (given_port is None):
con = pygresql.pg.connect(host= given_host,
opt= given_opt,
dbname= given_dbname)
else:
con = pygresql.pg.connect(host= given_host,
port= given_port,
opt= given_opt,
dbname= given_dbname)
break
except Exception as e:
if (("the database system is starting up" in str(e) or
"the database system is in recovery mode" in str(e)) and
retry > 1):
retry -= 1
time.sleep(0.1)
else:
raise
return con
def get_utility_mode_port(self, name):
"""
Gets the port number/hostname combination of the
contentid = name and role = primary
"""
con = self.connectdb(self.dbname)
r = con.query("SELECT hostname, port FROM gp_segment_configuration WHERE content = %s and role = 'p'" % name).getresult()
if len(r) == 0:
raise Exception("Invalid content %s" % name)
if r[0][0] == socket.gethostname():
return (None, int(r[0][1]))
return (r[0][0], int(r[0][1]))
def printout_result(self, r):
"""
            This is pretty dirty, but apparently the only way
            to get the pretty output of the query result.
            The reason is that, for some Python-internal reason,
            print(r) calls the correct function while neither str(r)
            nor repr(r) outputs something useful.
FIXME: once we upgrade to a modern pygresql this can probably go
away entirely; it looks like 5.0 may have consolidated the
internal print/str code.
"""
with tempfile.TemporaryFile() as f:
print >>f, r
f.seek(0) # rewind
ppr = f.read()
return ppr.strip() + "\n"
def execute_command(self, command):
"""
Executes a given command
"""
try:
r = self.con.query(command)
if r and type(r) == str:
echo_content = command[:-1].partition(" ")[0].upper()
return "%s %s" % (echo_content, self.printout_result(r))
elif r:
return self.printout_result(r)
else:
echo_content = command[:-1].partition(" ")[0].upper()
return echo_content
except Exception as e:
return str(e)
def do(self):
"""
Process loop.
            Ends when an empty command is received
"""
(c, wait) = self.pipe.recv()
while c:
if wait:
time.sleep(0.1)
r = self.execute_command(c)
self.pipe.send(r)
r = None
(c, wait) = self.pipe.recv()
def get_process(self, out_file, name, utility_mode=False, dbname=""):
"""
Gets or creates the process by the given name
"""
if len(name) > 0 and not is_digit(name):
raise Exception("Name should be a number")
if len(name) > 0 and not utility_mode and int(name) >= 1024:
raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
if not (name, utility_mode) in self.processes:
if not dbname:
dbname = self.dbname
self.processes[(name, utility_mode)] = SQLIsolationExecutor.SQLConnection(out_file, name, utility_mode, dbname)
return self.processes[(name, utility_mode)]
def quit_process(self, out_file, name, utility_mode=False, dbname=""):
"""
Quits a process with the given name
"""
if len(name) > 0 and not is_digit(name):
raise Exception("Name should be a number")
if len(name) > 0 and not utility_mode and int(name) >= 1024:
raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
if not (name, utility_mode) in self.processes:
raise Exception("Sessions not started cannot be quit")
self.processes[(name, utility_mode)].quit()
del self.processes[(name, utility_mode)]
def get_all_primary_contentids(self, dbname):
"""
Retrieves all primary content IDs (including the master). Intended for
use by *U queries.
"""
if not dbname:
dbname = self.dbname
con = pygresql.pg.connect(dbname=dbname)
result = con.query("SELECT content FROM gp_segment_configuration WHERE role = 'p'").getresult()
if len(result) == 0:
raise Exception("Invalid gp_segment_configuration contents")
return [int(content[0]) for content in result]
def process_command(self, command, output_file):
"""
Processes the given command.
The command at this point still includes the isolation behavior
flags, e.g. which session to use.
"""
process_name = ""
sql = command
flag = ""
dbname = ""
m = self.command_pattern.match(command)
if m:
process_name = m.groups()[0]
flag = m.groups()[1]
sql = m.groups()[2]
sql = sql.lstrip()
# If db_name is specified, it should be of the following syntax:
# 1:@db_name <db_name>: <sql>
if sql.startswith('@db_name'):
sql_parts = sql.split(':', 2)
if not len(sql_parts) == 2:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
if not sql_parts[0].startswith('@db_name'):
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
if not len(sql_parts[0].split()) == 2:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
dbname = sql_parts[0].split()[1].strip()
if not dbname:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
sql = sql_parts[1]
if not flag:
if sql.startswith('!'):
sql = sql[1:]
# Check for execution mode. E.g.
# !\retcode path/to/executable --option1 --option2 ...
#
# At the moment, we only recognize the \retcode mode, which
# ignores all program output in the diff (it's still printed)
# and adds the return code.
mode = None
if sql.startswith('\\'):
mode, sql = sql.split(None, 1)
if mode != '\\retcode':
raise Exception('Invalid execution mode: {}'.format(mode))
cmd_output = subprocess.Popen(sql.strip(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
stdout, _ = cmd_output.communicate()
print >> output_file
if mode == '\\retcode':
print >> output_file, '-- start_ignore'
print >> output_file, stdout
if mode == '\\retcode':
print >> output_file, '-- end_ignore'
print >> output_file, '(exited with code {})'.format(cmd_output.returncode)
else:
self.get_process(output_file, process_name, dbname=dbname).query(sql.strip())
elif flag == "&":
self.get_process(output_file, process_name, dbname=dbname).fork(sql.strip(), True)
elif flag == ">":
self.get_process(output_file, process_name, dbname=dbname).fork(sql.strip(), False)
elif flag == "<":
if len(sql) > 0:
raise Exception("No query should be given on join")
self.get_process(output_file, process_name, dbname=dbname).join()
elif flag == "q":
if len(sql) > 0:
raise Exception("No query should be given on quit")
self.quit_process(output_file, process_name, dbname=dbname)
elif flag == "U":
if process_name == '*':
process_names = [str(content) for content in self.get_all_primary_contentids(dbname)]
else:
process_names = [process_name]
for name in process_names:
self.get_process(output_file, name, utility_mode=True, dbname=dbname).query(sql.strip())
elif flag == "U&":
self.get_process(output_file, process_name, utility_mode=True, dbname=dbname).fork(sql.strip(), True)
elif flag == "U<":
if len(sql) > 0:
raise Exception("No query should be given on join")
self.get_process(output_file, process_name, utility_mode=True, dbname=dbname).join()
elif flag == "Uq":
if len(sql) > 0:
raise Exception("No query should be given on quit")
self.quit_process(output_file, process_name, utility_mode=True, dbname=dbname)
else:
raise Exception("Invalid isolation flag")
def process_isolation_file(self, sql_file, output_file):
"""
Processes the given sql file and writes the output
to output file
"""
try:
command = ""
for line in sql_file:
#tinctest.logger.info("re.match: %s" %re.match(r"^\d+[q\\<]:$", line))
print >>output_file, line.strip(),
if line[0] == "!":
command_part = line # shell commands can use -- for multichar options like --include
else:
command_part = line.partition("--")[0] # remove comment from line
if command_part == "" or command_part == "\n":
print >>output_file
elif command_part.endswith(";\n") or re.match(r"^\d+[q\\<]:$", line) or re.match(r"^-?\d+U[q\\<]:$", line):
command += command_part
try:
self.process_command(command, output_file)
except Exception as e:
print >>output_file, "FAILED: ", e
command = ""
else:
command += command_part
for process in self.processes.values():
process.stop()
except:
for process in self.processes.values():
process.terminate()
raise
finally:
for process in self.processes.values():
process.terminate()
class SQLIsolationTestCase:
"""
The isolation test case allows a fine grained control of interleaved
executing transactions. This is mainly used to test isolation behavior.
[<#>[flag]:] <sql> | ! <shell scripts or command>
#: either an integer indicating an unique session, or a content-id if
followed by U (for utility-mode connections). In 'U' mode, the
content-id can alternatively be an asterisk '*' to perform a
utility-mode query on the master and all primaries.
flag:
&: expect blocking behavior
>: running in background without blocking
<: join an existing session
q: quit the given session
U: connect in utility mode to primary contentid from gp_segment_configuration
U&: expect blocking behavior in utility mode (does not currently support an asterisk target)
U<: join an existing utility mode session (does not currently support an asterisk target)
An example is:
Execute BEGIN in transaction 1
Execute BEGIN in transaction 2
Execute INSERT in transaction 2
Execute SELECT in transaction 1
Execute COMMIT in transaction 2
Execute SELECT in transaction 1
The isolation tests are specified identically to sql-scripts in normal
SQLTestCases. However, it is possible to prefix a SQL line with
a transaction identifier followed by a colon (":").
The above example would be defined by
1: BEGIN;
2: BEGIN;
2: INSERT INTO a VALUES (1);
1: SELECT * FROM a;
2: COMMIT;
1: SELECT * FROM a;
Blocking behavior can be tested by forking and joining.
1: BEGIN;
2: BEGIN;
1: DELETE FROM foo WHERE a = 4;
2&: DELETE FROM foo WHERE a = 4;
1: COMMIT;
2<:
2: COMMIT;
2& forks the command. It is executed in the background. If the
command is NOT blocking at this point, it is considered an error.
2< joins the background command and outputs the result of the
command execution.
Session ids should be smaller than 1024.
2U: Executes a utility command connected to the primary segment with content id 2.
One difference to SQLTestCase is the output of INSERT.
SQLTestCase would output "INSERT 0 1" if one tuple is inserted.
SQLIsolationTestCase would output "INSERT 1". As the
SQLIsolationTestCase needs to have a more fine-grained control
over the execution order than possible with PSQL, it uses
the pygresql python library instead.
Connecting to a specific database:
1. If you specify a db_name metadata in the sql file, connect to that database in all open sessions.
2. If you want a specific session to be connected to a specific database, specify the sql as follows:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: <sql>
2: <sql>
etc
Here session 1 will be connected to testdb and session 2 will be connected to test2db. You can specify @db_name only at the beginning of a session. For example, the following would error out:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: @db_name testdb: <sql>
2: <sql>
etc
Quitting sessions:
By default, all opened sessions will be stopped only at the end of the sql file execution. If you want to explicitly quit a session
in the middle of the test execution, you can specify the flag 'q' with the session identifier. For example:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: <sql>
2: <sql>
1q:
2: <sql>
3: <sql>
2q:
3: <sql>
2: @db_name test: <sql>
1q: ---> Will quit the session established with testdb.
2q: ---> Will quit the session established with test2db.
The subsequent 2: @db_name test: <sql> will open a new session with the database test and execute the sql against that session.
Catalog Modification:
Some tests are easier to write if it's possible to modify a system
catalog across the *entire* cluster. To perform a utility-mode query on
all segments and the master, you can use *U commands:
*U: SET allow_system_table_mods = true;
*U: UPDATE pg_catalog.<table> SET <column> = <value> WHERE <cond>;
Since the number of query results returned by a *U command depends on
the developer's cluster configuration, it can be useful to wrap them in
a start_/end_ignore block. (Unfortunately, this also hides legitimate
failures; a better long-term solution is needed.)
Block/join flags are not currently supported with *U.
"""
def run_sql_file(self, sql_file, out_file = None, out_dir = None, optimizer = None):
"""
Given a sql file and an ans file, this adds the specified gucs (self.gucs) to the sql file, runs the sql
against the test case database (self.db_name) and verifies the output with the ans file.
If an 'init_file' exists in the same location as the sql_file, this will be used
while doing gpdiff.
"""
# Add gucs to the test sql and form the actual sql file to be run
if not out_dir:
out_dir = self.get_out_dir()
if not os.path.exists(out_dir):
TINCSystem.make_dirs(out_dir, ignore_exists_error = True)
if optimizer is None:
gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file))
else:
# sql file will be <basename>_opt.sql or <basename>_planner.sql based on optimizer
gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file).replace('.sql', '_%s.sql' %self._optimizer_suffix(optimizer)))
self._add_gucs_to_sql_file(sql_file, gucs_sql_file, optimizer)
self.test_artifacts.append(gucs_sql_file)
if not out_file:
if optimizer is None:
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
else:
# out file will be *_opt.out or *_planner.out based on optimizer
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '_%s.out' %self._optimizer_suffix(optimizer)))
self.test_artifacts.append(out_file)
executor = SQLIsolationExecutor(dbname=self.db_name)
with open(out_file, "w") as f:
executor.process_isolation_file(open(sql_file), f)
f.flush()
if out_file[-2:] == '.t':
out_file = out_file[:-2]
return out_file
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("--dbname", dest="dbname",
help="connect to database DBNAME", metavar="DBNAME")
(options, args) = parser.parse_args()
executor = SQLIsolationExecutor(dbname=options.dbname)
executor.process_isolation_file(sys.stdin, sys.stdout)
|
run_unittests.py
|
#!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import stat
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
import urllib.error
import urllib.request
import zipfile
import hashlib
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from mesonbuild._pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import typing as T
import mesonbuild.mlog
import mesonbuild.depfile
import mesonbuild.dependencies.base
import mesonbuild.compilers
import mesonbuild.envconfig
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version, is_windows,
is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku, is_sunos,
windows_proof_rmtree, python_command, version_compare, split_args,
quote_arg, relpath, is_linux, git, GIT
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException
from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram
from mesonbuild.build import Target, ConfigurationData
import mesonbuild.modules.pkgconfig
from mesonbuild.mtest import TAPParser, TestResult
from mesonbuild.wrap.wrap import PackageDefinition, WrapException
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
URLOPEN_TIMEOUT = 5
@contextmanager
def chdir(path: str):
curdir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(curdir)
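# Usage sketch:
#
#   with chdir(tmpdir):
#       ...  # relative paths resolve inside tmpdir
#   # the previous working directory is restored, even on exceptions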
def get_dynamic_section_entry(fname, entry):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF platforms')
try:
raw_out = subprocess.check_output(['readelf', '-d', fname],
universal_newlines=True)
except FileNotFoundError:
# FIXME: Try using depfixer.py:Elf() as a fallback
raise unittest.SkipTest('readelf not found')
pattern = re.compile(entry + r': \[(.*?)\]')
for line in raw_out.split('\n'):
m = pattern.search(line)
if m is not None:
return m.group(1)
return None # The file did not contain the specified entry.
def get_soname(fname):
return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname):
return get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
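# For reference, 'readelf -d' output contains lines like
#   0x000000000000000e (SONAME)  Library soname: [libfoo.so.1]
# (exact formatting varies by binutils version); the regex above extracts
# the bracketed value, e.g. 'libfoo.so.1'.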
def is_tarball():
    return not os.path.isdir('docs')
def is_ci():
    return 'CI' in os.environ
def is_pull():
# Travis
if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false':
return True
# Azure
if 'SYSTEM_PULLREQUEST_ISFORK' in os.environ:
return True
return False
def _git_init(project_dir):
subprocess.check_call(['git', 'init'], cwd=project_dir, stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'config',
'user.name', 'Author Person'], cwd=project_dir)
subprocess.check_call(['git', 'config',
'user.email', 'teh_coderz@example.com'], cwd=project_dir)
subprocess.check_call('git add *', cwd=project_dir, shell=True,
stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
stdout=subprocess.DEVNULL)
@functools.lru_cache()
def is_real_gnu_compiler(path):
'''
Check if the gcc we have is a real gcc and not a macOS wrapper around clang
'''
if not path:
return False
out = subprocess.check_output([path, '--version'], universal_newlines=True, stderr=subprocess.STDOUT)
return 'Free Software Foundation' in out
def skipIfNoExecutable(exename):
'''
Skip this test if the given executable is not found.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if shutil.which(exename) is None:
raise unittest.SkipTest(exename + ' not found')
return func(*args, **kwargs)
return wrapped
return wrapper
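# Usage sketch (hypothetical test name):
#
#   @skipIfNoExecutable('ninja')
#   def test_something_needing_ninja(self):
#       ...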
def skipIfNoPkgconfig(f):
'''
Skip this test if no pkg-config is found, unless we're on CI.
This allows users to run our test suite without having
pkg-config installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
Note: Yes, we provide pkg-config even while running Windows CI
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
return f(*args, **kwargs)
return wrapped
def skipIfNoPkgconfigDep(depname):
'''
Skip this test if the given pkg-config dep is not found, unless we're on CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_no_cmake(f):
'''
Skip this test if no cmake is found, unless we're on CI.
This allows users to run our test suite without having
cmake installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('cmake') is None:
raise unittest.SkipTest('cmake not found')
return f(*args, **kwargs)
return wrapped
def skip_if_not_language(lang):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
env = get_fake_env()
f = getattr(env, 'detect_{}_compiler'.format(lang))
f(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('No {} compiler found.'.format(lang))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_env_set(key):
'''
Skip a test if a particular environment variable is set, except when running under CI
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
old = None
if key in os.environ:
if not is_ci():
raise unittest.SkipTest('Env var {!r} set, skipping'.format(key))
old = os.environ.pop(key)
try:
return func(*args, **kwargs)
finally:
if old is not None:
os.environ[key] = old
return wrapped
return wrapper
def skip_if_not_base_option(feature):
"""Skip tests if The compiler does not support a given base option.
for example, ICC doesn't currently support b_sanitize.
"""
def actual(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if feature not in cc.base_options:
raise unittest.SkipTest(
'{} not available with {}'.format(feature, cc.id))
return f(*args, **kwargs)
return wrapped
return actual
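# Usage sketch, reusing the b_sanitize example from the docstring:
#
#   @skip_if_not_base_option('b_sanitize')
#   def test_sanitized_build(self):
#       ...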
@contextmanager
def temp_filename():
'''A context manager which provides the filename of an empty temporary file.
On exit the file will be deleted.
'''
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
yield filename
finally:
try:
os.remove(filename)
except OSError:
pass
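# Usage sketch:
#
#   with temp_filename() as fname:
#       with open(fname, 'w') as f:
#           f.write('scratch')
#   # fname no longer exists here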
@contextmanager
def no_pkgconfig():
'''
A context manager that overrides shutil.which and ExternalProgram to force
them to return None for pkg-config to simulate it not existing.
'''
old_which = shutil.which
old_search = ExternalProgram._search
def new_search(self, name, search_dir):
if name == 'pkg-config':
return [None]
return old_search(self, name, search_dir)
def new_which(cmd, *args):
    if cmd == 'pkg-config':
        return None
    return old_which(cmd, *args)
shutil.which = new_which
ExternalProgram._search = new_search
try:
yield
finally:
shutil.which = old_which
ExternalProgram._search = old_search
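# Usage sketch: inside the block, both lookup paths report pkg-config as
# missing.
#
#   with no_pkgconfig():
#       assert shutil.which('pkg-config') is None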
class InternalTests(unittest.TestCase):
def test_version_number(self):
searchfunc = mesonbuild.environment.search_version
self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.128'), '2016.10.128')
self.assertEqual(searchfunc('2016.10.128'), '2016.10.128')
self.assertEqual(searchfunc('2016.10'), '2016.10')
self.assertEqual(searchfunc('2016.10 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('oops v1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.oops 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.x'), 'unknown version')
def test_mode_symbolic_to_bits(self):
modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
self.assertEqual(modefunc('---------'), 0)
self.assertEqual(modefunc('r--------'), stat.S_IRUSR)
self.assertEqual(modefunc('---r-----'), stat.S_IRGRP)
self.assertEqual(modefunc('------r--'), stat.S_IROTH)
self.assertEqual(modefunc('-w-------'), stat.S_IWUSR)
self.assertEqual(modefunc('----w----'), stat.S_IWGRP)
self.assertEqual(modefunc('-------w-'), stat.S_IWOTH)
self.assertEqual(modefunc('--x------'), stat.S_IXUSR)
self.assertEqual(modefunc('-----x---'), stat.S_IXGRP)
self.assertEqual(modefunc('--------x'), stat.S_IXOTH)
self.assertEqual(modefunc('--S------'), stat.S_ISUID)
self.assertEqual(modefunc('-----S---'), stat.S_ISGID)
self.assertEqual(modefunc('--------T'), stat.S_ISVTX)
self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR)
self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP)
self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH)
self.assertEqual(modefunc('rwx------'), stat.S_IRWXU)
self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG)
self.assertEqual(modefunc('------rwx'), stat.S_IRWXO)
# We could keep listing combinations exhaustively but that seems
# tedious and pointless. Just test a few more.
self.assertEqual(modefunc('rwxr-xr-x'),
stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
self.assertEqual(modefunc('rw-r--r--'),
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP |
stat.S_IROTH)
self.assertEqual(modefunc('rwsr-x---'),
stat.S_IRWXU | stat.S_ISUID |
stat.S_IRGRP | stat.S_IXGRP)
def test_compiler_args_class_none_flush(self):
cc = mesonbuild.compilers.ClangCCompiler([], 'fake', MachineChoice.HOST, False, mock.Mock())
a = cc.compiler_args(['-I.'])
# First, check that the tree construction deduplicates the correct -I argument
a += ['-I..']
a += ['-I./tests/']
a += ['-I./tests2/']
# Think of this as an assertion; we cannot actually apply it, otherwise CompilerArgs would already flush the changes:
# assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..', '-I.'])
a += ['-I.']
a += ['-I.', '-I./tests/']
self.assertEqual(a, ['-I.', '-I./tests/', '-I./tests2/', '-I..'])
# Then check that when CompilerArgs already has a built container list, the deduplication picks the correct one
a += ['-I.', '-I./tests2/']
self.assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..'])
def test_compiler_args_class_d(self):
d = mesonbuild.compilers.DmdDCompiler([], 'fake', MachineChoice.HOST, 'info', 'arch')
# check include order is kept when deduplicating
a = d.compiler_args(['-Ifirst', '-Isecond', '-Ithird'])
a += ['-Ifirst']
self.assertEqual(a, ['-Ifirst', '-Isecond', '-Ithird'])
def test_compiler_args_class_clike(self):
cc = mesonbuild.compilers.ClangCCompiler([], 'fake', MachineChoice.HOST, False, mock.Mock())
# Test that empty initialization works
a = cc.compiler_args()
self.assertEqual(a, [])
# Test that list initialization works
a = cc.compiler_args(['-I.', '-I..'])
self.assertEqual(a, ['-I.', '-I..'])
# Test that there is no de-dup on initialization
self.assertEqual(cc.compiler_args(['-I.', '-I.']), ['-I.', '-I.'])
## Test that appending works
a.append('-I..')
self.assertEqual(a, ['-I..', '-I.'])
a.append('-O3')
self.assertEqual(a, ['-I..', '-I.', '-O3'])
## Test that in-place addition works
a += ['-O2', '-O2']
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
# Test that removal works
a.remove('-O2')
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
# Test that de-dup happens on addition
a += ['-Ifoo', '-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# .extend() is just +=, so we don't test it
## Test that addition works
# Test that adding a list with just one old arg works and yields the same array
a = a + ['-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# Test that adding a list with one arg new and one old works
a = a + ['-Ifoo', '-Ibaz']
self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
# Test that adding args that must be prepended and appended works
a = a + ['-Ibar', '-Wall']
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
## Test that reflected addition works
# Test that adding to a list with just one old arg works and yields the same array
a = ['-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
# Test that adding to a list with just one new arg that is not pre-pended works
a = ['-Werror'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with two new args preserves the order
a = ['-Ldir', '-Lbah'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with old args does nothing
a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
## Test that adding libraries works
l = cc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Adding a library and a libpath appends both correctly
l += ['-Lbardir', '-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
# Adding the same library again does nothing
l += ['-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
## Test that 'direct' append and extend works
l = cc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
## Test --start/end-group
linker = mesonbuild.linkers.GnuBFDDynamicLinker([], MachineChoice.HOST, '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding a non-library argument doesn't include it in the group
l += ['-Lfoo', '-Wl,--export-dynamic']
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
# -Wl,-ldl is detected as a library and gets added to the group
l.append('-Wl,-ldl')
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_compiler_args_remove_system(self):
## Test --start/end-group
linker = mesonbuild.linkers.GnuBFDDynamicLinker([], MachineChoice.HOST, '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
## Test that to_native removes all system includes
l += ['-isystem/usr/include', '-isystem=/usr/share/include', '-DSOMETHING_IMPORTANT=1', '-isystem', '/usr/local/include']
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group', '-DSOMETHING_IMPORTANT=1'])
def test_string_templates_substitution(self):
dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
substfunc = mesonbuild.mesonlib.substitute_values
ME = mesonbuild.mesonlib.MesonException
# Identity
self.assertEqual(dictfunc([], []), {})
# One input, no outputs
inputs = ['bar/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
# One input, one output
inputs = ['bar/foo.c.in']
outputs = ['out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + outputs + cmd[2:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
# One input, one output with a subdir
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Two inputs, no outputs
inputs = ['bar/foo.c.in', 'baz/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
cmd = ['@INPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
cmd = ['@INPUT0@', '@INPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# @PLAINNAME@ and @BASENAME@ are only defined for a single input
cmd = ['@PLAINNAME@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@BASENAME@']
self.assertRaises(ME, substfunc, cmd, d)
# No outputs
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTPUT0@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTDIR@']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, one output
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, two outputs
outputs = ['dir/out.c', 'dir/out2.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
'@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Many outputs, can't use @OUTPUT@ like this
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
config = ConfigParser()
config['binaries'] = {
'c': '\'/usr/bin/gcc\'',
}
config['host_machine'] = {
'system': '\'linux\'',
'cpu_family': '\'arm\'',
'cpu': '\'armv7\'',
'endian': '\'little\'',
}
# Cannot be used as a context manager because we need to
# open it a second time, and that is not possible on
# Windows.
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.flush()
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
detected_value = env.need_exe_wrapper()
os.unlink(configfilename)
desired_value = not detected_value
config['properties'] = {
'needs_exe_wrapper': 'true' if desired_value else 'false'
}
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
forced_value = env.need_exe_wrapper()
os.unlink(configfilename)
self.assertEqual(forced_value, desired_value)
def test_listify(self):
listify = mesonbuild.mesonlib.listify
# Test sanity
self.assertEqual([1], listify(1))
self.assertEqual([], listify([]))
self.assertEqual([1], listify([1]))
# Test flattening
self.assertEqual([1, 2, 3], listify([1, [2, 3]]))
self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))
self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
# Test that holders are passed through, not flattened away
holder1 = ObjectHolder(1)
self.assertEqual([holder1], listify(holder1))
self.assertEqual([holder1], listify([holder1]))
self.assertEqual([holder1, 2], listify([holder1, 2]))
self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))
def test_unholder(self):
unholder = mesonbuild.mesonlib.unholder
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
holders = [holder1, holder3]
self.assertEqual(1, unholder(holder1))
self.assertEqual([1], unholder([holder1]))
self.assertEqual([1, 3], unholder(holders))
def test_extract_as_list(self):
extract = mesonbuild.mesonlib.extract_as_list
# Test sanity
kwargs = {'sources': [1, 2, 3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
self.assertEqual(kwargs, {'sources': [1, 2, 3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
self.assertEqual(kwargs, {})
# Holders are kept as-is in the kwargs
holder3 = ObjectHolder(3)
kwargs = {'sources': [1, 2, holder3]}
self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
# flatten nested lists
kwargs = {'sources': [1, [2, [3]]]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
def test_pkgconfig_module(self):
dummystate = mock.Mock()
dummystate.subproject = 'dummy'
_mock = mock.Mock(spec=mesonbuild.dependencies.ExternalDependency)
_mock.pcdep = mock.Mock()
_mock.pcdep.name = "some_name"
_mock.version_reqs = []
_mock = mock.Mock(held_object=_mock)
# pkgconfig dependency as lib
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_libs([_mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
# pkgconfig dependency as requires
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_reqs([_mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
shr = patterns[platform]['shared']
stc = patterns[platform]['static']
shrstc = shr + tuple([x for x in stc if x not in shr])
stcshr = stc + tuple([x for x in shr if x not in stc])
p = cc.get_library_naming(env, LibType.SHARED)
self.assertEqual(p, shr)
p = cc.get_library_naming(env, LibType.STATIC)
self.assertEqual(p, stc)
p = cc.get_library_naming(env, LibType.PREFER_STATIC)
self.assertEqual(p, stcshr)
p = cc.get_library_naming(env, LibType.PREFER_SHARED)
self.assertEqual(p, shrstc)
# Test find library by mocking up openbsd
if platform != 'openbsd':
return
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
f.write('')
found = cc._find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
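# Among the candidates created above, libfoo.so.54.0 carries the highest
# well-formed [0-9]*.[0-9]* suffix, so it should win; the names with
# non-numeric version components are expected to be rejected.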
def test_find_library_patterns(self):
'''
Unit test for the library search patterns used by find_library()
'''
unix_static = ('lib{}.a', '{}.a')
msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
# This is the priority list of pattern matching for library searching
patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
'static': unix_static},
'linux': {'shared': ('lib{}.so', '{}.so'),
'static': unix_static},
'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
'static': unix_static},
'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
'lib{}.dll.a', '{}.dll', '{}.dll.a'),
'static': ('cyg{}.a',) + unix_static},
'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
'static': msvc_static},
'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
'{}.dll.a', '{}.lib', '{}.dll'),
'static': msvc_static}}
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if is_osx():
self._test_all_naming(cc, env, patterns, 'darwin')
elif is_cygwin():
self._test_all_naming(cc, env, patterns, 'cygwin')
elif is_windows():
if cc.get_argument_syntax() == 'msvc':
self._test_all_naming(cc, env, patterns, 'windows-msvc')
else:
self._test_all_naming(cc, env, patterns, 'windows-mingw')
elif is_openbsd():
self._test_all_naming(cc, env, patterns, 'openbsd')
else:
self._test_all_naming(cc, env, patterns, 'linux')
env.machines.host.system = 'openbsd'
self._test_all_naming(cc, env, patterns, 'openbsd')
env.machines.host.system = 'darwin'
self._test_all_naming(cc, env, patterns, 'darwin')
env.machines.host.system = 'cygwin'
self._test_all_naming(cc, env, patterns, 'cygwin')
env.machines.host.system = 'windows'
self._test_all_naming(cc, env, patterns, 'windows-mingw')
@skipIfNoPkgconfig
def test_pkgconfig_parse_libs(self):
'''
Unit test for parsing of pkg-config output to search for libraries
https://github.com/mesonbuild/meson/issues/3951
'''
def create_static_lib(name):
if not is_osx():
name.open('w').close()
return
src = name.with_suffix('.c')
out = name.with_suffix('.o')
with src.open('w') as f:
f.write('int meson_foobar (void) { return 0; }')
subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
subprocess.check_call(['ar', 'csr', str(name), str(out)])
with tempfile.TemporaryDirectory() as tmpdir:
pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
env = get_fake_env()
compiler = env.detect_c_compiler(MachineChoice.HOST)
env.coredata.compilers.host = {'c': compiler}
env.coredata.compiler_options.host['c']['link_args'] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
p2.mkdir()
# libfoo.a is in one prefix
create_static_lib(p1 / 'libfoo.a')
# libbar.a is in both prefixes
create_static_lib(p1 / 'libbar.a')
create_static_lib(p2 / 'libbar.a')
# Ensure that we never statically link to these
create_static_lib(p1 / 'libpthread.a')
create_static_lib(p1 / 'libm.a')
create_static_lib(p1 / 'libc.a')
create_static_lib(p1 / 'libdl.a')
create_static_lib(p1 / 'librt.a')
def fake_call_pkgbin(self, args, env=None):
if '--libs' not in args:
return 0, '', ''
if args[0] == 'foo':
return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix()), ''
if args[0] == 'bar':
return 0, '-L{} -lbar'.format(p2.as_posix()), ''
if args[0] == 'internal':
return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix()), ''
old_call = PkgConfigDependency._call_pkgbin
old_check = PkgConfigDependency.check_pkgconfig
PkgConfigDependency._call_pkgbin = fake_call_pkgbin
PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
# Test begins
try:
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('foo', env, kwargs)
self.assertEqual(foo_dep.get_link_args(),
[(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
bar_dep = PkgConfigDependency('bar', env, kwargs)
self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
internal_dep = PkgConfigDependency('internal', env, kwargs)
if compiler.get_argument_syntax() == 'msvc':
self.assertEqual(internal_dep.get_link_args(), [])
else:
link_args = internal_dep.get_link_args()
for link_arg in link_args:
for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args)
finally:
# Test ends
PkgConfigDependency._call_pkgbin = old_call
PkgConfigDependency.check_pkgconfig = old_check
# Reset dependency class to ensure that in-process configure doesn't mess up
PkgConfigDependency.pkgbin_cache = {}
PkgConfigDependency.class_pkgbin = PerMachine(None, None)
def test_version_compare(self):
comparefunc = mesonbuild.mesonlib.version_compare_many
for (a, b, result) in [
('0.99.beta19', '>= 0.99.beta14', True),
]:
self.assertEqual(comparefunc(a, b)[0], result)
for (a, b, op) in [
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
("1.0010", "1.9", operator.gt),
("1.05", "1.5", operator.eq),
("1.0", "1", operator.gt),
("2.50", "2.5", operator.gt),
("fc4", "fc.4", operator.eq),
("FC5", "fc4", operator.lt),
("2a", "2.0", operator.lt),
("1.0", "1.fc4", operator.gt),
("3.0.0_fc", "3.0.0.fc", operator.eq),
# from RPM tests
("1.0", "1.0", operator.eq),
("1.0", "2.0", operator.lt),
("2.0", "1.0", operator.gt),
("2.0.1", "2.0.1", operator.eq),
("2.0", "2.0.1", operator.lt),
("2.0.1", "2.0", operator.gt),
("2.0.1a", "2.0.1a", operator.eq),
("2.0.1a", "2.0.1", operator.gt),
("2.0.1", "2.0.1a", operator.lt),
("5.5p1", "5.5p1", operator.eq),
("5.5p1", "5.5p2", operator.lt),
("5.5p2", "5.5p1", operator.gt),
("5.5p10", "5.5p10", operator.eq),
("5.5p1", "5.5p10", operator.lt),
("5.5p10", "5.5p1", operator.gt),
("10xyz", "10.1xyz", operator.lt),
("10.1xyz", "10xyz", operator.gt),
("xyz10", "xyz10", operator.eq),
("xyz10", "xyz10.1", operator.lt),
("xyz10.1", "xyz10", operator.gt),
("xyz.4", "xyz.4", operator.eq),
("xyz.4", "8", operator.lt),
("8", "xyz.4", operator.gt),
("xyz.4", "2", operator.lt),
("2", "xyz.4", operator.gt),
("5.5p2", "5.6p1", operator.lt),
("5.6p1", "5.5p2", operator.gt),
("5.6p1", "6.5p1", operator.lt),
("6.5p1", "5.6p1", operator.gt),
("6.0.rc1", "6.0", operator.gt),
("6.0", "6.0.rc1", operator.lt),
("10b2", "10a1", operator.gt),
("10a2", "10b2", operator.lt),
("1.0aa", "1.0aa", operator.eq),
("1.0a", "1.0aa", operator.lt),
("1.0aa", "1.0a", operator.gt),
("10.0001", "10.0001", operator.eq),
("10.0001", "10.1", operator.eq),
("10.1", "10.0001", operator.eq),
("10.0001", "10.0039", operator.lt),
("10.0039", "10.0001", operator.gt),
("4.999.9", "5.0", operator.lt),
("5.0", "4.999.9", operator.gt),
("20101121", "20101121", operator.eq),
("20101121", "20101122", operator.lt),
("20101122", "20101121", operator.gt),
("2_0", "2_0", operator.eq),
("2.0", "2_0", operator.eq),
("2_0", "2.0", operator.eq),
("a", "a", operator.eq),
("a+", "a+", operator.eq),
("a+", "a_", operator.eq),
("a_", "a+", operator.eq),
("+a", "+a", operator.eq),
("+a", "_a", operator.eq),
("_a", "+a", operator.eq),
("+_", "+_", operator.eq),
("_+", "+_", operator.eq),
("_+", "_+", operator.eq),
("+", "_", operator.eq),
("_", "+", operator.eq),
# other tests
('0.99.beta19', '0.99.beta14', operator.gt),
("1.0.0", "2.0.0", operator.lt),
(".0.0", "2.0.0", operator.lt),
("alpha", "beta", operator.lt),
("1.0", "1.0.0", operator.lt),
("2.456", "2.1000", operator.lt),
("2.1000", "3.111", operator.lt),
("2.001", "2.1", operator.eq),
("2.34", "2.34", operator.eq),
("6.1.2", "6.3.8", operator.lt),
("1.7.3.0", "2.0.0", operator.lt),
("2.24.51", "2.25", operator.lt),
("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
("3.4.1", "3.4b1", operator.gt),
("041206", "200090325", operator.lt),
("0.6.2+git20130413", "0.6.2", operator.gt),
("2.6.0+bzr6602", "2.6.0", operator.gt),
("2.6.0", "2.6b2", operator.gt),
("2.6.0+bzr6602", "2.6b2x", operator.gt),
("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
("15.8b", "15.8.0.1", operator.lt),
("1.2rc1", "1.2.0", operator.lt),
]:
ver_a = Version(a)
ver_b = Version(b)
if op is operator.eq:
for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.lt:
for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.gt:
for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
def test_msvc_toolset_version(self):
'''
Ensure that the toolset version returns the correct value for this MSVC
'''
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
toolset_ver = cc.get_toolset_version()
self.assertIsNotNone(toolset_ver)
# Visual Studio 2015 and older versions do not define VCToolsVersion
# TODO: ICL doesn't set this in the VSC2015 profile either
if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
return
if 'VCToolsVersion' in os.environ:
vctools_ver = os.environ['VCToolsVersion']
else:
self.assertIn('VCINSTALLDIR', os.environ)
# See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
self.assertTrue(vctools_ver.startswith(toolset_ver),
msg='{!r} does not start with {!r}'.format(vctools_ver, toolset_ver))
def test_split_args(self):
split_args = mesonbuild.mesonlib.split_args
join_args = mesonbuild.mesonlib.join_args
if is_windows():
test_data = [
# examples from https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments
(r'"a b c" d e', ['a b c', 'd', 'e'], True),
(r'"ab\"c" "\\" d', ['ab"c', '\\', 'd'], False),
(r'a\\\b d"e f"g h', [r'a\\\b', 'de fg', 'h'], False),
(r'a\\\"b c d', [r'a\"b', 'c', 'd'], False),
(r'a\\\\"b c" d e', [r'a\\b c', 'd', 'e'], False),
# other basics
(r'""', [''], True),
(r'a b c d "" e', ['a', 'b', 'c', 'd', '', 'e'], True),
(r"'a b c' d e", ["'a", 'b', "c'", 'd', 'e'], True),
(r"'a&b&c' d e", ["'a&b&c'", 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], True),
(r"'a & b & c d e'", ["'a", '&', 'b', '&', 'c', 'd', "e'"], True),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
# more illustrative tests
(r'cl test.cpp /O1 /Fe:test.exe', ['cl', 'test.cpp', '/O1', '/Fe:test.exe'], True),
(r'cl "test.cpp /O1 /Fe:test.exe"', ['cl', 'test.cpp /O1 /Fe:test.exe'], True),
(r'cl /DNAME=\"Bob\" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob\"" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], True),
(r'cl /DNAME=\"Bob, Alice\" test.cpp', ['cl', '/DNAME="Bob,', 'Alice"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob, Alice\"" test.cpp', ['cl', '/DNAME="Bob, Alice"', 'test.cpp'], True),
(r'cl C:\path\with\backslashes.cpp', ['cl', r'C:\path\with\backslashes.cpp'], True),
(r'cl C:\\path\\with\\double\\backslashes.cpp', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], True),
(r'cl "C:\\path\\with\\double\\backslashes.cpp"', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], False),
(r'cl C:\path with spaces\test.cpp', ['cl', r'C:\path', 'with', r'spaces\test.cpp'], False),
(r'cl "C:\path with spaces\test.cpp"', ['cl', r'C:\path with spaces\test.cpp'], True),
(r'cl /DPATH="C:\path\with\backslashes test.cpp', ['cl', r'/DPATH=C:\path\with\backslashes test.cpp'], False),
(r'cl /DPATH=\"C:\\ends\\with\\backslashes\\\" test.cpp', ['cl', r'/DPATH="C:\\ends\\with\\backslashes\"', 'test.cpp'], False),
(r'cl /DPATH="C:\\ends\\with\\backslashes\\" test.cpp', ['cl', '/DPATH=C:\\\\ends\\\\with\\\\backslashes\\', 'test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\"', 'test.cpp'], True),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\ test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\"', 'test.cpp'], True),
]
else:
test_data = [
(r"'a b c' d e", ['a b c', 'd', 'e'], True),
(r"a/b/c d e", ['a/b/c', 'd', 'e'], True),
(r"a\b\c d e", [r'abc', 'd', 'e'], False),
(r"a\\b\\c d e", [r'a\b\c', 'd', 'e'], False),
(r'"a b c" d e', ['a b c', 'd', 'e'], False),
(r'"a\\b\\c\\" d e', ['a\\b\\c\\', 'd', 'e'], False),
(r"'a\b\c\' d e", ['a\\b\\c\\', 'd', 'e'], True),
(r"'a&b&c' d e", ['a&b&c', 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], False),
(r"'a & b & c d e'", ['a & b & c d e'], True),
(r"abd'e f'g h", [r'abde fg', 'h'], False),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
('g++ -DNAME="Bob" test.cpp', ['g++', '-DNAME=Bob', 'test.cpp'], False),
("g++ '-DNAME=\"Bob\"' test.cpp", ['g++', '-DNAME="Bob"', 'test.cpp'], True),
('g++ -DNAME="Bob, Alice" test.cpp', ['g++', '-DNAME=Bob, Alice', 'test.cpp'], False),
("g++ '-DNAME=\"Bob, Alice\"' test.cpp", ['g++', '-DNAME="Bob, Alice"', 'test.cpp'], True),
]
for (cmd, expected, roundtrip) in test_data:
self.assertEqual(split_args(cmd), expected)
if roundtrip:
self.assertEqual(join_args(expected), cmd)
def test_quote_arg(self):
split_args = mesonbuild.mesonlib.split_args
quote_arg = mesonbuild.mesonlib.quote_arg
if is_windows():
test_data = [
('', '""'),
('arg1', 'arg1'),
('/option1', '/option1'),
('/Ovalue', '/Ovalue'),
('/OBob&Alice', '/OBob&Alice'),
('/Ovalue with spaces', r'"/Ovalue with spaces"'),
(r'/O"value with spaces"', r'"/O\"value with spaces\""'),
(r'/OC:\path with spaces\test.exe', r'"/OC:\path with spaces\test.exe"'),
('/LIBPATH:C:\\path with spaces\\ends\\with\\backslashes\\', r'"/LIBPATH:C:\path with spaces\ends\with\backslashes\\"'),
('/LIBPATH:"C:\\path with spaces\\ends\\with\\backslashes\\\\"', r'"/LIBPATH:\"C:\path with spaces\ends\with\backslashes\\\\\""'),
(r'/DMSG="Alice said: \"Let\'s go\""', r'"/DMSG=\"Alice said: \\\"Let\'s go\\\"\""'),
]
else:
test_data = [
('arg1', 'arg1'),
('--option1', '--option1'),
('-O=value', '-O=value'),
('-O=Bob&Alice', "'-O=Bob&Alice'"),
('-O=value with spaces', "'-O=value with spaces'"),
('-O="value with spaces"', '\'-O=\"value with spaces\"\''),
('-O=/path with spaces/test', '\'-O=/path with spaces/test\''),
('-DMSG="Alice said: \\"Let\'s go\\""', "'-DMSG=\"Alice said: \\\"Let'\"'\"'s go\\\"\"'"),
]
for (arg, expected) in test_data:
self.assertEqual(quote_arg(arg), expected)
self.assertEqual(split_args(expected)[0], arg)
def test_depfile(self):
for (f, target, expdeps) in [
# empty, unknown target
([''], 'unknown', set()),
# simple target & deps
(['meson/foo.o : foo.c foo.h'], 'meson/foo.o', set({'foo.c', 'foo.h'})),
(['meson/foo.o: foo.c foo.h'], 'foo.c', set()),
# get all deps
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'meson/foo.o', set({'foo.c', 'foo.h', 'gen.py'})),
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'foo.c', set({'gen.py'})),
# line continuation, multiple targets
(['foo.o \\', 'foo.h: bar'], 'foo.h', set({'bar'})),
(['foo.o \\', 'foo.h: bar'], 'foo.o', set({'bar'})),
# \\ handling
(['foo: Program\\ F\\iles\\\\X'], 'foo', set({'Program Files\\X'})),
# $ handling
(['f$o.o: c/b'], 'f$o.o', set({'c/b'})),
(['f$$o.o: c/b'], 'f$o.o', set({'c/b'})),
# cycles
(['a: b', 'b: a'], 'a', set({'a', 'b'})),
(['a: b', 'b: a'], 'b', set({'a', 'b'})),
]:
d = mesonbuild.depfile.DepFile(f)
deps = d.get_all_dependencies(target)
self.assertEqual(deps, expdeps)
def test_log_once(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once('foo')
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual, 'foo', actual)
def test_log_once_ansi(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
f.truncate()
mesonbuild.mlog.warning('bar', once=True)
mesonbuild.mlog.warning('bar', once=True)
actual = f.getvalue().strip()
self.assertEqual(actual.count('bar'), 1, actual)
def test_sort_libpaths(self):
sort_libpaths = mesonbuild.dependencies.base.sort_libpaths
self.assertEqual(sort_libpaths(
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/local/lib', '/home/mesonuser/.local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/libdata/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
def test_dependency_factory_order(self):
b = mesonbuild.dependencies.base
with tempfile.TemporaryDirectory() as tmpdir:
with chdir(tmpdir):
env = get_fake_env()
env.scratch_dir = tmpdir
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.PKGCONFIG, b.DependencyMethods.CMAKE]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['pkgconfig', 'cmake'])
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.CMAKE, b.DependencyMethods.PKGCONFIG]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['cmake', 'pkgconfig'])
def test_validate_json(self) -> None:
"""Validate the json schema for the test cases."""
try:
from jsonschema import validate, ValidationError
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('Python jsonschema module not found.')
with Path('data/test.schema.json').open() as f:
schema = json.load(f)
        errors = []  # type: T.List[T.Tuple[str, Exception]]
for p in Path('test cases').glob('**/test.json'):
with p.open() as f:
try:
validate(json.load(f), schema=schema)
except ValidationError as e:
errors.append((p.resolve(), e))
for f, e in errors:
print('Failed to validate: "{}"'.format(f))
print(str(e))
self.assertFalse(errors)
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
hashcounter = re.compile('^ *(#)+')
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open() as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
Builtin-Options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env()
# FIXME: Support other compilers
cc = env.detect_c_compiler(MachineChoice.HOST)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
for comp in (cc, cpp):
for opt in comp.get_options().keys():
self.assertIn(opt, md)
for opt in comp.base_options:
self.assertIn(opt, md)
self.assertNotIn('b_unknown', md)
@staticmethod
def _get_section_content(name, sections, md):
for section in sections:
if section and section.group(1) == name:
try:
next_section = next(sections)
end = next_section.start()
except StopIteration:
end = len(md)
# Extract the content for this section
return md[section.end():end]
raise RuntimeError('Could not find "{}" heading'.format(name))
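    # Usage sketch for _get_section_content() (names illustrative): given
    #
    #     sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
    #     content = DataTests._get_section_content('Universal options', sections, md)
    #
    # the iterator is consumed up to the named heading and the text between
    # that heading and the next one (or the end of the document) is returned.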
def test_builtin_options_documented(self):
'''
Test that universal options and base options are documented in
Builtin-Options.md.
'''
from itertools import tee
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
found_entries = set()
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
# Extract the content for this section
content = self._get_section_content("Universal options", sections, md)
subsections = tee(re.finditer(r"^### (.+)$", content, re.MULTILINE))
subcontent1 = self._get_section_content("Directories", subsections[0], content)
subcontent2 = self._get_section_content("Core options", subsections[1], content)
for subcontent in (subcontent1, subcontent2):
# Find the option names
options = set()
# Match either a table row or a table heading separator: | ------ |
rows = re.finditer(r"^\|(?: (\w+) .* | *-+ *)\|", subcontent, re.MULTILINE)
# Skip the header of the first table
next(rows)
# Skip the heading separator of the first table
next(rows)
for m in rows:
value = m.group(1)
# End when the `buildtype` table starts
if value is None:
break
options.add(value)
self.assertEqual(len(found_entries & options), 0)
found_entries |= options
self.assertEqual(found_entries, set([
*mesonbuild.coredata.BUILTIN_OPTIONS.keys(),
*mesonbuild.coredata.BUILTIN_OPTIONS_PER_MACHINE.keys()
]))
# Check that `buildtype` table inside `Core options` matches how
# setting of builtin options behaves
#
# Find all tables inside this subsection
tables = re.finditer(r"^\| (\w+) .* \|\n\| *[-|\s]+ *\|$", subcontent2, re.MULTILINE)
# Get the table we want using the header of the first column
table = self._get_section_content('buildtype', tables, subcontent2)
# Get table row data
rows = re.finditer(r"^\|(?: (\w+)\s+\| (\w+)\s+\| (\w+) .* | *-+ *)\|", table, re.MULTILINE)
env = get_fake_env()
for m in rows:
buildtype, debug, opt = m.groups()
if debug == 'true':
debug = True
elif debug == 'false':
debug = False
else:
raise RuntimeError('Invalid debug value {!r} in row:\n{}'.format(debug, m.group()))
env.coredata.set_builtin_option('buildtype', buildtype)
self.assertEqual(env.coredata.builtins['buildtype'].value, buildtype)
self.assertEqual(env.coredata.builtins['optimization'].value, opt)
self.assertEqual(env.coredata.builtins['debug'].value, debug)
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
content = self._get_section_content("CPU families", sections, md)
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
        Test that each markdown file in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions:
self.assertIn(f, toc)
def test_vim_syntax_highlighting(self):
'''
Ensure that vim syntax highlighting files were updated for new
functions in the global namespace in build files.
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
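        # The vim syntax file parsed above is expected to look roughly like
        # this (illustrative):
        #
        #     syn keyword mesonBuiltin
        #       \ project
        #       \ executable
        #
        # so splitting the regex match on '\' and stripping each piece
        # recovers the set of highlighted keywords.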
@unittest.skipIf(is_pull(), 'Skipping because this is a pull request')
def test_json_grammar_syntax_highlighting(self):
'''
Ensure that syntax highlighting JSON grammar written by TingPing was
updated for new functions in the global namespace in build files.
https://github.com/TingPing/language-meson/
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
url = 'https://raw.githubusercontent.com/TingPing/language-meson/master/grammars/meson.json'
try:
# Use a timeout to avoid blocking forever in case the network is
# slow or unavailable in a weird way
r = urllib.request.urlopen(url, timeout=URLOPEN_TIMEOUT)
except urllib.error.URLError as e:
# Skip test when network is not available, such as during packaging
# by a distro or Flatpak
if not isinstance(e, urllib.error.HTTPError):
raise unittest.SkipTest('Network unavailable')
# Don't fail the test if github is down, but do fail if 4xx
if e.code >= 500:
raise unittest.SkipTest('Server error ' + str(e.code))
raise e
# On Python 3.5, we must decode bytes to string. Newer versions don't require that.
grammar = json.loads(r.read().decode('utf-8', 'surrogatepass'))
for each in grammar['patterns']:
if 'name' in each and each['name'] == 'support.function.builtin.meson':
# The string is of the form: (?x)\\b(func1|func2|...\n)\\b\\s*(?=\\() and
# we convert that to [func1, func2, ...] without using regex to parse regex
funcs = set(each['match'].split('\\b(')[1].split('\n')[0].split('|'))
if 'name' in each and each['name'] == 'support.variable.meson':
# \\b(builtin1|builtin2...)\\b
builtin = set(each['match'].split('\\b(')[1].split(')\\b')[0].split('|'))
self.assertEqual(builtin, set(interp.builtin.keys()))
self.assertEqual(funcs, set(interp.funcs.keys()))
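        # Slicing sketch for the 'match' parsing above (hypothetical value):
        #
        #     m = '(?x)\\b(func1|func2\n)\\b\\s*(?=\\()'
        #     funcs = set(m.split('\\b(')[1].split('\n')[0].split('|'))
        #     # funcs == {'func1', 'func2'}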
def test_all_functions_defined_in_ast_interpreter(self):
'''
        Ensure that all functions defined in the Interpreter are also defined
in the AstInterpreter (and vice versa).
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
astint = AstInterpreter('.', '', '')
self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
def test_mesondata_is_up_to_date(self):
from mesonbuild.mesondata import mesondata
err_msg = textwrap.dedent('''
###########################################################
### mesonbuild.mesondata is not up-to-date ###
### Please regenerate it by running tools/gen_data.py ###
###########################################################
''')
root_dir = Path(__file__).resolve().parent
mesonbuild_dir = root_dir / 'mesonbuild'
data_dirs = mesonbuild_dir.glob('**/data')
        data_files = []  # type: T.List[T.Tuple[str, str]]
for i in data_dirs:
for p in i.iterdir():
data_files += [(p.relative_to(mesonbuild_dir).as_posix(), hashlib.sha256(p.read_bytes()).hexdigest())]
current_files = set(mesondata.keys())
scanned_files = set([x[0] for x in data_files])
self.assertSetEqual(current_files, scanned_files, err_msg + 'Data files were added or removed\n')
errors = []
for i in data_files:
if mesondata[i[0]].sha256sum != i[1]:
errors += [i[0]]
self.assertListEqual(errors, [], err_msg + 'Files were changed')
class BasePlatformTests(unittest.TestCase):
prefix = '/usr'
libdir = 'lib'
def setUp(self):
super().setUp()
self.maxDiff = None
src_root = os.path.dirname(__file__)
src_root = os.path.join(os.getcwd(), src_root)
self.src_root = src_root
# Get the backend
# FIXME: Extract this from argv?
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
self.meson_args = ['--backend=' + self.backend.name]
self.meson_native_file = None
self.meson_cross_file = None
self.meson_command = python_command + [get_meson_script()]
self.setup_command = self.meson_command + self.meson_args
self.mconf_command = self.meson_command + ['configure']
self.mintro_command = self.meson_command + ['introspect']
self.wrap_command = self.meson_command + ['wrap']
self.rewrite_command = self.meson_command + ['rewrite']
# Backend-specific build commands
self.build_command, self.clean_command, self.test_command, self.install_command, \
self.uninstall_command = get_backend_commands(self.backend)
# Test directories
self.common_test_dir = os.path.join(src_root, 'test cases/common')
self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
self.linuxlike_test_dir = os.path.join(src_root, 'test cases/linuxlike')
# Misc stuff
self.orig_env = os.environ.copy()
if self.backend is Backend.ninja:
self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
else:
# VS doesn't have a stable output when no changes are done
# XCode backend is untested with unit tests, help welcome!
self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)]
self.builddirs = []
self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
if not is_cygwin():
# Keep builddirs inside the source tree so that virus scanners
# don't complain
newdir = tempfile.mkdtemp(dir=os.getcwd())
else:
# But not on Cygwin because that breaks the umask tests. See:
# https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523
newdir = tempfile.mkdtemp()
        # In case the directory is inside a symlinked directory, find the real
        # path; otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(newdir)
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print("{!r} doesn't exist".format(log))
return
with open(log, 'r', encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, *, workdir=None, override_envvars=None):
'''
Run a command while printing the stdout and stderr to stdout,
and also return a copy of it
'''
        # If this call hangs, CI will just abort. It is very hard to
        # distinguish between a CI issue and a test bug in that case. Set a
        # timeout and fail loudly instead.
if override_envvars is None:
env = None
else:
env = os.environ.copy()
env.update(override_envvars)
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env,
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
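    # Typical call (illustrative):
    #
    #     out = self._run(self.meson_command + ['--version'])
    #
    # returns the combined stdout+stderr text, raises unittest.SkipTest when
    # a failing command printed MESON_SKIP_TEST, and CalledProcessError for
    # other non-zero return codes.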
def init(self, srcdir, *,
extra_args=None,
default_args=True,
inprocess=False,
override_envvars=None,
workdir=None):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix]
if self.libdir:
args += ['--libdir', self.libdir]
if self.meson_native_file:
args += ['--native-file', self.meson_native_file]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args, override_envvars)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except Exception:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars, workdir=workdir)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except Exception:
self._print_meson_log()
raise
return out
def build(self, target=None, *, extra_args=None, override_envvars=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars)
def clean(self, *, override_envvars=None):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars)
def run_tests(self, *, inprocess=False, override_envvars=None):
if not inprocess:
self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars)
else:
with mock.patch.dict(os.environ, override_envvars):
run_mtest_inprocess(['-C', self.builddir])
def install(self, *, use_destdir=True, override_envvars=None):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
if use_destdir:
destdir = {'DESTDIR': self.installdir}
if override_envvars is None:
override_envvars = destdir
else:
override_envvars.update(destdir)
self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars)
def uninstall(self, *, override_envvars=None):
self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars)
def run_target(self, target, *, override_envvars=None):
'''
Run a Ninja target while printing the stdout and stderr to stdout,
and also return a copy of it
'''
return self.build(target=target, override_envvars=override_envvars)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
def wipe(self):
windows_proof_rmtree(self.builddir)
def utime(self, f):
ensure_backend_detects_changes(self.backend)
os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name))
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
        # If Ninja is using .rsp files, generate them, read their contents,
        # and substitute those contents in as the command for every compile
        # command in the parsed json.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, 'r', encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
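    # Note on the .rsp expansion above: `-d keeprsp -n` asks Ninja for a dry
    # run that keeps the generated response files, so an entry such as
    # (illustrative)
    #
    #     {'command': 'cl @sub/target.rsp', ...}
    #
    # can be expanded back into the full command line stored in that file.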
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
        Fetch a list of command-lines run by meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def get_meson_log_sanitychecks(self):
'''
Same as above, but for the sanity checks that were run
'''
log = self.get_meson_log()
prefix = 'Sanity check compiler command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
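    # e.g. on Windows PurePath('foo/bar') == PurePath('foo\\bar'), and the
    # comparison is case-insensitive there, so the assertion tolerates the
    # usual platform differences in path spelling.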
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = '{!r} does not end with {!r}'.format(path, basename)
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
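    # e.g. assertPathBasenameEqual('/usr/lib/', 'lib') passes because
    # PurePath('/usr/lib/').parts[-1] == 'lib', whereas os.path.basename()
    # would have returned '' due to the trailing slash.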
def assertReconfiguredBuildIsNoop(self):
'Assert that we reconfigured and then there was nothing to do'
ret = self.build()
self.assertIn('The Meson build system', ret)
if self.backend is Backend.ninja:
for line in ret.split('\n'):
if line in self.no_rebuild_stdout:
break
else:
raise AssertionError('build was reconfigured, but was not no-op')
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
# XXX: Note CustomBuild did indeed rebuild, because of the regen checker!
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertBuildIsNoop(self):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target of each type said that no rebuild was done
# We always have at least one CustomBuild target for the regen checker
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('CustomBuild:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertRebuiltTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn('Linking target {}'.format(target), ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
@staticmethod
def get_target_from_filename(filename):
base = os.path.splitext(filename)[0]
if base.startswith(('lib', 'cyg')):
return base[3:]
return base
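    # For example (illustrative): 'libfoo.so' -> 'foo', 'cygfoo.dll' -> 'foo',
    # 'foo.exe' -> 'foo'; only the extension and a leading 'lib'/'cyg' prefix
    # are stripped.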
def assertBuildRelinkedOnlyTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
linked_targets = []
for line in ret.split('\n'):
if 'Linking target' in line:
fname = line.rsplit('target ')[-1]
linked_targets.append(self.get_target_from_filename(fname))
self.assertEqual(linked_targets, [target])
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile(r'Link:\n [^\n]*link.exe[^\n]*/OUT:".\\([^"]*)"', flags=re.IGNORECASE)
matches = linkre.findall(ret)
self.assertEqual(len(matches), 1, msg=matches)
self.assertEqual(self.get_target_from_filename(matches[0]), target)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertPathExists(self, path):
m = 'Path {!r} should exist'.format(path)
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = 'Path {!r} should not exist'.format(path)
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
'''
Tests that should run on all platforms
'''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
self.init(testdir, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
def conf_file(in_data, confdata):
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
def test_do_conf_file_by_format(self):
def conf_str(in_data, confdata, vformat):
(result, missing_variables, confdata_useless) = mesonbuild.mesonlib.do_conf_str(in_data, confdata, variable_format = vformat)
return '\n'.join(result)
def check_formats(confdata, result):
self.assertEqual(conf_str(['#mesondefine VAR'], confdata, 'meson'), result)
self.assertEqual(conf_str(['#cmakedefine VAR ${VAR}'], confdata, 'cmake'), result)
self.assertEqual(conf_str(['#cmakedefine VAR @VAR@'], confdata, 'cmake@'), result)
confdata = ConfigurationData()
        # Key error as they do not exist
check_formats(confdata, '/* #undef VAR */\n')
# Check boolean
confdata.values = {'VAR': (False, 'description')}
check_formats(confdata, '#undef VAR\n')
confdata.values = {'VAR': (True, 'description')}
check_formats(confdata, '#define VAR\n')
# Check string
confdata.values = {'VAR': ('value', 'description')}
check_formats(confdata, '#define VAR value\n')
# Check integer
confdata.values = {'VAR': (10, 'description')}
check_formats(confdata, '#define VAR 10\n')
# Check multiple string with cmake formats
confdata.values = {'VAR': ('value', 'description')}
self.assertEqual(conf_str(['#cmakedefine VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value')
self.assertEqual(conf_str(['#cmakedefine VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value')
# Handles meson format exceptions
# Unknown format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'unknown_format')
# More than 2 params in mesondefine
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'meson')
# Mismatched line with format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#cmakedefine VAR'], confdata, 'meson')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake@')
        # List value in confdata
confdata.values = {'VAR': (['value'], 'description')}
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'meson')
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir work. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
# on Windows, /someabs is *not* an absolute path
prefix = 'x:/someabs' if is_windows() else '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args=extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
if is_windows():
args = ['--prefix', 'x:/opt', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, extra_args=args)
self.wipe()
# libdir not being inside prefix is not ok
if is_windows():
args = ['--prefix', 'x:/usr', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
if is_windows():
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=x:/opt', False)
else:
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
if mesonbuild.mesonlib.default_prefix() == '/usr/local':
expected[None] = expected['/usr/local']
for prefix in expected:
args = []
if prefix:
args += ['--prefix', prefix]
self.init(testdir, extra_args=args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '168 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, extra_args=args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '54 run target')
self.init(testdir)
self.run_target('check_exists')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_subdir_introspection(self):
'''
Test that the Meson introspection API also contains subdir install information
https://github.com/mesonbuild/meson/issues/5556
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
intro = self.introspect('--installed')
expected = {
'sub2': 'share/sub2',
'subdir/sub1': 'share/sub1',
'subdir/sub_elided': 'share',
'sub1': 'share/sub1',
'sub/sub1': 'share/sub1',
'sub_elided': 'share',
'nested_elided/sub': 'share',
}
self.assertEqual(len(intro), len(expected))
# Convert expected to PurePath
expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
for src, dst in expected_converted.items():
self.assertIn(src, intro_converted)
self.assertEqual(dst, intro_converted[src])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
TODO Change the format to a list officially in a followup PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '144 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_install_log_content(self):
'''
Tests that the install-log.txt is consistent with the installed files and directories.
Specifically checks that the log file only contains one entry per file/directory.
https://github.com/mesonbuild/meson/issues/4499
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
installpath = Path(self.installdir)
# Find installed files and directories
expected = {installpath: 0}
for name in installpath.rglob('*'):
expected[name] = 0
# Find logged files and directories
with Path(self.builddir, 'meson-logs', 'install-log.txt').open() as f:
logged = list(map(lambda l: Path(l.strip()),
filter(lambda l: not l.startswith('#'),
f.readlines())))
for name in logged:
self.assertTrue(name in expected, 'Log contains extra entry {}'.format(name))
expected[name] += 1
for name, count in expected.items():
self.assertGreater(count, 0, 'Log is missing entry for {}'.format(name))
self.assertLess(count, 2, 'Log has multiple entries for {}'.format(name))
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_force_fallback_for(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--force-fallback-for=zlib,foo'])
self.build()
self.run_tests()
def test_env_ops_dont_stack(self):
'''
Test that env ops prepend/append do not stack, and that this usage issues a warning
'''
testdir = os.path.join(self.unit_test_dir, '63 test env does not stack')
out = self.init(testdir)
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_APPEND')
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_PREPEND')
self.assertNotRegex(out, r'WARNING: Overriding.*TEST_VAR_SET')
self.run_tests()
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
basic_log = f.read()
# Run buggy test with setup that has env that will make it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt')) as f:
vg_log = f.read()
self.assertFalse('TEST_ENV is set' in basic_log)
self.assertFalse('Memcheck' in basic_log)
self.assertTrue('TEST_ENV is set' in vg_log)
self.assertTrue('Memcheck' in vg_log)
# Run buggy test with setup without env that will pass
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
def test_testsetup_selection(self):
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
self.init(testdir)
self.build()
        # Running tests without --setup causes the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
default_log = f.read()
# Run tests with explicitly using the same setup that is set as default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt')) as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt')) as f:
other_log = f.read()
self.assertTrue('ENV_A is 1' in default_log)
self.assertTrue('ENV_B is 2' in default_log)
self.assertTrue('ENV_C is 2' in default_log)
self.assertTrue('ENV_A is 1' in mydefault_log)
self.assertTrue('ENV_B is 2' in mydefault_log)
self.assertTrue('ENV_C is 2' in mydefault_log)
self.assertTrue('ENV_A is 1' in other_log)
self.assertTrue('ENV_B is 3' in other_log)
self.assertTrue('ENV_C is 2' in other_log)
def assertFailedTestCount(self, failure_count, command):
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
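    # This relies on `meson test` using the number of failed tests as its
    # exit code, so e.g. assertFailedTestCount(2, self.mtest_command) passes
    # only when exactly two tests fail.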
def test_suite_selection(self):
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
self.assertFailedTestCount(4, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '133 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
if mesonbuild.environment.detect_msys2_arch() and ('MESON_RSP_THRESHOLD' in os.environ):
raise unittest.SkipTest('Test does not yet support gcc rsp files on msys2')
testdir = os.path.join(self.common_test_dir, '134 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
            raise Exception('Could not find someexe and somefxe commands')
# Check include order for 'someexe'
incs = [a for a in split_args(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# Need to run the build so the private dir is created.
self.build()
pdirs = glob(os.path.join(self.builddir, 'sub4/someexe*.p'))
self.assertEqual(len(pdirs), 1)
privdir = pdirs[0][len(self.builddir)+1:]
self.assertPathEqual(incs[0], "-I" + privdir)
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in split_args(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
pdirs = glob(os.path.join(self.builddir, 'somefxe*.p'))
self.assertEqual(len(pdirs), 1)
privdir = pdirs[0][len(self.builddir)+1:]
self.assertPathEqual(incs[0], '-I' + privdir)
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
'''
Test that automatic compiler detection and setting from the environment
both work just fine. This is needed because while running project tests
and other unit tests, we always read CC/CXX/etc from the environment.
'''
gnu = mesonbuild.compilers.GnuCompiler
clang = mesonbuild.compilers.ClangCompiler
intel = mesonbuild.compilers.IntelGnuLikeCompiler
msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
ar = mesonbuild.linkers.ArLinker
lib = mesonbuild.linkers.VisualStudioLinker
langs = [('c', 'CC'), ('cpp', 'CXX')]
if not is_windows() and platform.machine().lower() != 'e2k':
langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
env = get_fake_env(testdir, self.builddir, self.prefix)
for lang, evar in langs:
# Detect with evar and do sanity checks on that
if evar in os.environ:
ecc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(ecc.version)
elinker = env.detect_static_linker(ecc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop(evar)
# Very rough/strict heuristics. Would never work for actual
# compiler detection, but should be ok for the tests.
ebase = os.path.basename(evalue)
if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
self.assertIsInstance(ecc, gnu)
self.assertIsInstance(elinker, ar)
elif 'clang-cl' in ebase:
self.assertIsInstance(ecc, clangcl)
self.assertIsInstance(elinker, lib)
elif 'clang' in ebase:
self.assertIsInstance(ecc, clang)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('ic'):
self.assertIsInstance(ecc, intel)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('cl'):
self.assertIsInstance(ecc, msvc)
self.assertIsInstance(elinker, lib)
else:
raise AssertionError('Unknown compiler {!r}'.format(evalue))
# Check that we actually used the evalue correctly as the compiler
self.assertEqual(ecc.get_exelist(), split_args(evalue))
# Do auto-detection of compiler based on platform, PATH, etc.
cc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(cc.version)
linker = env.detect_static_linker(cc)
# Check compiler type
if isinstance(cc, gnu):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_sunos():
self.assertIsInstance(cc.linker, (mesonbuild.linkers.SolarisDynamicLinker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin))
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, clangcl):
self.assertIsInstance(linker, lib)
self.assertIsInstance(cc.linker, mesonbuild.linkers.ClangClDynamicLinker)
if isinstance(cc, clang):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
# This is clang, not clang-cl
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, intel):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
self.assertIsInstance(cc.linker, mesonbuild.linkers.XilinkDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuDynamicLinker)
if isinstance(cc, msvc):
self.assertTrue(is_windows())
self.assertIsInstance(linker, lib)
self.assertEqual(cc.id, 'msvc')
self.assertTrue(hasattr(cc, 'is_64'))
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
# If we're on Windows CI, we know what the compiler will be
if 'arch' in os.environ:
if os.environ['arch'] == 'x64':
self.assertTrue(cc.is_64)
else:
self.assertFalse(cc.is_64)
# Set evar ourselves to a wrapper script that just calls the same
# exelist + some argument. This is meant to test that setting
# something like `ccache gcc -pipe` or `distcc ccache gcc` works.
wrapper = os.path.join(testdir, 'compiler wrapper.py')
wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
wrappercc_s = ''
for w in wrappercc:
wrappercc_s += quote_arg(w) + ' '
os.environ[evar] = wrappercc_s
wcc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
# Check static linker too
wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
wrapperlinker_s = ''
for w in wrapperlinker:
wrapperlinker_s += quote_arg(w) + ' '
os.environ['AR'] = wrapperlinker_s
wlinker = env.detect_static_linker(wcc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop('AR')
# Must be the same type since it's a wrapper around the same exelist
self.assertIs(type(cc), type(wcc))
self.assertIs(type(linker), type(wlinker))
# Ensure that the exelist is correct
self.assertEqual(wcc.get_exelist(), wrappercc)
self.assertEqual(wlinker.get_exelist(), wrapperlinker)
# Ensure that the version detection worked correctly
self.assertEqual(cc.version, wcc.version)
if hasattr(cc, 'is_64'):
self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '137 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
for cmd in self.get_compdb():
# Get compiler
split = split_args(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
not LDFLAGS.
'''
testdir = os.path.join(self.common_test_dir, '136 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: this list can't have \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse split_args() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
for env_var in ['CPPFLAGS', 'CFLAGS']:
env = {}
env[env_var] = '-D{}="{}"'.format(define, value)
            env['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
self.init(testdir, extra_args=['-D{}={}'.format(define, value)], override_envvars=env)
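        # For example, the CPPFLAGS iteration above effectively configures with
        # CPPFLAGS='-DMESON_TEST_DEFINE_VALUE="spaces and fun..."' while LDFLAGS
        # carries a decoy define that must never reach the preprocessor checks.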
def test_custom_target_exe_data_deterministic(self):
testdir = os.path.join(self.common_test_dir, '113 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_noop_changes_cause_no_rebuilds(self):
'''
Test that no-op changes to the build files such as mtime do not cause
a rebuild of anything.
'''
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of meson.build should not rebuild anything
self.utime(os.path.join(testdir, 'meson.build'))
self.assertReconfiguredBuildIsNoop()
        # Changing mtime of libfile.c should rebuild the library, but not relink the executable
self.utime(os.path.join(testdir, 'libfile.c'))
self.assertBuildRelinkedOnlyTarget('mylib')
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertBuildRelinkedOnlyTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '60 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertBuildRelinkedOnlyTarget('prog')
def test_source_generator_program_cause_rebuild(self):
'''
Test that changes to generator programs in the source tree cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '94 gen extra')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of generator should rebuild the executable
self.utime(os.path.join(testdir, 'srcgen.py'))
self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
self.dist_impl(_git_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def has_working_hg(self):
if not shutil.which('hg'):
return False
try:
# This check should not be necessary, but
# CI under macOS passes the above test even
# though Mercurial is not installed.
if subprocess.call(['hg', '--version'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
return False
return True
except FileNotFoundError:
return False
def test_dist_hg(self):
if not self.has_working_hg():
raise unittest.SkipTest('Mercurial not found or broken.')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <teh_coderz@example.com>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init, include_subprojects=False)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def create_dummy_subproject(self, project_dir, name):
path = os.path.join(project_dir, 'subprojects', name)
os.makedirs(path)
with open(os.path.join(path, 'meson.build'), 'w') as ofile:
ofile.write("project('{}')".format(name))
return path
def dist_impl(self, vcs_init, include_subprojects=True):
# Create this on the fly because having rogue .git directories inside
# the source tree leads to all kinds of trouble.
with tempfile.TemporaryDirectory() as project_dir:
with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
ofile.write('''project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
subproject('vcssub', required : false)
subproject('tarballsub', required : false)
''')
with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
ofile.write('''#include<stdio.h>
int main(int argc, char **argv) {
printf("I am a distribution test.\\n");
return 0;
}
''')
xz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
zip_distfile = os.path.join(self.distdir, 'disttest-1.4.3.zip')
zip_checksumfile = zip_distfile + '.sha256sum'
vcs_init(project_dir)
if include_subprojects:
vcs_init(self.create_dummy_subproject(project_dir, 'vcssub'))
self.create_dummy_subproject(project_dir, 'tarballsub')
self.create_dummy_subproject(project_dir, 'unusedsub')
self.init(project_dir)
self.build('dist')
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
self.assertPathDoesNotExist(zip_distfile)
self.assertPathDoesNotExist(zip_checksumfile)
self._run(self.meson_command + ['dist', '--formats', 'zip'],
workdir=self.builddir)
self.assertPathExists(zip_distfile)
self.assertPathExists(zip_checksumfile)
if include_subprojects:
z = zipfile.ZipFile(zip_distfile)
self.assertEqual(sorted(['disttest-1.4.3/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c']),
sorted(z.namelist()))
self._run(self.meson_command + ['dist', '--formats', 'zip', '--include-subprojects'],
workdir=self.builddir)
z = zipfile.ZipFile(zip_distfile)
self.assertEqual(sorted(['disttest-1.4.3/',
'disttest-1.4.3/subprojects/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c',
'disttest-1.4.3/subprojects/tarballsub/',
'disttest-1.4.3/subprojects/vcssub/',
'disttest-1.4.3/subprojects/tarballsub/meson.build',
'disttest-1.4.3/subprojects/vcssub/meson.build']),
sorted(z.namelist()))
def test_rpath_uses_ORIGIN(self):
'''
Test that built targets use $ORIGIN in rpath, which ensures that they
are relocatable and ensures that builds are reproducible since the
build directory won't get embedded into the built binaries.
'''
if is_windows() or is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.common_test_dir, '42 library chain')
self.init(testdir)
self.build()
for each in ('prog', 'subdir/liblib1.so', ):
rpath = get_rpath(os.path.join(self.builddir, each))
self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each))
if is_dragonflybsd():
# DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
# so ignore that.
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
rpaths = rpath.split(':')[1:]
else:
rpaths = rpath.split(':')
for path in rpaths:
self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
# These two don't link to anything else, so they do not need an rpath entry.
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
rpath = get_rpath(os.path.join(self.builddir, each))
if is_dragonflybsd():
# The rpath should be equal to /usr/lib/gccVERSION
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
self.assertEqual(len(rpath.split(':')), 1)
else:
                self.assertIsNone(rpath)
def test_dash_d_dedup(self):
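        '''Test that repeated detached define arguments ('-D FOO -D BAR')
        survive in the compile command and are not collapsed by argument
        de-duplication, in whichever quoting style the compiler uses.'''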
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '154 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '154 reserved targets')
targets = mesonbuild.coredata.FORBIDDEN_TARGET_NAMES
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
def detect_prebuild_env(self):
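        '''Detect the host toolchain pieces used by the prebuilt-artifact tests.
        On a typical Linux/gcc setup the result is roughly
        (<gcc C compiler>, <ar static linker>, 'o', 'so'); illustrative only,
        the exact objects depend on the detected toolchain.'''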
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
def pbcompile(self, compiler, source, objectfile, extra_args=None):
cmd = compiler.get_exelist()
extra_args = extra_args or []
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
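        # The final cmd is e.g. 'cl /nologo /Foprebuilt.obj /c source.c' under
        # MSVC syntax, or 'cc -c source.c -o prebuilt.o' otherwise (illustrative;
        # the exelist comes from the detected compiler).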
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
if extra_args is None:
extra_args = []
        link_cmd = linker.get_exelist()
link_cmd += linker.get_always_args()
link_cmd += linker.get_std_link_args()
link_cmd += linker.get_output_args(outfile)
link_cmd += [objectfile]
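        # With a GNU toolchain this typically expands to something like
        # 'ar csr libbest.a best.o' (illustrative; the argv comes from the
        # detected static linker object).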
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
if extra_args is None:
extra_args = []
if compiler.get_argument_syntax() == 'msvc':
link_cmd = compiler.get_linker_exelist() + [
'/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
'/OUT:' + outfile, objectfile]
else:
if not (compiler.info.is_windows() or compiler.info.is_cygwin() or compiler.info.is_darwin()):
extra_args += ['-fPIC']
link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
if not mesonbuild.mesonlib.is_osx():
link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
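            # On Linux/gcc this typically runs something like
            # 'gcc -shared -o libfoo.so foo.o -Wl,-soname=libfoo.so'
            # (illustrative values; MSVC uses the /DLL /IMPLIB: form above).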
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_shared_lib(self):
(cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
source = os.path.join(tdir, 'alexandria.c')
objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
impfile = os.path.join(tdir, 'alexandria.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
elif is_cygwin():
shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
else:
shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(tdir, 'alexandria.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_static(self):
'''
Test that the we prefer static libraries when `static: true` is
passed to dependency() with pkg-config. Can't be an ordinary test
because we need to build libs and try to find them from meson.build
Also test that it's not a hard error to have unsatisfiable library deps
since system libraries -lm will never be found statically.
https://github.com/mesonbuild/meson/issues/2785
'''
(cc, stlinker, objext, shext) = self.detect_prebuild_env()
testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
source = os.path.join(testdir, 'foo.c')
objectfile = os.path.join(testdir, 'foo.' + objext)
stlibfile = os.path.join(testdir, 'libfoo.a')
impfile = os.path.join(testdir, 'foo.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(testdir, 'foo.' + shext)
elif is_cygwin():
shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
else:
shlibfile = os.path.join(testdir, 'libfoo.' + shext)
# Build libs
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run test
try:
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir})
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(testdir, 'foo.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
os.unlink(fname)
@skipIfNoPkgconfig
@mock.patch.dict(os.environ)
def test_pkgconfig_gen_escaping(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix(), '-DLIBFOO']
# pkg-config and pkgconf does not respect the same order
self.assertEqual(sorted(foo_dep.get_compile_args()), sorted(cargs))
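        # The key point: '-L/usr/with spaces/lib' must survive as a single argv
        # element; naive whitespace splitting of pkg-config output would break
        # it in two.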
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
def opt_has(self, name, value):
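        '''Assert that the introspected build option `name` currently has `value`.'''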
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
        self.assertTrue(found, 'Option {!r} not found in introspect data.'.format(name))
def test_free_stringarray_setting(self):
testdir = os.path.join(self.common_test_dir, '43 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
def test_options_with_choices_changing(self) -> None:
"""Detect when options like arrays or combos have their choices change."""
testdir = Path(os.path.join(self.unit_test_dir, '84 change option choices'))
options1 = str(testdir / 'meson_options.1.txt')
options2 = str(testdir / 'meson_options.2.txt')
# Test that old options are changed to the new defaults if they are not valid
real_options = str(testdir / 'meson_options.txt')
self.addCleanup(os.unlink, real_options)
shutil.copy(options1, real_options)
self.init(str(testdir))
shutil.copy(options2, real_options)
self.build()
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == 'combo':
self.assertEqual(item['value'], 'b')
self.assertEqual(item['choices'], ['b', 'c', 'd'])
            elif item['name'] == 'array':
self.assertEqual(item['value'], ['b'])
self.assertEqual(item['choices'], ['b', 'c', 'd'])
self.wipe()
# When the old options are valid they should remain
shutil.copy(options1, real_options)
self.init(str(testdir), extra_args=['-Dcombo=c', '-Darray=b,c'])
shutil.copy(options2, real_options)
self.build()
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == 'combo':
self.assertEqual(item['value'], 'c')
self.assertEqual(item['choices'], ['b', 'c', 'd'])
            elif item['name'] == 'array':
self.assertEqual(item['value'], ['b', 'c'])
self.assertEqual(item['choices'], ['b', 'c', 'd'])
def test_subproject_promotion(self):
testdir = os.path.join(self.unit_test_dir, '12 promote')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
s3dir = os.path.join(spdir, 's3')
scommondir = os.path.join(spdir, 'scommon')
self.assertFalse(os.path.isdir(s3dir))
subprocess.check_call(self.wrap_command + ['promote', 's3'], cwd=workdir)
self.assertTrue(os.path.isdir(s3dir))
self.assertFalse(os.path.isdir(scommondir))
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isdir(scommondir))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
self.assertTrue(os.path.isdir(scommondir))
promoted_wrap = os.path.join(spdir, 'athing.wrap')
self.assertFalse(os.path.isfile(promoted_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
self.assertTrue(os.path.isfile(promoted_wrap))
self.init(workdir)
self.build()
def test_subproject_promotion_wrap(self):
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
self.new_builddir()
out = self.init(tdir, workdir=wd)
expected = os.path.join(relpath(tdir, self.src_root), 'meson.build')
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, out)
def test_error_location_path(self):
'''Test locations in meson errors contain correct paths'''
# this list contains errors from all the different steps in the
# lexer/parser/interpreter we have tests for.
for (t, f) in [
('10 out of bounds', 'meson.build'),
('18 wrong plusassign', 'meson.build'),
('61 bad option argument', 'meson_options.txt'),
('102 subdir parse error', os.path.join('subdir', 'meson.build')),
('103 invalid option file', 'meson_options.txt'),
]:
tdir = os.path.join(self.src_root, 'test cases', 'failing', t)
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
try:
self.init(tdir, workdir=wd)
except subprocess.CalledProcessError as e:
expected = os.path.join('test cases', 'failing', t, f)
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, e.output)
else:
self.fail('configure unexpectedly succeeded')
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
def test_templates(self):
ninja = detect_ninja()
if ninja is None:
raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
langs = ['c']
env = get_fake_env()
try:
env.detect_cpp_compiler(MachineChoice.HOST)
langs.append('cpp')
except EnvironmentException:
pass
try:
env.detect_cs_compiler(MachineChoice.HOST)
langs.append('cs')
except EnvironmentException:
pass
try:
env.detect_d_compiler(MachineChoice.HOST)
langs.append('d')
except EnvironmentException:
pass
try:
env.detect_java_compiler(MachineChoice.HOST)
langs.append('java')
except EnvironmentException:
pass
try:
env.detect_cuda_compiler(MachineChoice.HOST)
langs.append('cuda')
except EnvironmentException:
pass
try:
env.detect_fortran_compiler(MachineChoice.HOST)
langs.append('fortran')
except EnvironmentException:
pass
try:
env.detect_objc_compiler(MachineChoice.HOST)
langs.append('objc')
except EnvironmentException:
pass
try:
env.detect_objcpp_compiler(MachineChoice.HOST)
langs.append('objcpp')
except EnvironmentException:
pass
# FIXME: omitting rust as Windows AppVeyor CI finds Rust but doesn't link correctly
if not is_windows():
try:
env.detect_rust_compiler(MachineChoice.HOST)
langs.append('rust')
except EnvironmentException:
pass
for lang in langs:
for target_type in ('executable', 'library'):
# test empty directory
with tempfile.TemporaryDirectory() as tmpdir:
self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
workdir=tmpdir)
self._run(self.setup_command + ['--backend=ninja', 'builddir'],
workdir=tmpdir)
self._run(ninja,
workdir=os.path.join(tmpdir, 'builddir'))
# test directory with existing code file
if lang in ('c', 'cpp', 'd'):
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
f.write('int main(void) {}')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
                elif lang == 'java':
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'Foo.' + lang), 'w') as f:
f.write('public class Foo { public static void main() {} }')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '177 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '186 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
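        '''Taking the build directory lock twice must raise MesonException.'''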
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
Test that link_with: a shared module issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
msg = ('''WARNING: target links against shared modules. This is not
recommended as it is not supported on some platforms''')
self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
libdir_flags = ['-L']
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() in {'msvc', 'clang-cl', 'intel-cl'}:
# msvc-like compiler, also test it with msvc-specific flags
libdir_flags += ['/LIBPATH:', '-LIBPATH:']
else:
# static libraries are not linkable with -l with msvc because meson installs them
# as .a files which unix_args_to_native will not know as it expects libraries to use
# .lib as extension. For a DLL the import library is installed as .lib. Thus for msvc
# this tests needs to use shared libraries to test the path resolving logic in the
# dependency generation code path.
extra_args = ['--default-library', 'static']
initial_builddir = self.builddir
initial_installdir = self.installdir
for libdir_flag in libdir_flags:
# build library
self.new_builddir()
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
# replace is needed because meson mangles platform paths passed via LDFLAGS
self.init(os.path.join(testdirbase, 'exe'),
override_envvars={"LDFLAGS": '{}{}'.format(libdir_flag, libdir.replace('\\', '/'))})
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
# assert user of library will be rebuild
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
# restore dirs for the next test case
            self.installdir = initial_installdir
            self.builddir = initial_builddir
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
with self.assertRaises(subprocess.CalledProcessError) as e:
self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
# Just to ensure that we caught the correct error
self.assertIn('passed as both', e.stderr)
def _test_same_option_twice(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
def test_command_line(self):
testdir = os.path.join(self.unit_test_dir, '34 command line')
# Verify default values when passing no args that affect the
# configuration, and as a bonus, test that --profile-self works.
self.init(testdir, extra_args=['--profile-self'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'static')
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.assertEqual(obj.user_options['set_sub_opt'].value, True)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'default3')
self.wipe()
# warning_level is special, it's --warnlevel instead of --warning-level
# for historical reasons
self.init(testdir, extra_args=['--warnlevel=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('--warnlevel=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# But when using -D syntax, it should be 'warning_level'
self.init(testdir, extra_args=['-Dwarning_level=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('-Dwarning_level=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# Mixing --option and -Doption is forbidden
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.wipe()
# --default-library should override default value from project()
self.init(testdir, extra_args=['--default-library=both'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'both')
self.setconf('--default-library=shared')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
if self.backend is Backend.ninja:
# reconfigure target works only with ninja backend
self.build('reconfigure')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
self.wipe()
# Should warn on unknown options
out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
self.wipe()
# Should fail on malformed option
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['-Dfoo'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf('-Dfoo')
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.wipe()
# It is not an error to set wrong option for unknown subprojects or
# language because we don't have control on which one will be selected.
self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
self.wipe()
# Test we can set subproject option
self.init(testdir, extra_args=['-Dsubp:subp_opt=foo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'foo')
self.wipe()
# c_args value should be parsed with split_args
self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['foo bar', 'one', 'two'])
self.wipe()
self.init(testdir, extra_args=['-Dset_percent_opt=myoption%'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['set_percent_opt'].value, 'myoption%')
self.wipe()
# Setting a 2nd time the same option should override the first value
try:
self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
'-Dbuildtype=plain', '-Dbuildtype=release',
'-Db_sanitize=address', '-Db_sanitize=thread',
'-Dc_args=-Dfoo', '-Dc_args=-Dbar'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'bar')
self.assertEqual(obj.builtins['buildtype'].value, 'release')
self.assertEqual(obj.base_options['b_sanitize'].value, 'thread')
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dbar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
'-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'foo')
self.assertEqual(obj.builtins['buildtype'].value, 'plain')
self.assertEqual(obj.base_options['b_sanitize'].value, 'address')
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dfoo'])
self.wipe()
except KeyError:
            # Ignore KeyError, it happens on CI for compilers that do not
            # support b_sanitize. We have to test with a base option because
            # they used to fail this test with Meson 0.46 and earlier versions.
pass
def test_warning_level_0(self):
testdir = os.path.join(self.common_test_dir, '214 warning level 0')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ --warnlevel
self.init(testdir, extra_args=['--warnlevel=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('--warnlevel=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ -Dwarning_level
self.init(testdir, extra_args=['-Dwarning_level=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('-Dwarning_level=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
def test_feature_check_usage_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
out = self.init(testdir)
# Parent project warns correctly
self.assertRegex(out, "WARNING: Project targeting '>=0.45'.*'0.47.0': dict")
# Subprojects warn correctly
self.assertRegex(out, r"\|WARNING: Project targeting '>=0.40'.*'0.44.0': disabler")
self.assertRegex(out, r"\|WARNING: Project targeting '!=0.40'.*'0.44.0': disabler")
# Subproject has a new-enough meson_version, no warning
self.assertNotRegex(out, "WARNING: Project targeting.*Python")
# Ensure a summary is printed in the subproject and the outer project
self.assertRegex(out, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'")
self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
self.assertRegex(out, " * 0.47.0: {'dict'}")
def test_configure_file_warnings(self):
testdir = os.path.join(self.common_test_dir, "14 configure file")
out = self.init(testdir)
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
# Warnings for configuration files that are overwritten.
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
# No warnings about empty configuration data objects passed to files with substitutions
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'')
self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
mfile = os.path.join(srcdir, 'meson.build')
                with open(mfile, 'w') as of:
                    of.write("project('foobar', 'c')\n")
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
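        '''Return the introspected build options as a {name: value} dict,
        e.g. {'buildtype': 'debug', 'debug': True} (illustrative).'''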
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
def test_buildtype_setting(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.setconf('-Ddebug=false')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'plain')
self.assertEqual(opts['optimization'], '0')
# Setting optimizations to 3 should cause buildtype
# to go to release mode.
self.setconf('-Doptimization=3')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'release')
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['optimization'], '3')
# Going to debug build type should reset debugging
# and optimization
self.setconf('-Dbuildtype=debug')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '0')
# Command-line parsing of buildtype settings should be the same as
# setting with `meson configure`.
#
# Setting buildtype should set optimization/debug
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=debugoptimized'])
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '2')
self.assertEqual(opts['buildtype'], 'debugoptimized')
# Setting optimization/debug should set buildtype
self.new_builddir()
self.init(testdir, extra_args=['-Doptimization=2', '-Ddebug=true'])
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '2')
self.assertEqual(opts['buildtype'], 'debugoptimized')
# Setting both buildtype and debug on the command-line should work, and
# should warn not to do that. Also test that --debug is parsed as -Ddebug=true
self.new_builddir()
out = self.init(testdir, extra_args=['-Dbuildtype=debugoptimized', '--debug'])
self.assertRegex(out, 'Recommend using either.*buildtype.*debug.*redundant')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '2')
self.assertEqual(opts['buildtype'], 'debugoptimized')
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_pkg_config_libdir(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = 'pkg-config'
[properties]
pkg_config_libdir = ['{0}']
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
def __reconfigure(self, change_minor=False):
# Set an older version to force a reconfigure from scratch
filename = os.path.join(self.privatedir, 'coredata.dat')
with open(filename, 'rb') as f:
obj = pickle.load(f)
if change_minor:
v = mesonbuild.coredata.version.split('.')
obj.version = '.'.join(v[0:2] + [str(int(v[2]) + 1)])
else:
obj.version = '0.47.0'
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure()
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertRegex(out, 'Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
# Create a file in builddir and verify wipe command removes it
filename = os.path.join(self.builddir, 'something')
open(filename, 'w').close()
self.assertTrue(os.path.exists(filename))
out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
self.assertFalse(os.path.exists(filename))
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 val4')
self.build()
self.run_tests()
def test_wipe_from_builddir(self):
testdir = os.path.join(self.common_test_dir, '161 custom target subdir depend files')
self.init(testdir)
self.__reconfigure()
        # Run the wipe from inside the build directory.
        self.init(testdir, extra_args=['--wipe'], workdir=self.builddir)
def test_minor_version_does_not_reconfigure_wipe(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure(change_minor=True)
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertNotRegex(out, 'Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
def test_target_construct_id_from_path(self):
# This id is stable but not guessable.
# The test is supposed to prevent unintentional
# changes of target ID generation.
target_id = Target.construct_id_from_path('some/obscure/subdir',
'target-id', '@suffix')
self.assertEqual('5e002d3@@target-id@suffix', target_id)
target_id = Target.construct_id_from_path('subproject/foo/subdir/bar',
'target2-id', '@other')
self.assertEqual('81d46d1@@target2-id@other', target_id)
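        # As the assertions show, the layout is '<dir hash>@@<target name><suffix>';
        # only the directory part is hashed, so the name and suffix stay readable.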
def test_introspect_projectinfo_without_configured_build(self):
testfile = os.path.join(self.common_test_dir, '35 run program', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'run command')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '43 options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'options')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '46 subproject options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'suboptions')
self.assertEqual(len(res['subprojects']), 1)
subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files'])
self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build']))
self.assertEqual(res['subprojects'][0]['name'], 'subproject')
self.assertEqual(res['subprojects'][0]['version'], 'undefined')
self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
testdir = os.path.join(self.common_test_dir, '102 subproject subdir')
self.init(testdir)
res = self.introspect('--projectinfo')
expected = {
'descriptive_name': 'proj',
'version': 'undefined',
'subproject_dir': 'subprojects',
'subprojects': [
{
'descriptive_name': 'sub',
'name': 'sub',
'version': '1.0'
},
{
'descriptive_name': 'sub_implicit',
'name': 'sub_implicit',
'version': '1.0',
},
{
'descriptive_name': 'sub-novar',
'name': 'sub_novar',
'version': '1.0',
},
{
'descriptive_name': 'subsub',
'name': 'subsub',
'version': 'undefined'
},
{
'descriptive_name': 'subsubsub',
'name': 'subsubsub',
'version': 'undefined'
},
]
}
res['subprojects'] = sorted(res['subprojects'], key=lambda i: i['name'])
self.assertDictEqual(expected, res)
def test_introspection_target_subproject(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir)
res = self.introspect('--targets')
expected = {
'sublib': 'sublib',
'simpletest': 'sublib',
'user': None
}
for entry in res:
name = entry['name']
self.assertEqual(entry['subproject'], expected[name])
def test_introspect_projectinfo_subproject_dir(self):
testdir = os.path.join(self.common_test_dir, '78 custom subproject dir')
self.init(testdir)
res = self.introspect('--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
testfile = os.path.join(self.common_test_dir, '78 custom subproject dir', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-format is for now only supported on Ninja, not {}'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '54 clang-format')
testfile = os.path.join(testdir, 'prog.c')
badfile = os.path.join(testdir, 'prog_orig_c')
goodfile = os.path.join(testdir, 'prog_expected_c')
testheader = os.path.join(testdir, 'header.h')
badheader = os.path.join(testdir, 'header_orig_h')
goodheader = os.path.join(testdir, 'header_expected_h')
try:
shutil.copyfile(badfile, testfile)
shutil.copyfile(badheader, testheader)
self.init(testdir)
self.assertNotEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
self.assertNotEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
self.run_target('clang-format')
self.assertEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
finally:
if os.path.exists(testfile):
os.unlink(testfile)
if os.path.exists(testheader):
os.unlink(testheader)
@skipIfNoExecutable('clang-tidy')
def test_clang_tidy(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-tidy is for now only supported on Ninja, not {}'.format(self.backend.name))
if shutil.which('c++') is None:
raise unittest.SkipTest('Clang-tidy breaks when ccache is used and "c++" not in path.')
if is_osx():
raise unittest.SkipTest('Apple ships a broken clang-tidy that chokes on -pipe.')
testdir = os.path.join(self.unit_test_dir, '70 clang-tidy')
dummydir = os.path.join(testdir, 'dummydir.h')
self.init(testdir, override_envvars={'CXX': 'c++'})
out = self.run_target('clang-tidy')
self.assertIn('cttest.cpp:4:20', out)
self.assertNotIn(dummydir, out)
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '71 cross')
# Do a build to generate a cross file where the host is this target
self.init(testdir, extra_args=['-Dgenerate=true'])
self.meson_cross_file = os.path.join(self.builddir, "crossfile")
self.assertTrue(os.path.exists(self.meson_cross_file))
# Now verify that this is detected as cross
self.new_builddir()
self.init(testdir)
def test_introspect_buildoptions_without_configured_build(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
self.init(testdir, default_args=False)
res_wb = self.introspect('--buildoptions')
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_meson_configure_from_source_does_not_crash(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
self._run(self.mconf_command + [testdir])
def test_introspect_buildoptions_cross_only(self):
testdir = os.path.join(self.unit_test_dir, '83 cross only introspect')
testfile = os.path.join(testdir, 'meson.build')
res = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
optnames = [o['name'] for o in res]
self.assertIn('c_args', optnames)
self.assertNotIn('build.c_args', optnames)
def test_introspect_json_dump(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
def assertKeyTypes(key_type_list, obj):
for i in key_type_list:
self.assertIn(i[0], obj)
self.assertIsInstance(obj[i[0]], i[1])
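# Expected key/type schemas for the intro-*.json files loaded below.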
root_keylist = [
('benchmarks', list),
('buildoptions', list),
('buildsystem_files', list),
('dependencies', list),
('installed', dict),
('projectinfo', dict),
('targets', list),
('tests', list),
]
test_keylist = [
('cmd', list),
('env', dict),
('name', str),
('timeout', int),
('suite', list),
('is_parallel', bool),
('protocol', str),
('depends', list),
]
buildoptions_keylist = [
('name', str),
('section', str),
('type', str),
('description', str),
('machine', str),
]
buildoptions_typelist = [
('combo', str, [('choices', list)]),
('string', str, []),
('boolean', bool, []),
('integer', int, []),
('array', list, []),
]
buildoptions_sections = ['core', 'backend', 'base', 'compiler', 'directory', 'user', 'test']
buildoptions_machines = ['any', 'build', 'host']
dependencies_typelist = [
('name', str),
('version', str),
('compile_args', list),
('link_args', list),
]
targets_typelist = [
('name', str),
('id', str),
('type', str),
('defined_in', str),
('filename', list),
('build_by_default', bool),
('target_sources', list),
('installed', bool),
]
targets_sources_typelist = [
('language', str),
('compiler', list),
('parameters', list),
('sources', list),
('generated_sources', list),
]
# First load all files
res = {}
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res[i[0]] = json.load(fp)
assertKeyTypes(root_keylist, res)
# Match target ids to input and output files for ease of reference
src_to_id = {}
out_to_id = {}
for i in res['targets']:
# json.dump() writes to the stream and returns None, so wrapping it in
# print() used to emit a stray 'None'; use dumps() for the debug output.
print(json.dumps(i))
out_to_id.update({os.path.relpath(out, self.builddir): i['id']
for out in i['filename']})
for group in i['target_sources']:
src_to_id.update({os.path.relpath(src, testdir): i['id']
for src in group['sources']})
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
deps_to_find = {'test case 1': [src_to_id['t1.cpp']],
'test case 2': [src_to_id['t2.cpp'], src_to_id['t3.cpp']],
'benchmark 1': [out_to_id['file2'], src_to_id['t3.cpp']]}
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertEqual(sorted(i['depends']),
sorted(deps_to_find[i['name']]))
self.assertListEqual(tests_to_find, [])
# Check buildoptions
buildopts_to_find = {'cpp_std': 'c++11'}
for i in res['buildoptions']:
assertKeyTypes(buildoptions_keylist, i)
valid_type = False
for j in buildoptions_typelist:
if i['type'] == j[0]:
self.assertIsInstance(i['value'], j[1])
assertKeyTypes(j[2], i)
valid_type = True
break
self.assertIn(i['section'], buildoptions_sections)
self.assertIn(i['machine'], buildoptions_machines)
self.assertTrue(valid_type)
if i['name'] in buildopts_to_find:
self.assertEqual(i['value'], buildopts_to_find[i['name']])
buildopts_to_find.pop(i['name'], None)
self.assertDictEqual(buildopts_to_find, {})
# Check buildsystem_files
bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
# Check dependencies
dependencies_to_find = ['threads']
for i in res['dependencies']:
assertKeyTypes(dependencies_typelist, i)
if i['name'] in dependencies_to_find:
dependencies_to_find.remove(i['name'])
self.assertListEqual(dependencies_to_find, [])
# Check projectinfo
self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})
# Check targets
targets_to_find = {
'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
'test1': ('executable', True, True, 'meson.build'),
'test2': ('executable', True, False, 'meson.build'),
'test3': ('executable', True, False, 'meson.build'),
}
for i in res['targets']:
assertKeyTypes(targets_typelist, i)
if i['name'] in targets_to_find:
tgt = targets_to_find[i['name']]
self.assertEqual(i['type'], tgt[0])
self.assertEqual(i['build_by_default'], tgt[1])
self.assertEqual(i['installed'], tgt[2])
self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
targets_to_find.pop(i['name'], None)
for j in i['target_sources']:
assertKeyTypes(targets_sources_typelist, j)
self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
res_all = self.introspect('--all')
res_file = {}
root_keylist = [
'benchmarks',
'buildoptions',
'buildsystem_files',
'dependencies',
'installed',
'projectinfo',
'targets',
'tests',
]
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res_file[i] = json.load(fp)
self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
self.assertIn(i, res1)
self.assertEqual(res1['error'], False)
self.assertEqual(res1['build_files_updated'], True)
def test_introspect_config_update(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
self.setconf('-Dcpp_std=c++14')
self.setconf('-Dbuildtype=release')
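# Mutate the previously-read data to the values expected after the
# reconfigure; buildtype=release also implies optimization=3 and
# debug=false.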
for idx, i in enumerate(res1):
if i['name'] == 'cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'build.cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'buildtype':
res1[idx]['value'] = 'release'
if i['name'] == 'optimization':
res1[idx]['value'] = '3'
if i['name'] == 'debug':
res1[idx]['value'] = False
with open(introfile, 'r') as fp:
res2 = json.load(fp)
self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res_wb = json.load(fp)
res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
# Account for differences in output
res_wb = [i for i in res_wb if i['type'] != 'custom']
for i in res_wb:
i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
if 'install_filename' in i:
del i['install_filename']
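# From-source introspection cannot know which compiler will be used,
# so collapse all source groups into a single 'unknown' language entry
# for the comparison.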
sources = []
for j in i['target_sources']:
sources += j['sources']
i['target_sources'] = [{
'language': 'unknown',
'compiler': [],
'parameters': [],
'sources': sources,
'generated_sources': []
}]
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_ast_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--ast'] + self.meson_args)
node_counter = {}
def accept_node(json_node):
self.assertIsInstance(json_node, dict)
for i in ['lineno', 'colno', 'end_lineno', 'end_colno']:
self.assertIn(i, json_node)
self.assertIsInstance(json_node[i], int)
self.assertIn('node', json_node)
n = json_node['node']
self.assertIsInstance(n, str)
self.assertIn(n, nodes)
node_counter[n] = node_counter.get(n, 0) + 1
for nodeDesc in nodes[n]:
key = nodeDesc[0]
func = nodeDesc[1]
self.assertIn(key, json_node)
if func is None:
tp = nodeDesc[2]
self.assertIsInstance(json_node[key], tp)
continue
func(json_node[key])
def accept_node_list(node_list):
self.assertIsInstance(node_list, list)
for i in node_list:
accept_node(i)
def accept_kwargs(kwargs):
self.assertIsInstance(kwargs, list)
for i in kwargs:
self.assertIn('key', i)
self.assertIn('val', i)
accept_node(i['key'])
accept_node(i['val'])
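# For reference, a node in the AST dump looks roughly like this
# (illustrative sketch only, values made up):
#   {'node': 'StringNode', 'lineno': 1, 'colno': 9,
#    'end_lineno': 1, 'end_colno': 14, 'value': 'mylib'}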
nodes = {
'BooleanNode': [('value', None, bool)],
'IdNode': [('value', None, str)],
'NumberNode': [('value', None, int)],
'StringNode': [('value', None, str)],
'ContinueNode': [],
'BreakNode': [],
'ArgumentNode': [('positional', accept_node_list), ('kwargs', accept_kwargs)],
'ArrayNode': [('args', accept_node)],
'DictNode': [('args', accept_node)],
'EmptyNode': [],
'OrNode': [('left', accept_node), ('right', accept_node)],
'AndNode': [('left', accept_node), ('right', accept_node)],
'ComparisonNode': [('left', accept_node), ('right', accept_node), ('ctype', None, str)],
'ArithmeticNode': [('left', accept_node), ('right', accept_node), ('op', None, str)],
'NotNode': [('right', accept_node)],
'CodeBlockNode': [('lines', accept_node_list)],
'IndexNode': [('object', accept_node), ('index', accept_node)],
'MethodNode': [('object', accept_node), ('args', accept_node), ('name', None, str)],
'FunctionNode': [('args', accept_node), ('name', None, str)],
'AssignmentNode': [('value', accept_node), ('var_name', None, str)],
'PlusAssignmentNode': [('value', accept_node), ('var_name', None, str)],
'ForeachClauseNode': [('items', accept_node), ('block', accept_node), ('varnames', None, list)],
'IfClauseNode': [('ifs', accept_node_list), ('else', accept_node)],
'IfNode': [('condition', accept_node), ('block', accept_node)],
'UMinusNode': [('right', accept_node)],
'TernaryNode': [('condition', accept_node), ('true', accept_node), ('false', accept_node)],
}
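# The root of the dump is the top-level CodeBlockNode; accepting it
# recursively validates every node in the tree.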
accept_node(res_nb)
for n, c in [('ContinueNode', 2), ('BreakNode', 1), ('NotNode', 3)]:
self.assertIn(n, node_counter)
self.assertEqual(node_counter[n], c)
def test_introspect_dependencies_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)
expected = [
{
'name': 'threads',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'zlib',
'required': False,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'bugDep1',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'somethingthatdoesnotexist',
'required': True,
'version': ['>=1.2.3'],
'has_fallback': False,
'conditional': True
},
{
'name': 'look_i_have_a_fallback',
'required': True,
'version': ['>=1.0.0', '<=99.9.9'],
'has_fallback': True,
'conditional': True
}
]
self.maxDiff = None
self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
# just test that the command does not fail (e.g. because it throws an exception)
self._run([*self.meson_command, 'unstable-coredata', self.builddir])
@skip_if_no_cmake
def test_cmake_prefix_path(self):
testdir = os.path.join(self.unit_test_dir, '64 cmake_prefix_path')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
@skip_if_no_cmake
def test_cmake_parser(self):
testdir = os.path.join(self.unit_test_dir, '65 cmake parser')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
def test_alias_target(self):
if self.backend is Backend.vs:
# FIXME: This unit test is broken with vs backend, needs investigation
raise unittest.SkipTest('Skipping alias_target test with {} backend'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '66 alias target')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'hello.txt'))
self.run_target('build-all')
self.assertPathExists(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathExists(os.path.join(self.builddir, 'hello.txt'))
def test_configure(self):
testdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(testdir)
self._run(self.mconf_command + [self.builddir])
def test_summary(self):
testdir = os.path.join(self.unit_test_dir, '73 summary')
out = self.init(testdir)
expected = textwrap.dedent(r'''
Some Subproject 2.0
string: bar
integer: 1
boolean: True
My Project 1.0
Configuration
Some boolean: False
Another boolean: True
Some string: Hello World
A list: string
1
True
empty list:
A number: 1
yes: YES
no: NO
coma list: a, b, c
Plugins
long coma list: alpha, alphacolor, apetag, audiofx, audioparsers, auparse,
autodetect, avi
Subprojects
sub: YES
sub2: NO Problem encountered: This subproject failed
''')
expected_lines = expected.split('\n')[1:]
out_start = out.find(expected_lines[0])
out_lines = out[out_start:].split('\n')[:len(expected_lines)]
if sys.version_info < (3, 7, 0):
# Dictionary order is not stable in Python <3.7, so sort the lines
# while comparing
self.assertEqual(sorted(expected_lines), sorted(out_lines))
else:
self.assertEqual(expected_lines, out_lines)
def test_meson_compile(self):
"""Test the meson compile command."""
def get_exe_name(basename: str) -> str:
if is_windows():
return '{}.exe'.format(basename)
else:
return basename
def get_shared_lib_name(basename: str) -> str:
if mesonbuild.environment.detect_msys2_arch():
return 'lib{}.dll'.format(basename)
elif is_windows():
return '{}.dll'.format(basename)
elif is_cygwin():
return 'cyg{}.dll'.format(basename)
elif is_osx():
return 'lib{}.dylib'.format(basename)
else:
return 'lib{}.so'.format(basename)
def get_static_lib_name(basename: str) -> str:
return 'lib{}.a'.format(basename)
# Base case (no targets or additional arguments)
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
self._run([*self.meson_command, 'compile', '-C', self.builddir])
self.assertPathExists(os.path.join(self.builddir, get_exe_name('trivialprog')))
# `--clean`
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--clean'])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
# Target specified in a project with unique names
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir, extra_args=['--wipe'])
# Multiple targets and target type specified
self._run([*self.meson_command, 'compile', '-C', self.builddir, 'mylib', 'mycpplib:shared_library'])
# Check that we have a shared lib, but not an executable, i.e. check that target actually worked
self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mylib')))
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('prog')))
self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mycpplib')))
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('cppprog')))
# Target specified in a project with non-unique names
testdir = os.path.join(self.common_test_dir, '190 same target name')
self.init(testdir, extra_args=['--wipe'])
self._run([*self.meson_command, 'compile', '-C', self.builddir, './foo'])
self.assertPathExists(os.path.join(self.builddir, get_static_lib_name('foo')))
self._run([*self.meson_command, 'compile', '-C', self.builddir, 'sub/foo'])
self.assertPathExists(os.path.join(self.builddir, 'sub', get_static_lib_name('foo')))
# run_target
testdir = os.path.join(self.common_test_dir, '54 run target')
self.init(testdir, extra_args=['--wipe'])
out = self._run([*self.meson_command, 'compile', '-C', self.builddir, 'py3hi'])
self.assertIn('I am Python3.', out)
# `--$BACKEND-args`
testdir = os.path.join(self.common_test_dir, '1 trivial')
if self.backend is Backend.ninja:
self.init(testdir, extra_args=['--wipe'])
# Dry run - should not create a program
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--ninja-args=-n'])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
elif self.backend is Backend.vs:
self.init(testdir, extra_args=['--wipe'])
self._run([*self.meson_command, 'compile', '-C', self.builddir])
# Explicitly clean the target through msbuild interface
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--vs-args=-t:{}:Clean'.format(re.sub(r'[\%\$\@\;\.\(\)\']', '_', get_exe_name('trivialprog')))])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
def test_spurious_reconfigure_built_dep_file(self):
testdir = os.path.join(self.unit_test_dir, '75 dep files')
# Regression test: Spurious reconfigure was happening when build
# directory is inside the source directory.
# See https://gitlab.freedesktop.org/gstreamer/gst-build/-/issues/85.
srcdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, srcdir)
builddir = os.path.join(srcdir, '_build')
self.change_builddir(builddir)
self.init(srcdir)
self.build()
# During the first configure the file did not exist, so no dependency should
# have been set. A rebuild should not trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
self.init(srcdir, extra_args=['--reconfigure'])
# During the reconfigure the file did exist, but it is inside the build
# directory, so no dependency should have been set. A rebuild should not
# trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
def _test_junit(self, case: str) -> None:
try:
import lxml.etree as et
except ImportError:
raise unittest.SkipTest('lxml required, but not found.')
schema = et.XMLSchema(et.parse(str(Path(__file__).parent / 'data' / 'schema.xsd')))
self.init(case)
self.run_tests()
junit = et.parse(str(Path(self.builddir) / 'meson-logs' / 'testlog.junit.xml'))
try:
schema.assertValid(junit)
except et.DocumentInvalid as e:
self.fail(e.error_log)
def test_junit_valid_tap(self):
self._test_junit(os.path.join(self.common_test_dir, '213 tap tests'))
def test_junit_valid_exitcode(self):
self._test_junit(os.path.join(self.common_test_dir, '44 test args'))
def test_junit_valid_gtest(self):
self._test_junit(os.path.join(self.framework_test_dir, '2 gtest'))
def test_link_language_linker(self):
# TODO: there should be some way to query how we're linking things
# without resorting to reading the ninja.build file
if self.backend is not Backend.ninja:
raise unittest.SkipTest('This test reads the ninja file')
testdir = os.path.join(self.common_test_dir, '232 link language')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
self.assertRegex(contents, r'build main(\.exe)?.*: c_LINKER')
self.assertRegex(contents, r'build (lib|cyg)?mylib.*: c_LINKER')
def test_commands_documented(self):
'''
Test that all listed meson commands are documented in Commands.md.
'''
# The docs directory is not in release tarballs.
if not os.path.isdir('docs'):
raise unittest.SkipTest('Doc directory does not exist.')
doc_path = 'docs/markdown_dynamic/Commands.md'
md = None
with open(doc_path, encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
## Get command sections
section_pattern = re.compile(r'^### (.+)$', re.MULTILINE)
md_command_section_matches = [i for i in section_pattern.finditer(md)]
md_command_sections = dict()
for i, s in enumerate(md_command_section_matches):
section_end = len(md) if i == len(md_command_section_matches) - 1 else md_command_section_matches[i + 1].start()
md_command_sections[s.group(1)] = (s.start(), section_end)
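# md_command_sections maps each '### command' heading to the
# (start, end) character span of its section in the file.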
## Validate commands
md_commands = set(md_command_sections.keys())
help_output = self._run(self.meson_command + ['--help'])
help_commands = set(c.strip() for c in re.findall(r'usage:(?:.+)?{((?:[a-z]+,*)+?)}', help_output, re.MULTILINE|re.DOTALL)[0].split(','))
self.assertEqual(md_commands | {'help'}, help_commands, 'Doc file: `{}`'.format(doc_path))
## Validate that each section has proper placeholders
def get_data_pattern(command):
return re.compile(
r'^```[\r\n]'
r'{{ cmd_help\[\'' + command + r'\'\]\[\'usage\'\] }}[\r\n]'
r'^```[\r\n]'
r'.*?'
r'^```[\r\n]'
r'{{ cmd_help\[\'' + command + r'\'\]\[\'arguments\'\] }}[\r\n]'
r'^```',
flags = re.MULTILINE|re.DOTALL)
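# Each documented command must contain two fenced blocks holding the
# {{ cmd_help[...] }} placeholders for its usage and arguments.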
for command in md_commands:
m = get_data_pattern(command).search(md, pos=md_command_sections[command][0], endpos=md_command_sections[command][1])
self.assertIsNotNone(m, 'Command `{}` is missing placeholders for dynamic data. Doc file: `{}`'.format(command, doc_path))
def _check_coverage_files(self, types=('text', 'xml', 'html')):
covdir = Path(self.builddir) / 'meson-logs'
files = []
if 'text' in types:
files.append('coverage.txt')
if 'xml' in types:
files.append('coverage.xml')
if 'html' in types:
files.append('coveragereport/index.html')
for f in files:
self.assertTrue((covdir / f).is_file(), msg='{} is not a file'.format(f))
def test_coverage(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage')
self._check_coverage_files()
def test_coverage_complex(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '109 generatorcustom')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage')
self._check_coverage_files()
def test_coverage_html(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
self._check_coverage_files(['html'])
def test_coverage_text(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-text')
self._check_coverage_files(['text'])
def test_coverage_xml(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-xml')
self._check_coverage_files(['xml'])
def test_cross_file_constants(self):
with temp_filename() as crossfile1, temp_filename() as crossfile2:
with open(crossfile1, 'w') as f:
f.write(textwrap.dedent(
'''
[constants]
compiler = 'gcc'
'''))
with open(crossfile2, 'w') as f:
f.write(textwrap.dedent(
'''
[constants]
toolchain = '/toolchain/'
common_flags = ['--sysroot=' + toolchain / 'sysroot']
[properties]
c_args = common_flags + ['-DSOMETHING']
cpp_args = c_args + ['-DSOMETHING_ELSE']
[binaries]
c = toolchain / compiler
'''))
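# Constants accumulate across machine files: crossfile2 can reference
# 'compiler' defined in crossfile1, and the '/' operator joins path
# components.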
values = mesonbuild.coredata.parse_machine_files([crossfile1, crossfile2])
self.assertEqual(values['binaries']['c'], '/toolchain/gcc')
self.assertEqual(values['properties']['c_args'],
['--sysroot=/toolchain/sysroot', '-DSOMETHING'])
self.assertEqual(values['properties']['cpp_args'],
['--sysroot=/toolchain/sysroot', '-DSOMETHING', '-DSOMETHING_ELSE'])
@unittest.skipIf(is_windows(), 'Directory cleanup fails for some reason')
def test_wrap_git(self):
with tempfile.TemporaryDirectory() as tmpdir:
srcdir = os.path.join(tmpdir, 'src')
shutil.copytree(os.path.join(self.unit_test_dir, '81 wrap-git'), srcdir)
upstream = os.path.join(srcdir, 'subprojects', 'wrap_git_upstream')
upstream_uri = Path(upstream).as_uri()
_git_init(upstream)
with open(os.path.join(srcdir, 'subprojects', 'wrap_git.wrap'), 'w') as f:
f.write(textwrap.dedent('''
[wrap-git]
url = {}
patch_directory = wrap_git_builddef
revision = master
'''.format(upstream_uri)))
self.init(srcdir)
self.build()
self.run_tests()
def test_multi_output_custom_target_no_warning(self):
testdir = os.path.join(self.common_test_dir, '235 custom_target source')
out = self.init(testdir)
self.assertNotRegex(out, 'WARNING:.*Using the first one.')
self.build()
self.run_tests()
@unittest.skipUnless(is_linux() and (re.search('^i.86$|^x86$|^x64$|^x86_64$|^amd64$', platform.processor()) is not None),
'Requires an ASM compiler for the x86 or x86_64 platform, currently only available on Linux CI runners')
def test_nostdlib(self):
testdir = os.path.join(self.unit_test_dir, '79 nostdlib')
machinefile = os.path.join(self.builddir, 'machine.txt')
with open(machinefile, 'w') as f:
f.write(textwrap.dedent('''
[properties]
c_stdlib = 'mylibc'
'''))
# Test native C stdlib
self.meson_native_file = machinefile
self.init(testdir)
self.build()
# Test cross C stdlib
self.new_builddir()
self.meson_native_file = None
self.meson_cross_file = machinefile
self.init(testdir)
self.build()
def test_meson_version_compare(self):
testdir = os.path.join(self.unit_test_dir, '82 meson version compare')
out = self.init(testdir)
self.assertNotRegex(out, r'WARNING')
def test_wrap_redirect(self):
redirect_wrap = os.path.join(self.builddir, 'redirect.wrap')
real_wrap = os.path.join(self.builddir, 'foo/subprojects/real.wrap')
os.makedirs(os.path.dirname(real_wrap))
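# A wrap-redirect must point at another .wrap file, must not escape
# upward with '..', and must live at foo/subprojects/bar.wrap.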
# Invalid redirect, filename must have .wrap extension
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = foo/subprojects/real.wrapper
'''))
with self.assertRaisesRegex(WrapException, 'wrap-redirect filename must be a .wrap file'):
PackageDefinition(redirect_wrap)
# Invalid redirect, filename cannot be in parent directory
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = ../real.wrap
'''))
with self.assertRaisesRegex(WrapException, 'wrap-redirect filename cannot contain ".."'):
PackageDefinition(redirect_wrap)
# Invalid redirect, filename must be in foo/subprojects/real.wrap
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = foo/real.wrap
'''))
with self.assertRaisesRegex(WrapException, 'wrap-redirect filename must be in the form foo/subprojects/bar.wrap'):
wrap = PackageDefinition(redirect_wrap)
# Correct redirect
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = foo/subprojects/real.wrap
'''))
with open(real_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-git]
url = http://invalid
'''))
wrap = PackageDefinition(redirect_wrap)
self.assertEqual(wrap.get('url'), 'http://invalid')
class FailureTests(BasePlatformTests):
'''
Tests that test failure conditions. Build files here should be dynamically
generated and static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
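# Regexes matching the common "dependency not found" and
# "pkg-config not found" error messages.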
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config.*not found'
def setUp(self):
super().setUp()
self.srcdir = os.path.realpath(tempfile.mkdtemp())
self.mbuild = os.path.join(self.srcdir, 'meson.build')
self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
def tearDown(self):
super().tearDown()
windows_proof_rmtree(self.srcdir)
def assertMesonRaises(self, contents, match, *,
extra_args=None,
langs=None,
meson_version=None,
options=None,
override_envvars=None):
'''
Assert that running meson configure on the specified @contents raises
an error message matching regex @match.
'''
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('failure test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
if options is not None:
with open(self.moptions, 'w') as f:
f.write(options)
o = {'MESON_FORCE_BACKTRACE': '1'}
if override_envvars is None:
override_envvars = o
else:
override_envvars.update(o)
# Force tracebacks so we can detect them properly
with self.assertRaisesRegex(MesonException, match, msg=contents):
# Must run in-process or we'll get a generic CalledProcessError
self.init(self.srcdir, extra_args=extra_args,
inprocess=True,
override_envvars = override_envvars)
def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('output test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
# Run in-process for speed and consistency with assertMesonRaises
return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
cases = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "Item must be a list or one of <class 'str'>"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in cases:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_extraframework_dependency_method(self):
code = "dependency('python', method : 'extraframework')"
if not is_osx():
self.assertMesonRaises(code, self.dnf)
else:
# Python2 framework is always available on macOS
self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
with no_pkgconfig():
# Look for pkg-config, cache it, then
# Use cached pkg-config without erroring out, then
# Use cached pkg-config to error out
code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
"dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
"dependency('sdl2', method : 'pkg-config')"
self.assertMesonRaises(code, self.nopkg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
"(requires a Objc compiler|{})".format(self.dnf),
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Run-time dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
"(required.*fail|{})".format(self.dnf))
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
"(fail.*not found|{})".format(self.dnf))
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost')",
"(BOOST_ROOT.*absolute|{})".format(self.dnf),
override_envvars = {'BOOST_ROOT': 'relative/path'})
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, ".* is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env()
try:
env.detect_objc_compiler(MachineChoice.HOST)
env.detect_objcpp_compiler(MachineChoice.HOST)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
1. The correct message is output when a not-required dep is not
found and the fallback subproject is also not found.
2. A not-required fallback dependency is not found because the
subproject failed to parse.
3. A not-found not-required dep with a fallback subproject outputs the
correct message when the fallback subproject is found but the
variable inside it is not.
4. A fallback dependency is found from the subproject parsed in (3)
5. A wrap file from a subproject is used but fails because it does not
contain required keys.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Subproject directory not found and .*nosubproj.wrap.* file not found")
self.assertRegex(out, r'Function does not take positional arguments.')
self.assertRegex(out, r'Dependency .*somenotfounddep.* from subproject .*subprojects/somesubproj.* found: .*NO.*')
self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
self.assertRegex(out, r'Missing key .*source_filename.* in subsubproject.wrap')
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'})
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
def test_version_checked_before_parsing_options(self):
'''
https://github.com/mesonbuild/meson/issues/5281
'''
options = "option('some-option', type: 'foo', value: '')"
match = 'Meson version is.*but project requires >=2000'
self.assertMesonRaises("", match, meson_version='>=2000', options=options)
def test_assert_default_message(self):
self.assertMesonRaises("k1 = 'a'\n" +
"assert({\n" +
" k1: 1,\n" +
"}['a'] == 2)\n",
r"Assert failed: {k1 : 1}\['a'\] == 2")
def test_wrap_nofallback(self):
self.assertMesonRaises("dependency('notfound', fallback : ['foo', 'foo_dep'])",
r"Dependency \'notfound\' not found and fallback is disabled",
extra_args=['--wrap-mode=nofallback'])
def test_message(self):
self.assertMesonOutputs("message('Array:', ['a', 'b'])",
r"Message:.* Array: \['a', 'b'\]")
def test_warning(self):
self.assertMesonOutputs("warning('Array:', ['a', 'b'])",
r"WARNING:.* Array: \['a', 'b'\]")
def test_override_dependency_twice(self):
self.assertMesonRaises("meson.override_dependency('foo', declare_dependency())\n" +
"meson.override_dependency('foo', declare_dependency())",
"""Tried to override dependency 'foo' which has already been resolved or overridden""")
@unittest.skipIf(is_windows(), 'zlib is not available on Windows')
def test_override_resolved_dependency(self):
self.assertMesonRaises("dependency('zlib')\n" +
"meson.override_dependency('zlib', declare_dependency())",
"""Tried to override dependency 'zlib' which has already been resolved or overridden""")
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
@mock.patch.dict(os.environ)
def test_find_program(self):
'''
Test that Windows-specific edge-cases in find_program are functioning
correctly. Cannot be an ordinary test because it involves manipulating
PATH to point to a directory with Python scripts.
'''
testdir = os.path.join(self.platform_test_dir, '8 find program')
# Find `cmd` and `cmd.exe`
prog1 = ExternalProgram('cmd')
self.assertTrue(prog1.found(), msg='cmd not found')
prog2 = ExternalProgram('cmd.exe')
self.assertTrue(prog2.found(), msg='cmd.exe not found')
self.assertPathEqual(prog1.get_path(), prog2.get_path())
# Find cmd.exe with args without searching
prog = ExternalProgram('cmd', command=['cmd', '/C'])
self.assertTrue(prog.found(), msg='cmd not found with args')
self.assertPathEqual(prog.get_command()[0], 'cmd')
# Find cmd with an absolute path that's missing the extension
cmd_path = prog2.get_path()[:-4]
prog = ExternalProgram(cmd_path)
self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path))
# Finding a script with no extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script'))
self.assertTrue(prog.found(), msg='test-script not found')
# Finding a script with an extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
self.assertTrue(prog.found(), msg='test-script-ext.py not found')
# Finding a script in PATH
os.environ['PATH'] += os.pathsep + testdir
# If `.PY` is in PATHEXT, scripts can be found as programs
if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
# Finding a script in PATH w/o extension works and adds the interpreter
prog = ExternalProgram('test-script-ext')
self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Finding a script in PATH with extension works and adds the interpreter
prog = ExternalProgram('test-script-ext.py')
self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Using a script with an extension directly via command= works and adds the interpreter
prog = ExternalProgram('test-script-ext.py', command=[os.path.join(testdir, 'test-script-ext.py'), '--help'])
self.assertTrue(prog.found(), msg='test-script-ext.py with full path not picked up via command=')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathEqual(prog.get_command()[2], '--help')
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Using a script without an extension directly via command= works and adds the interpreter
prog = ExternalProgram('test-script', command=[os.path.join(testdir, 'test-script'), '--help'])
self.assertTrue(prog.found(), msg='test-script with full path not picked up via command=')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathEqual(prog.get_command()[2], '--help')
self.assertPathBasenameEqual(prog.get_path(), 'test-script')
# Ensure that WindowsApps gets removed from PATH
path = os.environ['PATH']
if 'WindowsApps' not in path:
username = os.environ['USERNAME']
appstore_dir = r'C:\Users\{}\AppData\Local\Microsoft\WindowsApps'.format(username)
path = os.pathsep + appstore_dir
path = ExternalProgram._windows_sanitize_path(path)
self.assertNotIn('WindowsApps', path)
def test_ignore_libs(self):
'''
Test that find_library on libs that are to be ignored returns an empty
array of arguments. Must be a unit test because we cannot inspect
ExternalLibraryHolder from build files.
'''
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Not using MSVC')
# Compare against the full set so that this test must be updated
# whenever the list of ignored libraries changes.
self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
for l in cc.ignore_libs:
self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
testdir = os.path.join(self.platform_test_dir, '5 resources')
# resource compiler depfile generation is not yet implemented for msvc
env = get_fake_env(testdir, self.builddir, self.prefix)
depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Test compile_resources(depend_file:)
# Changing mtime of sample.ico should rebuild prog
self.utime(os.path.join(testdir, 'res', 'sample.ico'))
self.assertRebuiltTarget('prog')
# Test depfile generation by compile_resources
# Changing mtime of resource.h should rebuild myres.rc and then prog
if depfile_works:
self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
self.assertRebuiltTarget('prog')
self.wipe()
if depfile_works:
testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
self.utime(os.path.join(testdir, 'res', 'resource.h'))
self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
try:
self.init(testdir)
except subprocess.CalledProcessError:
# According to Python docs, output is only stored when
# using check_output. We don't use it, so we can't check
# that the output is correct (i.e. that it failed due
# to the right reason).
return
self.build()
def test_install_pdb_introspection(self):
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
self.init(testdir)
installed = self.introspect('--installed')
files = [os.path.basename(path) for path in installed.values()]
self.assertTrue('prog.pdb' in files)
def _check_ld(self, name: str, lang: str, expected: str) -> None:
if not shutil.which(name):
raise unittest.SkipTest('Could not find {}.'.format(name))
envvars = [mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]]
# Also test a deprecated variable if there is one.
if envvars[0] in mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP:
envvars.append(
mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP[envvars[0]])
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
try:
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('Could not find a compiler for {}'.format(lang))
self.assertEqual(comp.linker.id, expected)
def test_link_environment_variable_lld_link(self):
env = get_fake_env()
comp = env.detect_c_compiler(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('lld-link', 'c', 'lld-link')
def test_link_environment_variable_link(self):
env = get_fake_env()
comp = env.detect_c_compiler(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('link', 'c', 'link')
def test_link_environment_variable_optlink(self):
env = get_fake_env()
comp = env.detect_c_compiler(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('optlink', 'c', 'optlink')
@skip_if_not_language('rust')
def test_link_environment_variable_rust(self):
self._check_ld('link', 'rust', 'link')
@skip_if_not_language('d')
def test_link_environment_variable_d(self):
env = get_fake_env()
comp = env.detect_d_compiler(MachineChoice.HOST)
if comp.id == 'dmd':
raise unittest.SkipTest('meson cannot reliably make DMD use a different linker.')
self._check_ld('lld-link', 'd', 'lld-link')
def test_pefile_checksum(self):
try:
import pefile
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('pefile module not found')
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir, extra_args=['--buildtype=release'])
self.build()
# Test that binaries have a non-zero checksum
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
cc_id = cc.get_id()
ld_id = cc.get_linker_id()
dll = glob(os.path.join(self.builddir, '*mycpplib.dll'))[0]
exe = os.path.join(self.builddir, 'cppprog.exe')
for f in (dll, exe):
pe = pefile.PE(f)
msg = 'PE file: {!r}, compiler: {!r}, linker: {!r}'.format(f, cc_id, ld_id)
if cc_id == 'clang-cl':
# Latest clang-cl tested (7.0) does not write checksums out
self.assertFalse(pe.verify_checksum(), msg=msg)
else:
# Verify that a valid checksum was written by all other compilers
self.assertTrue(pe.verify_checksum(), msg=msg)
def test_qt5dependency_vscrt(self):
'''
Test that qt5 dependencies use the debug module suffix when b_vscrt is
set to 'mdd'
'''
# Verify that the `b_vscrt` option is available
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if 'b_vscrt' not in cc.base_options:
raise unittest.SkipTest('Compiler does not support setting the VS CRT')
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake') and not is_ci():
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output and not is_ci():
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Setup with /MDd
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Db_vscrt=mdd'])
# Verify that we're linking to the debug versions of Qt DLLs
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build qt5core.exe: cpp_LINKER.*Qt5Cored.lib', contents)
self.assertIsNotNone(m, msg=contents)
def test_compiler_checks_vscrt(self):
'''
Test that the correct VS CRT is used when running compiler checks
'''
# Verify that the `b_vscrt` option is available
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if 'b_vscrt' not in cc.base_options:
raise unittest.SkipTest('Compiler does not support setting the VS CRT')
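# Every compiler sanity-check command recorded in the meson log must
# contain the expected CRT flag.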
def sanitycheck_vscrt(vscrt):
checks = self.get_meson_log_sanitychecks()
self.assertTrue(len(checks) > 0)
for check in checks:
self.assertIn(vscrt, check)
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
sanitycheck_vscrt('/MDd')
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=debugoptimized'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=release'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=md'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mdd'])
sanitycheck_vscrt('/MDd')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mt'])
sanitycheck_vscrt('/MT')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mtd'])
sanitycheck_vscrt('/MTd')
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
'''
Tests that should run on macOS
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
def test_apple_bitcode(self):
'''
Test that -fembed-bitcode is correctly added while compiling and
-bitcode_bundle is added while linking when b_bitcode is true and not
when it is false. This can't be an ordinary test case because we need
to inspect the compiler database.
'''
testdir = os.path.join(self.platform_test_dir, '7 bitcode')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.id != 'clang':
raise unittest.SkipTest('Not using Clang on OSX')
# Try with bitcode enabled
out = self.init(testdir, extra_args='-Db_bitcode=true')
# Warning was printed
self.assertRegex(out, 'WARNING:.*b_bitcode')
# Compiler options were added
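# (shared_module() sources are expected to be built *without*
# -fembed-bitcode, since bitcode is not supported for modules)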
for compdb in self.get_compdb():
if 'module' in compdb['file']:
self.assertNotIn('-fembed-bitcode', compdb['command'])
else:
self.assertIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
# Linker options were added
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNotNone(m, msg=contents)
# Try with bitcode disabled
self.setconf('-Db_bitcode=false')
# Regenerate build
self.build()
for compdb in self.get_compdb():
self.assertNotIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNone(m, msg=contents)
def test_apple_bitcode_modules(self):
'''
Same as above, just for shared_module()
'''
testdir = os.path.join(self.common_test_dir, '152 shared module resolving symbol in executable')
# Ensure that it builds even with bitcode enabled
self.init(testdir, extra_args='-Db_bitcode=true')
self.build()
self.run_tests()
def _get_darwin_versions(self, fname):
fname = os.path.join(self.builddir, fname)
out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
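# otool -L prints 'name (compatibility version X, current version Y)';
# the second output line describes the library itself.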
m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
self.assertIsNotNone(m, msg=out)
return m.groups()
@skipIfNoPkgconfig
def test_library_versioning(self):
'''
Ensure that compatibility_version and current_version are set correctly
'''
testdir = os.path.join(self.platform_test_dir, '2 library versions')
self.init(testdir)
self.build()
targets = {}
for t in self.introspect('--targets'):
targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
def test_duplicate_rpath(self):
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
# We purposely pass a duplicate rpath to Meson, in order
# to ascertain that Meson does not call install_name_tool
# with duplicate -delete_rpath arguments, which would
# lead to erroring out on installation
env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"}
self.init(testdir, override_envvars=env)
self.build()
self.install()
def test_removing_unused_linker_args(self):
testdir = os.path.join(self.common_test_dir, '108 has arg')
env = {'CFLAGS': '-L/tmp -L /var/tmp -headerpad_max_install_names -Wl,-export_dynamic -framework Foundation'}
self.init(testdir, override_envvars=env)
@unittest.skipUnless(not is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
def test_pic(self):
'''
Test that -fPIC is correctly added to static libraries when b_staticpic
is true and not when it is false. This can't be an ordinary test case
because we need to inspect the compiler database.
'''
if is_windows() or is_cygwin() or is_osx():
raise unittest.SkipTest('PIC not relevant')
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir)
compdb = self.get_compdb()
self.assertIn('-fPIC', compdb[0]['command'])
self.setconf('-Db_staticpic=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fPIC', compdb[0]['command'])
@mock.patch.dict(os.environ)
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
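        # self.privatedir is where Meson writes the generated .pc files, so
        # pointing PKG_CONFIG_LIBDIR there lets pkg-config find them without
        # installing anything.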
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
libhello_nolib = PkgConfigDependency('libhello_nolib', env, kwargs)
self.assertTrue(libhello_nolib.found())
self.assertEqual(libhello_nolib.get_link_args(), [])
self.assertEqual(libhello_nolib.get_compile_args(), [])
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1})
privatedir2 = self.privatedir
env = {
'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2]),
'PKG_CONFIG_SYSTEM_LIBRARY_PATH': '/usr/lib',
}
self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env)
        # pkg-config strips some duplicated flags so we have to parse the
        # generated file ourselves.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'pub-lib-order']
out = self._run(cmd + ['--libs'], override_envvars=env).strip().split()
self.assertEqual(out, ['-llibmain2', '-llibinternal'])
        # See common/47 pkgconfig-gen/meson.build for a description of the case this test covers.
with open(os.path.join(privatedir1, 'simple2.pc')) as f:
content = f.read()
self.assertIn('Libs: -L${libdir} -lsimple2 -lz -lsimple1', content)
with open(os.path.join(privatedir1, 'simple3.pc')) as f:
content = f.read()
self.assertEqual(1, content.count('-lsimple3'))
with open(os.path.join(privatedir1, 'simple5.pc')) as f:
content = f.read()
self.assertNotIn('-lstat2', content)
@mock.patch.dict(os.environ)
def test_pkgconfig_uninstalled(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
self.build()
os.environ['PKG_CONFIG_LIBDIR'] = os.path.join(self.builddir, 'meson-uninstalled')
if is_cygwin():
os.environ['PATH'] += os.pathsep + self.builddir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir)
self.build()
self.run_tests()
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
        self.assertNotIn('blub_blob_blib', pcfile)
def test_vala_c_warnings(self):
'''
Test that no warnings are emitted for C code generated by Vala. This
can't be an ordinary test case because we need to inspect the compiler
database.
https://github.com/mesonbuild/meson/issues/864
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '5 target glib')
self.init(testdir)
compdb = self.get_compdb()
vala_command = None
c_command = None
for each in compdb:
if each['file'].endswith('GLib.Thread.c'):
vala_command = each['command']
elif each['file'].endswith('GLib.Thread.vala'):
continue
elif each['file'].endswith('retcode.c'):
c_command = each['command']
else:
m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
raise AssertionError(m)
self.assertIsNotNone(vala_command)
self.assertIsNotNone(c_command)
# -w suppresses all warnings, should be there in Vala but not in C
self.assertIn(" -w ", vala_command)
self.assertNotIn(" -w ", c_command)
# -Wall enables all warnings, should be there in C but not in Vala
self.assertNotIn(" -Wall ", vala_command)
self.assertIn(" -Wall ", c_command)
# -Werror converts warnings to errors, should always be there since it's
# injected by an unrelated piece of code and the project has werror=true
self.assertIn(" -Werror ", vala_command)
self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
'''
Test that qt4 and qt5 detection with pkgconfig works.
'''
# Verify Qt4 or Qt5 can be found with pkg-config
qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=pkg-config'])
# Confirm that the dependency was found with pkg-config
mesonlog = self.get_meson_log()
if qt4 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n')
if qt5 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
def test_qt5dependency_qmake_detection(self):
'''
Test that qt5 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt5
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n')
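    # Meson creates per-target private directories with a '.p' suffix next to
    # the build outputs; filter them out so only the actual .so files and
    # symlinks are counted.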
def glob_sofiles_without_privdir(self, g):
files = glob(g)
return [f for f in files if not f.endswith('.p')]
def _test_soname_impl(self, libpath, install):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
testdir = os.path.join(self.unit_test_dir, '1 soname')
self.init(testdir)
self.build()
if install:
self.install()
# File without aliases set.
nover = os.path.join(libpath, 'libnover.so')
self.assertPathExists(nover)
self.assertFalse(os.path.islink(nover))
self.assertEqual(get_soname(nover), 'libnover.so')
self.assertEqual(len(self.glob_sofiles_without_privdir(nover[:-3] + '*')), 1)
# File with version set
verset = os.path.join(libpath, 'libverset.so')
self.assertPathExists(verset + '.4.5.6')
self.assertEqual(os.readlink(verset), 'libverset.so.4')
self.assertEqual(get_soname(verset), 'libverset.so.4')
self.assertEqual(len(self.glob_sofiles_without_privdir(verset[:-3] + '*')), 3)
# File with soversion set
soverset = os.path.join(libpath, 'libsoverset.so')
self.assertPathExists(soverset + '.1.2.3')
self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(len(self.glob_sofiles_without_privdir(soverset[:-3] + '*')), 2)
# File with version and soversion set to same values
settosame = os.path.join(libpath, 'libsettosame.so')
self.assertPathExists(settosame + '.7.8.9')
self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(len(self.glob_sofiles_without_privdir(settosame[:-3] + '*')), 2)
# File with version and soversion set to different values
bothset = os.path.join(libpath, 'libbothset.so')
self.assertPathExists(bothset + '.1.2.3')
self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
self.assertEqual(len(self.glob_sofiles_without_privdir(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
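        # The install dir acts as a DESTDIR-style staging area, so the on-disk
        # libdir is the staging dir with the absolute prefix/libdir appended.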
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
def test_compiler_check_flags_order(self):
'''
Test that compiler check flags override all other flags. This can't be
an ordinary test case because it needs the environment to be set.
'''
testdir = os.path.join(self.common_test_dir, '39 has function')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
Oflag = '-O3'
OflagCPP = Oflag
if cpp.get_id() in ('clang', 'gcc'):
# prevent developers from adding "int main(int argc, char **argv)"
# to small Meson checks unless these parameters are actually used
OflagCPP += ' -Werror=unused-parameter'
env = {'CFLAGS': Oflag,
'CXXFLAGS': OflagCPP}
self.init(testdir, override_envvars=env)
cmds = self.get_meson_log_compiler_checks()
for cmd in cmds:
if cmd[0] == 'ccache':
cmd = cmd[1:]
# Verify that -I flags from the `args` kwarg are first
# This is set in the '39 has function' test case
self.assertEqual(cmd[1], '-I/tmp')
# Verify that -O3 set via the environment is overridden by -O0
Oargs = [arg for arg in cmd if arg.startswith('-O')]
self.assertEqual(Oargs, [Oflag, '-O0'])
def _test_stds_impl(self, testdir, compiler, p: str):
has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0'))
has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
# Check that all the listed -std=xxx options for this compiler work just fine when used
# https://en.wikipedia.org/wiki/Xcode#Latest_versions
# https://www.gnu.org/software/gcc/projects/cxx-status.html
for v in compiler.get_options()['std'].choices:
lang_std = p + '_std'
            # We do it like this to handle gnu++17,c++17 and gnu17,c17 cleanly:
            # 'gnu++17' also contains '17', so the C++ branches must be checked first.
if '++17' in v and not has_cpp17:
continue
elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support
continue
# now C
elif '17' in v and not has_cpp2a_c17:
continue
elif '18' in v and not has_c18:
continue
std_opt = '{}={}'.format(lang_std, v)
self.init(testdir, extra_args=['-D' + std_opt])
cmd = self.get_compdb()[0]['command']
# c++03 and gnu++03 are not understood by ICC, don't try to look for them
skiplist = frozenset([
('intel', 'c++03'),
('intel', 'gnu++03')])
if v != 'none' and not (compiler.get_id(), v) in skiplist:
cmd_std = " -std={} ".format(v)
self.assertIn(cmd_std, cmd)
try:
self.build()
except Exception:
print('{} was {!r}'.format(lang_std, v))
raise
self.wipe()
# Check that an invalid std option in CFLAGS/CPPFLAGS fails
# Needed because by default ICC ignores invalid options
cmd_std = '-std=FAIL'
if p == 'c':
env_flag_name = 'CFLAGS'
elif p == 'cpp':
env_flag_name = 'CXXFLAGS'
else:
raise NotImplementedError('Language {} not defined.'.format(p))
env = {}
env[env_flag_name] = cmd_std
with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
msg='C compiler should have failed with -std=FAIL'):
            self.init(testdir, override_envvars=env)
        # ICC won't fail in the above because additional flags are needed
        # to make it treat unknown -std=... options as errors.
self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cc, 'c')
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cpp, 'cpp')
def test_unity_subproj(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir, extra_args='--unity=subprojects')
pdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/simpletest*.p'))
self.assertEqual(len(pdirs), 1)
self.assertPathExists(os.path.join(pdirs[0], 'simpletest-unity0.c'))
sdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/*sublib*.p'))
self.assertEqual(len(sdirs), 1)
self.assertPathExists(os.path.join(sdirs[0], 'sublib-unity0.c'))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
def test_installed_modes(self):
'''
Test that files installed by these tests have the correct permissions.
Can't be an ordinary test because our installed_files.txt is very basic.
'''
# Test file modes
testdir = os.path.join(self.common_test_dir, '12 data')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'rw------T'
self.assertEqual(want_mode, found_mode[1:])
f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-sr-x'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
            # chown fails nonfatally when not running as root, so only
            # check ownership when we are root
self.assertEqual(0, statf.st_uid)
self.assertEqual(0, statf.st_gid)
f = os.path.join(self.installdir, 'usr', 'share', 'progname',
'fileobject_datafile.dat')
orig = os.path.join(testdir, 'fileobject_datafile.dat')
statf = os.stat(f)
statorig = os.stat(orig)
found_mode = stat.filemode(statf.st_mode)
orig_mode = stat.filemode(statorig.st_mode)
self.assertEqual(orig_mode[1:], found_mode[1:])
self.assertEqual(os.getuid(), statf.st_uid)
if os.getuid() == 0:
            # chown fails nonfatally when not running as root, so only
            # check ownership when we are root
self.assertEqual(0, statf.st_gid)
self.wipe()
# Test directory modes
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-x--t'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
            # chown fails nonfatally when not running as root, so only
            # check ownership when we are root
self.assertEqual(0, statf.st_uid)
def test_installed_modes_extended(self):
'''
Test that files are installed with correct permissions using install_mode.
'''
testdir = os.path.join(self.common_test_dir, '195 install_mode')
self.init(testdir)
self.build()
self.install()
for fsobj, want_mode in [
('bin', 'drwxr-x---'),
('bin/runscript.sh', '-rwxr-sr-x'),
('bin/trivialprog', '-rwxr-sr-x'),
('include', 'drwxr-x---'),
('include/config.h', '-rw-rwSr--'),
('include/rootdir.h', '-r--r--r-T'),
('lib', 'drwxr-x---'),
('lib/libstat.a', '-rw---Sr--'),
('share', 'drwxr-x---'),
('share/man', 'drwxr-x---'),
('share/man/man1', 'drwxr-x---'),
('share/man/man1/foo.1', '-r--r--r-T'),
('share/sub1', 'drwxr-x---'),
('share/sub1/second.dat', '-rwxr-x--t'),
('subdir', 'drwxr-x---'),
('subdir/data.dat', '-rw-rwSr--'),
]:
f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(fsobj, want_mode, found_mode)))
# Ensure that introspect --installed works on all types of files
# FIXME: also verify the files list
self.introspect('--installed')
def test_install_umask(self):
'''
Test that files are installed with correct permissions using default
install umask of 022, regardless of the umask at time the worktree
was checked out or the build was executed.
'''
# Copy source tree to a temporary directory and change permissions
# there to simulate a checkout with umask 002.
orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
# Create a new testdir under tmpdir.
tmpdir = os.path.realpath(tempfile.mkdtemp())
self.addCleanup(windows_proof_rmtree, tmpdir)
testdir = os.path.join(tmpdir, '26 install umask')
# Copy the tree using shutil.copyfile, which will use the current umask
# instead of preserving permissions of the old tree.
save_umask = os.umask(0o002)
self.addCleanup(os.umask, save_umask)
shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
# Preserve the executable status of subdir/sayhello though.
os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
self.init(testdir)
# Run the build under a 027 umask now.
os.umask(0o027)
self.build()
# And keep umask 027 for the install step too.
self.install()
for executable in [
'bin/prog',
'share/subdir/sayhello',
]:
f = os.path.join(self.installdir, 'usr', *executable.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(executable, want_mode, found_mode)))
for directory in [
'usr',
'usr/bin',
'usr/include',
'usr/share',
'usr/share/man',
'usr/share/man/man1',
'usr/share/subdir',
]:
f = os.path.join(self.installdir, *directory.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'drwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected directory %s to have mode %s but found %s instead.' %
(directory, want_mode, found_mode)))
for datafile in [
'include/sample.h',
'share/datafile.cat',
'share/file.dat',
'share/man/man1/prog.1',
'share/subdir/datafile.dog',
]:
f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rw-r--r--'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(datafile, want_mode, found_mode)))
def test_cpp_std_override(self):
testdir = os.path.join(self.unit_test_dir, '6 std override')
self.init(testdir)
compdb = self.get_compdb()
# Don't try to use -std=c++03 as a check for the
# presence of a compiler flag, as ICC does not
# support it.
for i in compdb:
if 'prog98' in i['file']:
c98_comp = i['command']
if 'prog11' in i['file']:
c11_comp = i['command']
if 'progp' in i['file']:
plain_comp = i['command']
self.assertNotEqual(len(plain_comp), 0)
self.assertIn('-std=c++98', c98_comp)
self.assertNotIn('-std=c++11', c98_comp)
self.assertIn('-std=c++11', c11_comp)
self.assertNotIn('-std=c++98', c11_comp)
self.assertNotIn('-std=c++98', plain_comp)
self.assertNotIn('-std=c++11', plain_comp)
# Now werror
self.assertIn('-Werror', plain_comp)
self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
if is_cygwin() or is_osx():
raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
testdir = os.path.join(self.unit_test_dir, '7 run installed')
self.init(testdir)
self.build()
self.install()
installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
installed_libdir = os.path.join(self.installdir, 'usr/foo')
installed_lib = os.path.join(installed_libdir, 'libfoo.so')
self.assertTrue(os.path.isfile(installed_exe))
self.assertTrue(os.path.isdir(installed_libdir))
self.assertTrue(os.path.isfile(installed_lib))
# Must fail when run without LD_LIBRARY_PATH to ensure that
# rpath has been properly stripped rather than pointing to the builddir.
self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
# When LD_LIBRARY_PATH is set it should start working.
# For some reason setting LD_LIBRARY_PATH in os.environ fails
# when all tests are run (but works when only this test is run),
# but doing this explicitly works.
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
self.assertEqual(subprocess.call(installed_exe, env=env), 0)
# Ensure that introspect --installed works
installed = self.introspect('--installed')
for v in installed.values():
self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
# NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
# the flags before returning them to -Lfoo -Lbar -lfoo -lbar
        # but pkgconf does not seem to do that. Support both.
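        # Each tuple encodes only a relative ordering: the first flag must
        # appear before the second on the link line. That holds under both
        # pkg-config's reordering and pkgconf's original ordering.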
expected_order = [('-L/me/first', '-lfoo1'),
('-L/me/second', '-lfoo2'),
('-L/me/first', '-L/me/second'),
('-lfoo1', '-lfoo2'),
('-L/me/second', '-L/me/third'),
('-L/me/third', '-L/me/fourth',),
('-L/me/third', '-lfoo3'),
('-L/me/fourth', '-lfoo4'),
('-lfoo3', '-lfoo4'),
]
with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
for line in ifile:
if expected_order[0][0] in line:
for first, second in expected_order:
self.assertLess(line.index(first), line.index(second))
return
raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
        self.assertEqual(os.path.basename(docbook_target['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(docbook_target['target_sources'][0]['sources'][0]))
def test_introspect_installed(self):
testdir = os.path.join(self.linuxlike_test_dir, '7 library versions')
self.init(testdir)
install = self.introspect('--installed')
install = {os.path.basename(k): v for k, v in install.items()}
print(install)
if is_osx():
the_truth = {
'libmodule.dylib': '/usr/lib/libmodule.dylib',
'libnoversion.dylib': '/usr/lib/libnoversion.dylib',
'libonlysoversion.5.dylib': '/usr/lib/libonlysoversion.5.dylib',
'libonlysoversion.dylib': '/usr/lib/libonlysoversion.dylib',
'libonlyversion.1.dylib': '/usr/lib/libonlyversion.1.dylib',
'libonlyversion.dylib': '/usr/lib/libonlyversion.dylib',
'libsome.0.dylib': '/usr/lib/libsome.0.dylib',
'libsome.dylib': '/usr/lib/libsome.dylib',
}
the_truth_2 = {'/usr/lib/libsome.dylib',
'/usr/lib/libsome.0.dylib',
}
else:
the_truth = {
'libmodule.so': '/usr/lib/libmodule.so',
'libnoversion.so': '/usr/lib/libnoversion.so',
'libonlysoversion.so': '/usr/lib/libonlysoversion.so',
'libonlysoversion.so.5': '/usr/lib/libonlysoversion.so.5',
'libonlyversion.so': '/usr/lib/libonlyversion.so',
'libonlyversion.so.1': '/usr/lib/libonlyversion.so.1',
'libonlyversion.so.1.4.5': '/usr/lib/libonlyversion.so.1.4.5',
'libsome.so': '/usr/lib/libsome.so',
'libsome.so.0': '/usr/lib/libsome.so.0',
'libsome.so.1.2.3': '/usr/lib/libsome.so.1.2.3',
}
the_truth_2 = {'/usr/lib/libsome.so',
'/usr/lib/libsome.so.0',
'/usr/lib/libsome.so.1.2.3'}
self.assertDictEqual(install, the_truth)
targets = self.introspect('--targets')
for t in targets:
if t['name'] != 'some':
continue
self.assertSetEqual(the_truth_2, set(t['install_filename']))
def test_build_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
self.init(testdir)
self.build()
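        # The build-tree binary carries build_rpath; after installation only
        # the install_rpath remains.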
# C program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz')
# C++ program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz')
def test_global_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
if is_osx():
raise unittest.SkipTest('Global RPATHs via LDFLAGS not yet supported on MacOS (does anybody need it?)')
testdir = os.path.join(self.unit_test_dir, '80 global-rpath')
oldinstalldir = self.installdir
# Build and install an external library without DESTDIR.
# The external library generates a .pc file without an rpath.
yonder_dir = os.path.join(testdir, 'yonder')
yonder_prefix = os.path.join(oldinstalldir, 'yonder')
yonder_libdir = os.path.join(yonder_prefix, self.libdir)
self.prefix = yonder_prefix
self.installdir = yonder_prefix
self.init(yonder_dir)
self.build()
self.install(use_destdir=False)
# Since rpath has multiple valid formats we need to
# test that they are all properly used.
rpath_formats = [
('-Wl,-rpath=', False),
('-Wl,-rpath,', False),
('-Wl,--just-symbols=', True),
('-Wl,--just-symbols,', True),
('-Wl,-R', False),
('-Wl,-R,', False)
]
for rpath_format, exception in rpath_formats:
# Build an app that uses that installed library.
# Supply the rpath to the installed library via LDFLAGS
# (as systems like buildroot and guix are wont to do)
# and verify install preserves that rpath.
self.new_builddir()
env = {'LDFLAGS': rpath_format + yonder_libdir,
'PKG_CONFIG_PATH': os.path.join(yonder_libdir, 'pkgconfig')}
if exception:
with self.assertRaises(subprocess.CalledProcessError):
self.init(testdir, override_envvars=env)
continue
self.init(testdir, override_envvars=env)
self.build()
self.install(use_destdir=False)
got_rpath = get_rpath(os.path.join(yonder_prefix, 'bin/rpathified'))
self.assertEqual(got_rpath, yonder_libdir, rpath_format)
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
print(os.path.join(testdir, 'some_cross_tool.py'))
crossfile.write(textwrap.dedent('''\
[binaries]
c = '/usr/bin/{1}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
sometool.py = ['{0}']
someothertool.py = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
''').format(os.path.join(testdir, 'some_cross_tool.py'),
'gcc' if is_sunos() else 'cc'))
crossfile.flush()
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, extra_args=['-Db_coverage=true'], default_args=False)
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
def test_old_gnome_module_codepaths(self):
'''
A lot of code in the GNOME module is conditional on the version of the
glib tools that are installed, and breakages in the old code can slip
by once the CI has a newer glib version. So we force the GNOME module
to pretend that it's running on an ancient glib so the fallback code is
also tested.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
mesonbuild.modules.gnome.native_glib_version = '2.20'
env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"}
try:
self.init(testdir,
inprocess=True,
override_envvars=env)
self.build(override_envvars=env)
finally:
mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
raise unittest.SkipTest('Glib 2.0 dependency not available.')
with tempfile.TemporaryDirectory() as tempdirname:
self.init(testdir1, extra_args=['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
self.install(use_destdir=False)
shutil.rmtree(self.builddir)
os.mkdir(self.builddir)
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
lib_dir = os.path.join(tempdirname, 'lib')
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = pkg_dir
# Private internal libraries must not leak out.
pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv)
            self.assertNotIn(b'libpkgdep-int', pkg_out, 'Internal library leaked out.')
# Dependencies must not leak to cflags when building only a shared library.
pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv)
            self.assertNotIn(b'glib', pkg_out, 'Internal dependency leaked to headers.')
# Test that the result is usable.
self.init(testdir2, override_envvars=myenv)
self.build(override_envvars=myenv)
myenv = os.environ.copy()
myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
if is_cygwin():
bin_dir = os.path.join(tempdirname, 'bin')
myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
self.assertTrue(os.path.isdir(lib_dir))
test_exe = os.path.join(self.builddir, 'pkguser')
self.assertTrue(os.path.isfile(test_exe))
subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_relative_paths(self):
testdir = os.path.join(self.unit_test_dir, '62 pkgconfig relative paths')
pkg_dir = os.path.join(testdir, 'pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))
env = get_fake_env(testdir, self.builddir, self.prefix)
env.coredata.set_options({'pkg_config_path': pkg_dir}, subproject='')
kwargs = {'required': True, 'silent': True}
relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
self.assertTrue(relative_path_dep.found())
        # Ensure the relative -L path from the .pc file ends up in link_args,
        # anchored at the build dir
libpath = Path(self.builddir) / '../relativepath/lib'
link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
self.assertEqual(relative_path_dep.get_link_args(), link_args)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
        '''
        Check that a static library installed together with its generated
        pkg-config file can be consumed by a separate Meson project.
        '''
with tempfile.TemporaryDirectory() as tempdirname:
# build library
testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
testdirlib = os.path.join(testdirbase, 'lib')
self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--default-library=static'], default_args=False)
self.build()
self.install(use_destdir=False)
# build user of library
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_static_archive_stripping(self):
'''
Check that Meson produces valid static archives with --strip enabled
'''
with tempfile.TemporaryDirectory() as tempdirname:
testdirbase = os.path.join(self.unit_test_dir, '67 static archive stripping')
# build lib
self.new_builddir()
testdirlib = os.path.join(testdirbase, 'lib')
testlibprefix = os.path.join(tempdirname, 'libprefix')
self.init(testdirlib, extra_args=['--prefix=' + testlibprefix,
'--libdir=lib',
'--default-library=static',
'--buildtype=debug',
'--strip'], default_args=False)
self.build()
self.install(use_destdir=False)
# build executable (uses lib, fails if static archive has been stripped incorrectly)
pkg_dir = os.path.join(testlibprefix, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx() or is_openbsd():
            # On Windows, Cygwin, macOS, and OpenBSD, libintl is a separate library
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
testdir = os.path.join(self.unit_test_dir, '53 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
        self.assertLess(deps.index(b'-lsomething'), deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
raise unittest.SkipTest('rpath are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
if is_osx():
rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
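        # Note: '$$ORIGIN' because ninja escapes a literal '$' as '$$' in
        # build.ninja.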
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.src_root, 'test cases', 'native', '201 override with exe')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
@skipIfNoPkgconfig
def test_usage_external_library(self):
'''
Test that uninstalled usage of an external library (from the system or
PkgConfigDependency) works. On macOS, this workflow works out of the
        box. On Linux, BSDs, Windows, etc, you need to set extra arguments such
        as LD_LIBRARY_PATH, so the later parts of this test are skipped there.
The system library is found with cc.find_library() and pkg-config deps.
'''
oldprefix = self.prefix
# Install external library so we can find it
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
# install into installdir without using DESTDIR
installdir = self.installdir
self.prefix = installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
self.install(use_destdir=False)
## New builddir for the consumer
self.new_builddir()
env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir),
'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')}
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
# install into installdir without using DESTDIR
self.prefix = self.installdir
self.init(testdir, override_envvars=env)
self.prefix = oldprefix
self.build(override_envvars=env)
# test uninstalled
self.run_tests(override_envvars=env)
if not (is_osx() or is_linux()):
return
# test running after installation
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'prog')
self._run([prog])
if not is_osx():
# Rest of the workflow only works on macOS
return
out = self._run(['otool', '-L', prog])
self.assertNotIn('@rpath', out)
## New builddir for testing that DESTDIR is not added to install_name
self.new_builddir()
# install into installdir with DESTDIR
self.init(testdir, override_envvars=env)
self.build(override_envvars=env)
# test running after installation
self.install(override_envvars=env)
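        # With DESTDIR set, the on-disk layout is DESTDIR + prefix, hence the
        # plain string concatenation below.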
prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
for f in prog, lib:
out = self._run(['otool', '-L', f])
# Ensure that the otool output does not contain self.installdir
self.assertNotRegex(out, self.installdir + '.*dylib ')
@skipIfNoPkgconfig
def test_usage_pkgconfig_prefixes(self):
'''
Build and install two external libraries, to different prefixes,
then build and install a client program that finds them via pkgconfig,
and verify the installed client program runs.
'''
oldinstalldir = self.installdir
# Build and install both external libraries without DESTDIR
val1dir = os.path.join(self.unit_test_dir, '77 pkgconfig prefixes', 'val1')
val1prefix = os.path.join(oldinstalldir, 'val1')
self.prefix = val1prefix
self.installdir = val1prefix
self.init(val1dir)
self.build()
self.install(use_destdir=False)
self.new_builddir()
env1 = {}
env1['PKG_CONFIG_PATH'] = os.path.join(val1prefix, self.libdir, 'pkgconfig')
val2dir = os.path.join(self.unit_test_dir, '77 pkgconfig prefixes', 'val2')
val2prefix = os.path.join(oldinstalldir, 'val2')
self.prefix = val2prefix
self.installdir = val2prefix
self.init(val2dir, override_envvars=env1)
self.build()
self.install(use_destdir=False)
self.new_builddir()
# Build, install, and run the client program
env2 = {}
env2['PKG_CONFIG_PATH'] = os.path.join(val2prefix, self.libdir, 'pkgconfig')
testdir = os.path.join(self.unit_test_dir, '77 pkgconfig prefixes', 'client')
testprefix = os.path.join(oldinstalldir, 'client')
self.prefix = testprefix
self.installdir = testprefix
self.init(testdir, override_envvars=env2)
self.build()
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'client')
env3 = {}
if is_cygwin():
env3['PATH'] = os.path.join(val1prefix, 'bin') + \
os.pathsep + \
os.path.join(val2prefix, 'bin') + \
os.pathsep + os.environ['PATH']
out = self._run([prog], override_envvars=env3).strip()
# Expected output is val1 + val2 = 3
self.assertEqual(out, '3')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
with chdir(subdir):
# Can't distribute broken symlinks in the source tree because it breaks
# the creation of zipapps. Create it dynamically and run the test by
# hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('62 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '52 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
linker = cc.linker
if not linker.export_dynamic_args(env):
raise unittest.SkipTest('Not applicable for linkers without --export-dynamic')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
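        # If deduplication works, the flag appears on some line of build.ninja
        # but never more than once within a single line.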
with open(build_ninja, 'r', encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
def test_compiler_libs_static_dedup(self):
testdir = os.path.join(self.unit_test_dir, '56 dedup compiler libs')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
lines = f.readlines()
for lib in ('-ldl', '-lm', '-lc', '-lrt'):
for line in lines:
if lib not in line:
continue
                # Assert that the library appears exactly once on this line:
                # splitting on it must produce exactly two parts.
self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_noncross_options(self):
# C_std defined in project options must be in effect also when native compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir, extra_args=['-Dpkg_config_path=' + testdir])
compdb = self.get_compdb()
self.assertEqual(len(compdb), 2)
self.assertRegex(compdb[0]['command'], '-std=c99')
self.assertRegex(compdb[1]['command'], '-std=c99')
self.build()
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
nativefile = tempfile.NamedTemporaryFile(mode='w')
nativefile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'build_wrapper.py')))
nativefile.flush()
self.meson_native_file = nativefile.name
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir)
def test_identity_cross_env(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
env = {
'CC_FOR_BUILD': '"' + os.path.join(testdir, 'build_wrapper.py') + '"',
}
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir, override_envvars=env)
@skipIfNoPkgconfig
def test_static_link(self):
if is_cygwin():
raise unittest.SkipTest("Cygwin doesn't support LD_LIBRARY_PATH.")
# Build some libraries and install them
testdir = os.path.join(self.unit_test_dir, '68 static link/lib')
libdir = os.path.join(self.installdir, self.libdir)
oldprefix = self.prefix
self.prefix = self.installdir
self.init(testdir)
self.install(use_destdir=False)
        # Test that the installed libraries work
self.new_builddir()
self.prefix = oldprefix
meson_args = ['-Dc_link_args=-L{}'.format(libdir),
'--fatal-meson-warnings']
testdir = os.path.join(self.unit_test_dir, '68 static link')
env = {'PKG_CONFIG_LIBDIR': os.path.join(libdir, 'pkgconfig')}
self.init(testdir, extra_args=meson_args, override_envvars=env)
self.build()
self.run_tests()
def _check_ld(self, check: str, name: str, lang: str, expected: str) -> None:
if is_sunos():
raise unittest.SkipTest('Solaris currently cannot override the linker.')
if not shutil.which(check):
raise unittest.SkipTest('Could not find {}.'.format(check))
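        # evarMap maps '<lang>_ld' to the environment variable (e.g. CC_LD)
        # used to select the linker for that language.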
envvars = [mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]]
# Also test a deprecated variable if there is one.
if envvars[0] in mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP:
envvars.append(
mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP[envvars[0]])
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
if lang != 'rust' and comp.use_linker_args('bfd') == []:
raise unittest.SkipTest(
'Compiler {} does not support using alternative linkers'.format(comp.id))
self.assertEqual(comp.linker.id, expected)
def test_ld_environment_variable_bfd(self):
self._check_ld('ld.bfd', 'bfd', 'c', 'ld.bfd')
def test_ld_environment_variable_gold(self):
self._check_ld('ld.gold', 'gold', 'c', 'ld.gold')
def test_ld_environment_variable_lld(self):
self._check_ld('ld.lld', 'lld', 'c', 'ld.lld')
@skip_if_not_language('rust')
def test_ld_environment_variable_rust(self):
self._check_ld('ld.gold', 'gold', 'rust', 'ld.gold')
def test_ld_environment_variable_cpp(self):
self._check_ld('ld.gold', 'gold', 'cpp', 'ld.gold')
@skip_if_not_language('objc')
def test_ld_environment_variable_objc(self):
self._check_ld('ld.gold', 'gold', 'objc', 'ld.gold')
@skip_if_not_language('objcpp')
def test_ld_environment_variable_objcpp(self):
self._check_ld('ld.gold', 'gold', 'objcpp', 'ld.gold')
@skip_if_not_language('fortran')
def test_ld_environment_variable_fortran(self):
self._check_ld('ld.gold', 'gold', 'fortran', 'ld.gold')
@skip_if_not_language('d')
def test_ld_environment_variable_d(self):
# At least for me, ldc defaults to gold, and gdc defaults to bfd, so
# let's pick lld, which isn't the default for either (currently)
self._check_ld('ld.lld', 'lld', 'd', 'ld.lld')
def compute_sha256(self, filename):
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def test_wrap_with_file_url(self):
testdir = os.path.join(self.unit_test_dir, '74 wrap file url')
source_filename = os.path.join(testdir, 'subprojects', 'foo.tar.xz')
patch_filename = os.path.join(testdir, 'subprojects', 'foo-patch.tar.xz')
wrap_filename = os.path.join(testdir, 'subprojects', 'foo.wrap')
source_hash = self.compute_sha256(source_filename)
patch_hash = self.compute_sha256(patch_filename)
wrap = textwrap.dedent("""\
[wrap-file]
directory = foo
source_url = http://server.invalid/foo
source_fallback_url = file://{}
source_filename = foo.tar.xz
source_hash = {}
patch_url = http://server.invalid/foo
patch_fallback_url = file://{}
patch_filename = foo-patch.tar.xz
patch_hash = {}
""".format(source_filename, source_hash, patch_filename, patch_hash))
with open(wrap_filename, 'w') as f:
f.write(wrap)
self.init(testdir)
self.build()
self.run_tests()
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'packagecache'))
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'foo'))
os.unlink(wrap_filename)
def test_no_rpath_for_static(self):
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
self.build()
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertIsNone(build_rpath)
def test_lookup_system_after_broken_fallback(self):
# Just to generate libfoo.pc so we can test system dependency lookup.
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
privatedir = self.privatedir
        # Write a test project where the first dependency() returns not-found
        # because the 'broken' subproject does not exist, but that should not
        # prevent the second dependency() from being looked up on the system.
self.new_builddir()
with tempfile.TemporaryDirectory() as d:
with open(os.path.join(d, 'meson.build'), 'w') as f:
f.write(textwrap.dedent('''\
project('test')
dependency('notfound', fallback: 'broken', required: false)
dependency('libfoo', fallback: 'broken', required: true)
'''))
self.init(d, override_envvars={'PKG_CONFIG_LIBDIR': privatedir})
def test_as_link_whole(self):
testdir = os.path.join(self.unit_test_dir, '79 as link whole')
self.init(testdir)
with open(os.path.join(self.privatedir, 'bar1.pc')) as f:
content = f.read()
self.assertIn('-lfoo', content)
with open(os.path.join(self.privatedir, 'bar2.pc')) as f:
content = f.read()
self.assertNotIn('-lfoo', content)
class BaseLinuxCrossTests(BasePlatformTests):
# Don't pass --libdir when cross-compiling. We have tests that
# check whether meson auto-detects it correctly.
libdir = None
def should_run_cross_arm_tests():
return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm')
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BaseLinuxCrossTests):
'''
Tests that cross-compilation to Linux/ARM works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')
def test_cflags_cross_environment_pollution(self):
'''
Test that the CFLAGS environment variable does not pollute the cross
environment. This can't be an ordinary test case because we need to
inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'})
compdb = self.get_compdb()
self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])
def test_cross_file_overrides_always_args(self):
'''
Test that $lang_args in cross files always override get_always_args().
Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
architectures such as some Android versions and Raspbian.
https://github.com/mesonbuild/meson/issues/3049
https://github.com/mesonbuild/meson/issues/3089
'''
testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
self.build()
def test_cross_libdir(self):
# When cross compiling "libdir" should default to "lib"
# rather than "lib/x86_64-linux-gnu" or something like that.
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'lib')
return
        self.fail('Option libdir not in introspect data.')
def test_cross_libdir_subproject(self):
# Guard against a regression where calling "subproject"
# would reset the value of libdir to its default value.
testdir = os.path.join(self.unit_test_dir, '78 subdir libdir')
self.init(testdir, extra_args=['--libdir=fuf'])
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'fuf')
return
        self.fail('Libdir specified on command line gets reset.')
def test_std_remains(self):
# C_std defined in project options must be in effect also when cross compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
@skipIfNoPkgconfig
def test_pkg_config_option(self):
if not shutil.which('arm-linux-gnueabihf-pkg-config'):
raise unittest.SkipTest('Cross-pkgconfig not found.')
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
def should_run_cross_mingw_tests():
return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin())
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BaseLinuxCrossTests):
'''
Tests that cross-compilation to Windows/MinGW works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')
def test_exe_wrapper_behaviour(self):
'''
Test that an exe wrapper that isn't found doesn't cause compiler sanity
checks and compiler checks to fail, but causes configure to fail if it
requires running a cross-built executable (custom_target or run_target)
and causes the tests to be skipped if they are run.
'''
testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
# Configures, builds, and tests fine by default
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
os.mkdir(self.builddir)
# Change cross file to use a non-existing exe_wrapper and it should fail
self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
# Force tracebacks so we can detect them properly
env = {'MESON_FORCE_BACKTRACE': '1'}
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*target.*use-exe-wrapper'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Drun-target=false',
inprocess=True,
override_envvars=env)
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*run target.*run-prog'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Dcustom-target=false',
inprocess=True,
override_envvars=env)
self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'],
override_envvars=env)
self.build()
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*PATH'):
# Must run in-process or we'll get a generic CalledProcessError
self.run_tests(inprocess=True, override_envvars=env)
@skipIfNoPkgconfig
def test_cross_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
class PythonTests(BasePlatformTests):
'''
Tests that verify compilation of python extension modules
'''
def test_versions(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name))
testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
# No python version specified, this will use meson's python
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
        # When specifying a known name (python2 / python3), the module
        # will also try 'python' as a fallback and use it if the major
        # version matches
try:
self.init(testdir, extra_args=['-Dpython=python2'])
self.build()
self.run_tests()
except unittest.SkipTest:
            # python2 is not necessarily installed on the test machine. If it
            # is not, or the python headers can't be found, the test will raise
            # MESON_SKIP_TEST. We could check beforehand what version of python
            # is available, but that is the job of the module and a bit of a
            # chicken-and-egg situation, so we just ask for forgiveness rather
            # than permission.
pass
self.wipe()
for py in ('pypy', 'pypy3'):
try:
self.init(testdir, extra_args=['-Dpython=%s' % py])
except unittest.SkipTest:
# Same as above, pypy2 and pypy3 are not expected to be present
# on the test system, the test project only raises in these cases
continue
# We have a pypy, this is expected to work
self.build()
self.run_tests()
self.wipe()
# The test is configured to error out with MESON_SKIP_TEST
# in case it could not find python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=not-python'])
self.wipe()
# While dir is an external command on both Windows and Linux,
# it certainly isn't python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=dir'])
self.wipe()
class RewriterTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.maxDiff = None
def prime(self, dirname):
copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)
def rewrite_raw(self, directory, args):
if isinstance(args, str):
args = [args]
command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, timeout=60)
print('STDOUT:')
print(p.stdout)
print('STDERR:')
print(p.stderr)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
if not p.stderr:
return {}
return json.loads(p.stderr)
def rewrite(self, directory, args):
if isinstance(args, str):
args = [args]
return self.rewrite_raw(directory, ['command'] + args)
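    # A rewriter command document is a JSON list of operations, supplied
    # either inline or as a path to a JSON file. An illustrative single
    # operation, matching the format used by the tests below:
    #   [{"type": "target", "target": "trivialprog1",
    #     "operation": "src_add", "sources": ["a1.cpp"]}]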
def test_target_source_list(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_add_sources_abs(self):
self.prime('1 basic')
abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
self.rewrite(self.builddir, add)
out = self.rewrite(self.builddir, inf)
expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
self.assertDictEqual(out, expected)
def test_target_remove_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_subdir(self):
self.prime('2 subdirs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
self.assertDictEqual(list(out['target'].values())[0], expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(list(out['target'].values())[0], expected)
def test_target_remove(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
    def test_target_add(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_remove_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, {})
def test_target_add_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)
def test_target_source_sorting(self):
self.prime('5 sorting')
add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
        self.rewrite(self.builddir, add_json)
out = self.rewrite(self.builddir, inf_json)
expected = {
'target': {
'exe1@exe': {
'name': 'exe1',
'sources': [
'aaa/a/a1.c',
'aaa/b/b1.c',
'aaa/b/b2.c',
'aaa/f1.c',
'aaa/f2.c',
'aaa/f3.c',
'bbb/a/b1.c',
'bbb/b/b2.c',
'bbb/c1/b5.c',
'bbb/c2/b7.c',
'bbb/c10/b6.c',
'bbb/a4.c',
'bbb/b3.c',
'bbb/b4.c',
'bbb/b5.c',
'a1.c',
'a2.c',
'a3.c',
'a10.c',
'a20.c',
'a30.c',
'a100.c',
'a101.c',
'a110.c',
'a210.c',
'a666.c',
'b1.c',
'c2.c'
]
}
}
}
self.assertDictEqual(out, expected)
def test_target_same_name_skip(self):
self.prime('4 same name targets')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'myExe', 'sources': ['main.cpp']}
self.assertEqual(len(out['target']), 2)
for val in out['target'].values():
self.assertDictEqual(expected, val)
def test_kwargs_info(self):
self.prime('3 kwargs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
'dependency#dep1': {'required': True, 'method': 'cmake'}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_add(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': 'GPL'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove_regex(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {},
'target#tgt1': {},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
self.current_config = 0
self.current_wrapper = 0
def helper_create_native_file(self, values):
"""Create a config file as a temporary file.
        values should be a nested dictionary structure of
        {section: {key: value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
if isinstance(v, (bool, int, float)):
f.write("{}={}\n".format(k, v))
elif isinstance(v, list):
f.write("{}=[{}]\n".format(k, ', '.join(["'{}'".format(w) for w in v])))
else:
f.write("{}='{}'\n".format(k, v))
return filename
def helper_create_binary_wrapper(self, binary, dir_=None, extra_args=None, **kwargs):
"""Creates a wrapper around a binary that overrides specific values."""
filename = os.path.join(dir_ or self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper))
extra_args = extra_args or {}
self.current_wrapper += 1
if is_haiku():
chbang = '#!/bin/env python3'
else:
chbang = '#!/usr/bin/env python3'
with open(filename, 'wt') as f:
f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
for name in chain(extra_args, kwargs):
f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
f.write(' args, extra_args = parser.parse_known_args()\n')
for name, value in chain(extra_args.items(), kwargs.items()):
f.write(' if args.{}:\n'.format(name))
f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
f.write(' sys.exit(0)\n')
f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ret.stdout.decode('utf-8'))
print(ret.stderr.decode('utf-8'), file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
if not is_windows():
os.chmod(filename, 0o755)
return filename
# On windows we need yet another level of indirection, as cmd cannot
# invoke python files itself, so instead we generate a .bat file, which
# invokes our python wrapper
batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper))
with open(batfile, 'wt') as f:
f.write(r'@{} {} %*'.format(sys.executable, filename))
return batfile
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
"""Helper for generating tests for overriding compilers for langaugages
with more than one implementation, such as C, C++, ObjC, ObjC++, and D.
"""
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, for_machine)
cc = getter()
binary, newid = cb(cc)
env.binaries[for_machine].binaries[lang] = binary
compiler = getter()
self.assertEqual(compiler.id, newid)
def test_multiple_native_files_override(self):
wrapper = self.helper_create_binary_wrapper('bash', version='foo')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
# This test hangs on cygwin.
    @unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non-Unix OSes.')
def test_native_file_is_pipe(self):
fifo = os.path.join(self.builddir, 'native.file')
os.mkfifo(fifo)
with tempfile.TemporaryDirectory() as d:
wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')
def filler():
with open(fifo, 'w') as f:
f.write('[binaries]\n')
f.write("bash = '{}'\n".format(wrapper))
thread = threading.Thread(target=filler)
thread.start()
self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
thread.join()
os.unlink(fifo)
self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('python')
config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def _simple_test(self, case, binary, entry=None):
wrapper = self.helper_create_binary_wrapper(binary, version='12345')
config = self.helper_create_native_file({'binaries': {entry or binary: wrapper}})
self.init(self.testcase, extra_args=['--native-file', config, '-Dcase={}'.format(case)])
def test_find_program(self):
self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
# Do the skip at this level to avoid screwing up the cache
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
if not shutil.which('llvm-config'):
            raise unittest.SkipTest('llvm-config not installed, cannot test')
self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
self._simple_test('python3', 'python3')
def test_python_module(self):
if is_windows():
# Bat adds extra crap to stdout, so the version check logic in the
# python module breaks. This is fine on other OSes because they
# don't need the extra indirection.
raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
elif is_osx():
binary = 'python'
else:
binary = 'python2'
        # We may not have python2, so check for it
for v in ['2', '2.7', '-2.7']:
rc = subprocess.call(['pkg-config', '--cflags', 'python{}'.format(v)],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if rc == 0:
break
else:
raise unittest.SkipTest('Not running Python 2 tests because dev packages not installed.')
self._simple_test('python', binary, entry='python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'lcc':
if shutil.which('lfortran'):
return 'lfortran', 'lcc'
raise unittest.SkipTest('No alternate Fortran implementation.')
elif comp.id == 'gcc':
if shutil.which('ifort'):
# There is an ICC for windows (windows build, linux host),
                    # but we don't support that ATM so let's not worry about it.
if is_windows():
return 'ifort', 'intel-cl'
return 'ifort', 'intel'
elif shutil.which('flang'):
return 'flang', 'flang'
elif shutil.which('pgfortran'):
return 'pgfortran', 'pgi'
# XXX: there are several other fortran compilers meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang, binary, version_str, version):
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, MachineChoice.HOST)
env.binaries.host.binaries[lang] = wrapper
compiler = getter()
self.assertEqual(compiler.version, version)
@skip_if_not_language('vala')
@skip_if_env_set('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_set('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr',
extra_args={'Xlinker': 'macosx_version. PROJECT:ld - 1.2.3'})
env = get_fake_env()
env.binaries.host.binaries['swift'] = wrapper
compiler = env.detect_swift_compiler(MachineChoice.HOST)
self.assertEqual(compiler.version, '1.2345')
def test_native_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
def test_native_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib'])
def test_compile_sys_path(self):
"""Compiling with a native file stored in a system path works.
There was a bug which caused the paths to be stored incorrectly and
would result in ninja invoking meson in an infinite loop. This tests
for that by actually invoking ninja.
"""
testcase = os.path.join(self.common_test_dir, '1 trivial')
# It really doesn't matter what's in the native file, just that it exists
config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
self.init(testcase, extra_args=['--native-file', config])
self.build()
def test_user_options(self):
testcase = os.path.join(self.common_test_dir, '43 options')
for opt, value in [('testoption', 'some other val'), ('other_one', True),
('combo_opt', 'one'), ('array_opt', ['two']),
('integer_opt', 0),
('CaseSenSiTivE', 'SOME other Value'),
('CASESENSITIVE', 'some other Value')]:
config = self.helper_create_native_file({'project options': {opt: value}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_user_options_command_line_overrides(self):
testcase = os.path.join(self.common_test_dir, '43 options')
config = self.helper_create_native_file({'project options': {'other_one': True}})
self.init(testcase, extra_args=['--native-file', config, '-Dother_one=false'])
def test_user_options_subproject(self):
testcase = os.path.join(self.unit_test_dir, '79 user options for subproject')
s = os.path.join(testcase, 'subprojects')
if not os.path.exists(s):
os.mkdir(s)
s = os.path.join(s, 'sub')
if not os.path.exists(s):
sub = os.path.join(self.common_test_dir, '43 options')
shutil.copytree(sub, s)
for opt, value in [('testoption', 'some other val'), ('other_one', True),
('combo_opt', 'one'), ('array_opt', ['two']),
('integer_opt', 0)]:
config = self.helper_create_native_file({'sub:project options': {opt: value}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_option_bool(self):
# Bools are allowed to be unquoted
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({'built-in options': {'werror': True}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if 'werror' in each['name']:
self.assertEqual(each['value'], True)
break
else:
self.fail('Did not find werror in build options?')
def test_option_integer(self):
        # Integers are allowed to be unquoted
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({'built-in options': {'unity_size': 100}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if 'unity_size' in each['name']:
self.assertEqual(each['value'], 100)
break
else:
self.fail('Did not find unity_size in build options?')
def test_builtin_options(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_native_file({'built-in options': {'cpp_std': 'c++14'}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++14')
break
else:
            self.fail('Did not find cpp_std in build options?')
def test_builtin_options_env_overrides_conf(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_native_file({'built-in options': {'pkg_config_path': '/foo'}})
self.init(testcase, extra_args=['--native-file', config], override_envvars={'PKG_CONFIG_PATH': '/bar'})
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/bar'])
break
else:
self.fail('Did not find pkg_config_path in build options?')
def test_builtin_options_subprojects(self):
testcase = os.path.join(self.common_test_dir, '102 subproject subdir')
config = self.helper_create_native_file({'built-in options': {'default_library': 'both', 'c_args': ['-Dfoo']}, 'sub:built-in options': {'default_library': 'static'}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
            # Test that non-per-subproject options are inherited from the parent
            if 'c_args' in each['name']:
                # This path is hit twice, once for the build machine and once
                # for the host machine.
self.assertEqual(each['value'], ['-Dfoo'])
found += 1
elif each['name'] == 'default_library':
self.assertEqual(each['value'], 'both')
found += 1
elif each['name'] == 'sub:default_library':
self.assertEqual(each['value'], 'static')
found += 1
        self.assertEqual(found, 4, 'Did not find all four options')
def test_builtin_options_subprojects_overrides_buildfiles(self):
# If the buildfile says subproject(... default_library: shared), ensure that's overwritten
testcase = os.path.join(self.common_test_dir, '230 persubproject options')
config = self.helper_create_native_file({'sub2:built-in options': {'default_library': 'shared'}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
        self.assertIn('Parent should override default_library', cm.exception.stdout)
def test_builtin_options_subprojects_inherits_parent_override(self):
# If the buildfile says subproject(... default_library: shared), ensure that's overwritten
testcase = os.path.join(self.common_test_dir, '230 persubproject options')
config = self.helper_create_native_file({'built-in options': {'default_library': 'both'}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
        self.assertIn('Parent should override default_library', cm.exception.stdout)
def test_builtin_options_compiler_properties(self):
# the properties section can have lang_args, and those need to be
# overwritten by the built-in options
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'c_args': ['-DFOO']},
'properties': {'c_args': ['-DBAR']},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'c_args':
self.assertEqual(each['value'], ['-DFOO'])
break
else:
self.fail('Did not find c_args in build options?')
def test_builtin_options_compiler_properties_legacy(self):
# The legacy placement in properties is still valid if a 'built-in
# options' setting is present, but doesn't have the lang_args
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'default_library': 'static'},
'properties': {'c_args': ['-DBAR']},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'c_args':
self.assertEqual(each['value'], ['-DBAR'])
break
else:
self.fail('Did not find c_args in build options?')
def test_builtin_options_paths(self):
        # the paths section can have bindir, and it needs to be
        # overwritten by the built-in options
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'bindir': 'foo'},
'paths': {'bindir': 'bar'},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'bindir':
self.assertEqual(each['value'], 'foo')
break
else:
self.fail('Did not find bindir in build options?')
def test_builtin_options_paths_legacy(self):
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'default_library': 'static'},
'paths': {'bindir': 'bar'},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'bindir':
self.assertEqual(each['value'], 'bar')
break
else:
self.fail('Did not find bindir in build options?')
class CrossFileTests(BasePlatformTests):
"""Tests for cross file functionality not directly related to
cross compiling.
    This is mainly aimed at testing overrides from cross files.
"""
def setUp(self):
super().setUp()
self.current_config = 0
self.current_wrapper = 0
def _cross_file_generator(self, *, needs_exe_wrapper: bool = False,
exe_wrapper: T.Optional[T.List[str]] = None) -> str:
if is_windows():
raise unittest.SkipTest('Cannot run this test on non-mingw/non-cygwin windows')
if is_sunos():
cc = 'gcc'
else:
cc = 'cc'
return textwrap.dedent("""\
[binaries]
c = '/usr/bin/{}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
{}
[properties]
needs_exe_wrapper = {}
[host_machine]
system = 'linux'
cpu_family = 'x86'
cpu = 'i686'
endian = 'little'
""".format(cc,
'exe_wrapper = {}'.format(str(exe_wrapper)) if exe_wrapper is not None else '',
needs_exe_wrapper))
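    # The generated file pins the host machine to 32-bit Linux, so Meson
    # treats the configuration as a cross build even on a native x86_64 host.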
def _stub_exe_wrapper(self) -> str:
return textwrap.dedent('''\
#!/usr/bin/env python3
import subprocess
import sys
sys.exit(subprocess.run(sys.argv[1:]).returncode)
''')
def test_needs_exe_wrapper_true(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=True))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
out = self.run_target('test')
self.assertRegex(out, r'Skipped:\s*1\s*\n')
def test_needs_exe_wrapper_false(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=False))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
out = self.run_target('test')
self.assertNotRegex(out, r'Skipped:\s*1\n')
def test_needs_exe_wrapper_true_wrapper(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
s = Path(d) / 'wrapper.py'
with s.open('wt') as f:
f.write(self._stub_exe_wrapper())
s.chmod(0o774)
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(
needs_exe_wrapper=True,
exe_wrapper=[str(s)]))
self.init(testdir, extra_args=['--cross-file=' + str(p), '-Dexpect=true'])
out = self.run_target('test')
self.assertRegex(out, r'Ok:\s*3\s*\n')
def test_cross_exe_passed_no_wrapper(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=True))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
self.build()
out = self.run_target('test')
self.assertRegex(out, r'Skipped:\s*1\s*\n')
# The test uses mocking and thus requires that the current process is the
# one to run the Meson steps. If we are using an external test executable
# (most commonly in Debian autopkgtests) then the mocking won't work.
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
def test_cross_file_system_paths(self):
if is_windows():
raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
testdir = os.path.join(self.common_test_dir, '1 trivial')
cross_content = self._cross_file_generator()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
os.environ.pop('XDG_DATA_HOME', None)
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
            # If XDG_DATA_HOME is set in the environment running the
            # tests, this test will fail, so mock the environment, pop
            # it, then test
with mock.patch.dict(os.environ):
os.environ.pop('XDG_DATA_HOME', None)
with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
def helper_create_cross_file(self, values):
"""Create a config file as a temporary file.
        values should be a nested dictionary structure of
        {section: {key: value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
f.write("{}='{}'\n".format(k, v))
return filename
def test_cross_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib',
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_chain(self):
# crossfile2 overrides crossfile overrides nativefile
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'--cross-file', os.path.join(testcase, 'crossfile2'),
'-Ddef_bindir=binbar2',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_user_options(self):
        # This is just a smoke test for cross files, since the implementation
        # shares code after loading from the files
testcase = os.path.join(self.common_test_dir, '43 options')
config = self.helper_create_cross_file({'project options': {'testoption': 'some other value'}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--cross-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_builtin_options(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_cross_file({'built-in options': {'cpp_std': 'c++14'}})
self.init(testcase, extra_args=['--cross-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++14')
break
else:
self.fail('No c++ standard set?')
def test_builtin_options_per_machine(self):
"""Test options that are allowed to be set on a per-machine basis.
        Such options can be passed twice, once for the build machine and
        once for the host machine. I've picked pkg-config path, but any
        option that can be set for both machines would do.
"""
testcase = os.path.join(self.common_test_dir, '2 cpp')
cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/cross/path', 'cpp_std': 'c++17'}})
native = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/native/path', 'cpp_std': 'c++14'}})
# Ensure that PKG_CONFIG_PATH is not set in the environment
with mock.patch.dict('os.environ'):
for k in ['PKG_CONFIG_PATH', 'PKG_CONFIG_PATH_FOR_BUILD']:
try:
del os.environ[k]
except KeyError:
pass
self.init(testcase, extra_args=['--cross-file', cross, '--native-file', native])
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/cross/path'])
found += 1
elif each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++17')
found += 1
elif each['name'] == 'build.pkg_config_path':
self.assertEqual(each['value'], ['/native/path'])
found += 1
elif each['name'] == 'build.cpp_std':
self.assertEqual(each['value'], 'c++14')
found += 1
if found == 4:
break
self.assertEqual(found, 4, 'Did not find all sections.')
def test_builtin_options_env_overrides_conf(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/foo'}})
cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/foo'}})
self.init(testcase, extra_args=['--native-file', config, '--cross-file', cross],
override_envvars={'PKG_CONFIG_PATH': '/bar', 'PKG_CONFIG_PATH_FOR_BUILD': '/dir'})
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/bar'])
found += 1
elif each['name'] == 'build.pkg_config_path':
self.assertEqual(each['value'], ['/dir'])
found += 1
if found == 2:
break
self.assertEqual(found, 2, 'Did not find all sections.')
class TAPParserTests(unittest.TestCase):
def assert_test(self, events, **kwargs):
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Test(**kwargs))
def assert_plan(self, events, **kwargs):
if 'skipped' not in kwargs:
kwargs['skipped'] = False
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Plan(**kwargs))
def assert_version(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Version(**kwargs))
def assert_error(self, events):
self.assertEqual(type(next(events)), TAPParser.Error)
def assert_bailout(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
def assert_last(self, events):
with self.assertRaises(StopIteration):
next(events)
def parse_tap(self, s):
parser = TAPParser(io.StringIO(s))
return iter(parser.parse())
def parse_tap_v13(self, s):
events = self.parse_tap('TAP version 13\n' + s)
self.assert_version(events, version=13)
return events
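    # For reference, a minimal TAP stream looks like this (illustrative):
    #   1..2
    #   ok 1 first
    #   not ok 2 second # TODO expected failure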
def test_empty(self):
events = self.parse_tap('')
self.assert_last(events)
def test_empty_plan(self):
events = self.parse_tap('1..0')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_last(events)
def test_plan_directive(self):
events = self.parse_tap('1..0 # skipped for some reason')
self.assert_plan(events, count=0, late=False, skipped=True,
explanation='for some reason')
self.assert_last(events)
events = self.parse_tap('1..1 # skipped for some reason\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=True,
explanation='for some reason')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('1..1 # todo not supported here\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=False,
explanation='not supported here')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_ok(self):
events = self.parse_tap('ok')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_number(self):
events = self.parse_tap('ok 1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_name(self):
events = self.parse_tap('ok 1 abc')
self.assert_test(events, number=1, name='abc', result=TestResult.OK)
self.assert_last(events)
def test_one_test_not_ok(self):
events = self.parse_tap('not ok')
self.assert_test(events, number=1, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_one_test_todo(self):
events = self.parse_tap('not ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_one_test_skip(self):
events = self.parse_tap('ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
def test_one_test_skip_failure(self):
events = self.parse_tap('not ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
self.assert_last(events)
def test_many_early_plan(self):
events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
self.assert_plan(events, count=4, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_many_late_plan(self):
events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_plan(events, count=4, late=True)
self.assert_last(events)
def test_directive_case(self):
events = self.parse_tap('ok 1 abc # skip')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_directive_explanation(self):
events = self.parse_tap('ok 1 abc # skip why')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
explanation='why')
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo Because')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
explanation='Because')
self.assert_last(events)
def test_one_test_early_plan(self):
events = self.parse_tap('1..1\nok')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_late_plan(self):
events = self.parse_tap('ok\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few_bailout(self):
events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_bailout(events, message='no third test')
self.assert_last(events)
def test_diagnostics(self):
events = self.parse_tap('1..1\n# ignored\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_empty_line(self):
events = self.parse_tap('1..1\n\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_unexpected(self):
events = self.parse_tap('1..1\ninvalid\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_version(self):
events = self.parse_tap('TAP version 13\n')
self.assert_version(events, version=13)
self.assert_last(events)
events = self.parse_tap('TAP version 12\n')
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..0\nTAP version 13\n')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_error(events)
self.assert_last(events)
def test_yaml(self):
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_last(events)
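# Taken together, the cases above exercise the TAP constructs this parser
# handles: plan lines ("1..N", early or late), "ok"/"not ok" results,
# "Bail out!" messages, "#" diagnostic lines, blank lines, the
# "TAP version 13" header, and indented "---"/"..." YAML blocks.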
class SubprojectsCommandTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.root_dir = Path(self.builddir)
self.project_dir = self.root_dir / 'src'
self._create_project(self.project_dir)
self.subprojects_dir = self.project_dir / 'subprojects'
os.makedirs(str(self.subprojects_dir))
def _create_project(self, path, project_name='dummy'):
os.makedirs(str(path), exist_ok=True)
with open(str(path / 'meson.build'), 'w') as f:
f.write("project('{}')".format(project_name))
def _git(self, cmd, workdir):
return git(cmd, str(workdir), check=True)[1].strip()
def _git_config(self, workdir):
self._git(['config', 'user.name', 'Meson Test'], workdir)
self._git(['config', 'user.email', 'meson.test@example.com'], workdir)
def _git_remote(self, cmd, name):
return self._git(cmd, self.root_dir / name)
def _git_local(self, cmd, name):
return self._git(cmd, self.subprojects_dir / name)
def _git_local_branch(self, name):
        # Same as `git branch --show-current` but compatible with older git versions
branch = self._git_local(['rev-parse', '--abbrev-ref', 'HEAD'], name)
return branch if branch != 'HEAD' else ''
def _git_local_commit(self, name, ref='HEAD'):
return self._git_local(['rev-parse', ref], name)
def _git_remote_commit(self, name, ref='HEAD'):
return self._git_remote(['rev-parse', ref], name)
def _git_create_repo(self, path):
self._create_project(path)
self._git(['init'], path)
self._git_config(path)
self._git(['add', '.'], path)
self._git(['commit', '-m', 'Initial commit'], path)
def _git_create_remote_repo(self, name):
self._git_create_repo(self.root_dir / name)
def _git_create_local_repo(self, name):
self._git_create_repo(self.subprojects_dir / name)
def _git_create_remote_commit(self, name, branch):
self._git_remote(['checkout', branch], name)
self._git_remote(['commit', '--allow-empty', '-m', 'initial {} commit'.format(branch)], name)
def _git_create_remote_branch(self, name, branch):
self._git_remote(['checkout', '-b', branch], name)
self._git_remote(['commit', '--allow-empty', '-m', 'initial {} commit'.format(branch)], name)
def _git_create_remote_tag(self, name, tag):
self._git_remote(['commit', '--allow-empty', '-m', 'tag {} commit'.format(tag)], name)
self._git_remote(['tag', tag], name)
def _wrap_create_git(self, name, revision='master'):
path = self.root_dir / name
with open(str((self.subprojects_dir / name).with_suffix('.wrap')), 'w') as f:
f.write(textwrap.dedent(
'''
[wrap-git]
url={}
revision={}
'''.format(os.path.abspath(str(path)), revision)))
def _wrap_create_file(self, name, tarball='dummy.tar.gz'):
path = self.root_dir / tarball
with open(str((self.subprojects_dir / name).with_suffix('.wrap')), 'w') as f:
f.write(textwrap.dedent(
'''
[wrap-file]
source_url={}
'''.format(os.path.abspath(str(path)))))
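    # For reference, _wrap_create_git('sub1') writes subprojects/sub1.wrap
    # containing roughly the following (the url is an absolute local path
    # in these tests):
    #
    #   [wrap-git]
    #   url=/abs/path/to/sub1
    #   revision=master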
def _subprojects_cmd(self, args):
return self._run(self.meson_command + ['subprojects'] + args, workdir=str(self.project_dir))
def test_git_update(self):
subp_name = 'sub1'
# Create a fake remote git repository and a wrap file. Checks that
# "meson subprojects download" works.
self._git_create_remote_repo(subp_name)
self._wrap_create_git(subp_name)
self._subprojects_cmd(['download'])
self.assertPathExists(str(self.subprojects_dir / subp_name))
self._git_config(self.subprojects_dir / subp_name)
# Create a new remote branch and update the wrap file. Checks that
# "meson subprojects update --reset" checkout the new branch.
self._git_create_remote_branch(subp_name, 'newbranch')
self._wrap_create_git(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
        # Update remote newbranch. Checks that the new commit is pulled into
        # the existing local newbranch. Make sure it does not print a spurious
        # 'git stash' message.
self._git_create_remote_commit(subp_name, 'newbranch')
out = self._subprojects_cmd(['update', '--reset'])
self.assertNotIn('No local changes to save', out)
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
        # Update remote newbranch and switch to another branch. Checks that it
        # switches the current branch back to newbranch and pulls the latest
        # commit.
self._git_local(['checkout', 'master'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
        # Stage some local changes, then update. Checks that the local changes
        # get stashed.
self._create_project(self.subprojects_dir / subp_name, 'new_project_name')
self._git_local(['add', '.'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
self.assertTrue(self._git_local(['stash', 'list'], subp_name))
# Create a new remote tag and update the wrap file. Checks that
# "meson subprojects update --reset" checkout the new tag in detached mode.
self._git_create_remote_tag(subp_name, 'newtag')
self._wrap_create_git(subp_name, 'newtag')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), '')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newtag'))
# Create a new remote commit and update the wrap file with the commit id.
# Checks that "meson subprojects update --reset" checkout the new commit
# in detached mode.
self._git_local(['checkout', 'master'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
new_commit = self._git_remote(['rev-parse', 'HEAD'], subp_name)
self._wrap_create_git(subp_name, new_commit)
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), '')
self.assertEqual(self._git_local_commit(subp_name), new_commit)
        # Create a local project not in a git repository, then update it with
        # a git wrap. Without --reset it should print an error message and
        # return failure. With --reset it should delete the existing project
        # and clone the new one.
subp_name = 'sub2'
self._create_project(self.subprojects_dir / subp_name)
self._git_create_remote_repo(subp_name)
self._wrap_create_git(subp_name)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self._subprojects_cmd(['update'])
self.assertIn('Not a git repository', cm.exception.output)
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name))
@skipIfNoExecutable('true')
def test_foreach(self):
self._create_project(self.subprojects_dir / 'sub_file')
self._wrap_create_file('sub_file')
self._git_create_local_repo('sub_git')
self._wrap_create_git('sub_git')
self._git_create_local_repo('sub_git_no_wrap')
def ran_in(s):
ret = []
prefix = 'Executing command in '
for l in s.splitlines():
if l.startswith(prefix):
ret.append(l[len(prefix):])
return sorted(ret)
dummy_cmd = ['true']
out = self._subprojects_cmd(['foreach'] + dummy_cmd)
self.assertEqual(ran_in(out), sorted(['subprojects/sub_file', 'subprojects/sub_git', 'subprojects/sub_git_no_wrap']))
out = self._subprojects_cmd(['foreach', '--types', 'git,file'] + dummy_cmd)
self.assertEqual(ran_in(out), sorted(['subprojects/sub_file', 'subprojects/sub_git']))
out = self._subprojects_cmd(['foreach', '--types', 'file'] + dummy_cmd)
self.assertEqual(ran_in(out), ['subprojects/sub_file'])
out = self._subprojects_cmd(['foreach', '--types', 'git'] + dummy_cmd)
self.assertEqual(ran_in(out), ['subprojects/sub_git'])
def _clang_at_least(compiler, minver: str, apple_minver: str) -> bool:
"""
    Check that the Clang compiler is at least a specified version, whether AppleClang or regular Clang.
Parameters
----------
compiler:
Meson compiler object
minver: str
Clang minimum version
apple_minver: str
        AppleClang minimum version
Returns
-------
at_least: bool
Clang is at least the specified version
"""
if isinstance(compiler, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler)):
return version_compare(compiler.version, apple_minver)
return version_compare(compiler.version, minver)
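# A hypothetical call site (the version strings follow meson's
# version_compare syntax, e.g. '>=10.0'; `cc` stands in for a compiler
# object obtained from the environment):
#
#   if _clang_at_least(cc, '>=10.0', '>=12.0'):
#       ...  # rely on a feature added in clang 10 / AppleClang 12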
def unset_envs():
# For unit tests we must fully control all command lines
# so that there are no unexpected changes coming from the
# environment, for example when doing a package build.
varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.cflags_mapping.values())
for v in varnames:
if v in os.environ:
del os.environ[v]
def convert_args(argv):
# If we got passed a list of tests, pass it on
pytest_args = ['-v'] if '-v' in argv else []
test_list = []
for arg in argv:
if arg.startswith('-'):
if arg in ('-f', '--failfast'):
arg = '--exitfirst'
pytest_args.append(arg)
continue
# ClassName.test_name => 'ClassName and test_name'
if '.' in arg:
arg = ' and '.join(arg.split('.'))
test_list.append(arg)
if test_list:
pytest_args += ['-k', ' or '.join(test_list)]
return pytest_args
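# For example, assuming the mapping above:
#   convert_args(['AllPlatformTests.test_foo', 'DataTests'])
# returns
#   ['-k', 'AllPlatformTests and test_foo or DataTests']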
def running_single_tests(argv, cases):
'''
Check whether we only got arguments for running individual tests, not
entire testcases, and not all testcases (no test args).
'''
got_test_arg = False
for arg in argv:
if arg.startswith('-'):
continue
for case in cases:
if not arg.startswith(case):
continue
if '.' not in arg:
# Got a testcase, done
return False
got_test_arg = True
return got_test_arg
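# For example, with `cases` as defined in main() below (the test names
# here are hypothetical):
#   running_single_tests(['InternalTests.test_foo'], cases)  # True
#   running_single_tests(['InternalTests'], cases)           # False: whole case
#   running_single_tests(['-v'], cases)                      # False: no test args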
def main():
unset_envs()
cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
'TAPParserTests', 'SubprojectsCommandTests',
'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
'WindowsTests', 'DarwinTests']
    # Don't use pytest-xdist when running single unit tests, since it wastes
    # time spawning a lot of worker processes in that case.
if not running_single_tests(sys.argv, cases):
try:
import pytest # noqa: F401
# Need pytest-xdist for `-n` arg
import xdist # noqa: F401
pytest_args = ['-n', 'auto', './run_unittests.py']
pytest_args += convert_args(sys.argv[1:])
return subprocess.run(python_command + ['-m', 'pytest'] + pytest_args).returncode
except ImportError:
print('pytest-xdist not found, using unittest instead')
# Fallback to plain unittest.
return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
print('Meson build system', mesonbuild.coredata.version, 'Unit Tests')
start = time.monotonic()
try:
raise SystemExit(main())
finally:
print('Total time: {:.3f} seconds'.format(time.monotonic() - start))
|
DPPO.py
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import gym, threading, queue
from tgym.envs import SpreadTrading
from test import get_CSV_data
EP_MAX = 3455  # 6910
EP_LEN = 3455
N_WORKER = 4 # parallel workers
GAMMA = 0.9 # reward discount factor
A_LR = 0.00001 # learning rate for actor
C_LR = 0.00002 # learning rate for critic
MIN_BATCH_SIZE = 32 # minimum batch size for updating PPO
UPDATE_STEP = 20 # loop update operation n-steps
EPSILON = 0.2 # for clipping surrogate objective
GAME = 'Pendulum-v0'
S_DIM, A_DIM = 18, 3 # state and action dimension
class PPO(object):
def __init__(self):
self.sess = tf.Session()
self.tfs = tf.placeholder(tf.float32, [None, S_DIM], 'state')
# critic
l1 = tf.layers.dense(self.tfs, 100, tf.nn.relu)
self.v = tf.layers.dense(l1, 1)
self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
self.advantage = self.tfdc_r - self.v
self.closs = tf.reduce_mean(tf.square(self.advantage))
self.ctrain_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs)
# actor
pi, pi_params = self._build_anet('pi', trainable=True)
oldpi, oldpi_params = self._build_anet('oldpi', trainable=False)
self.sample_op = tf.squeeze(pi.sample(1), axis=0) # operation of choosing action
self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(pi_params, oldpi_params)]
self.tfa = tf.placeholder(tf.float32, [None, A_DIM], 'action')
self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
# ratio = tf.exp(pi.log_prob(self.tfa) - oldpi.log_prob(self.tfa))
ratio = pi.prob(self.tfa) / (oldpi.prob(self.tfa) + 1e-5)
surr = ratio * self.tfadv # surrogate loss
self.aloss = -tf.reduce_mean(tf.minimum( # clipped surrogate objective
surr,
tf.clip_by_value(ratio, 1. - EPSILON, 1. + EPSILON) * self.tfadv))
self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)
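        # The loss above is the standard PPO clipped surrogate objective,
        #   L_CLIP = E[min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t)],
        # where r_t = pi(a|s) / pi_old(a|s) is the probability ratio
        # (computed above with a small constant in the denominator for
        # numerical stability).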
self.sess.run(tf.global_variables_initializer())
def update(self):
global GLOBAL_UPDATE_COUNTER
while not COORD.should_stop():
if GLOBAL_EP < EP_MAX:
UPDATE_EVENT.wait() # wait until get batch of data
self.sess.run(self.update_oldpi_op) # copy pi to old pi
data = [QUEUE.get() for _ in range(QUEUE.qsize())] # collect data from all workers
data = np.vstack(data)
s, a, r = data[:, :S_DIM], data[:, S_DIM: S_DIM + A_DIM], data[:, -1:]
adv = self.sess.run(self.advantage, {self.tfs: s, self.tfdc_r: r})
                # update actor and critic in an update loop
[self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv}) for _ in range(UPDATE_STEP)]
[self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r}) for _ in range(UPDATE_STEP)]
UPDATE_EVENT.clear() # updating finished
GLOBAL_UPDATE_COUNTER = 0 # reset counter
ROLLING_EVENT.set() # set roll-out available
def _build_anet(self, name, trainable):
with tf.variable_scope(name):
l1 = tf.layers.dense(self.tfs, 200, tf.nn.relu, trainable=trainable)
mu = 2 * tf.layers.dense(l1, A_DIM, tf.nn.tanh, trainable=trainable)
sigma = tf.layers.dense(l1, A_DIM, tf.nn.softplus, trainable=trainable)
norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params
def choose_action(self, s):
#print("=========")
#print("s: ", s)
#print("=========")
s = s[np.newaxis, :]
a = self.sess.run(self.sample_op, {self.tfs: s})[0]
act = np.zeros(A_DIM)
act[np.argmax(a)] = 1
return act
def get_v(self, s):
if s.ndim < 2: s = s[np.newaxis, :]
return self.sess.run(self.v, {self.tfs: s})[0, 0]
class Worker(object):
def __init__(self, wid):
trading_fee = .007
time_fee = .0073
history_length = 1
#self.env = gym.make(GAME).unwrapped
generator = get_CSV_data(filename="./test_6.csv")
self.env = SpreadTrading(spread_coefficients=[1],
data_generator=generator,
trading_fee=trading_fee,
time_fee=time_fee,
history_length=history_length)
self.wid = wid
self.ppo = GLOBAL_PPO
def work(self):
global GLOBAL_EP, GLOBAL_RUNNING_R, GLOBAL_UPDATE_COUNTER
while not COORD.should_stop():
s = self.env.reset()
#print("=======")
#print(s)
#print("========")
ep_r = 0
buffer_s, buffer_a, buffer_r = [], [], []
for t in range(EP_LEN):
if not ROLLING_EVENT.is_set(): # while global PPO is updating
ROLLING_EVENT.wait() # wait until PPO is updated
buffer_s, buffer_a, buffer_r = [], [], [] # clear history buffer, use new policy to collect data
a = self.ppo.choose_action(s)
#print("=========")
#print("a: ", a)
#print("=========")
s_, r, done, _ = self.env.step(a)
buffer_s.append(s)
buffer_a.append(a)
                buffer_r.append((r + 8) / 8)  # normalize reward; found to be useful
s = s_
ep_r += r
                GLOBAL_UPDATE_COUNTER += 1  # count toward the minimum batch size; no need to wait for other workers
if t == EP_LEN - 1 or GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
v_s_ = self.ppo.get_v(s_)
discounted_r = [] # compute discounted reward
for r in buffer_r[::-1]:
v_s_ = r + GAMMA * v_s_
discounted_r.append(v_s_)
discounted_r.reverse()
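                    # Worked example: with GAMMA = 0.9, buffer_r = [1, 1] and
                    # bootstrap v_s_ = 0, this yields discounted_r = [1.9, 1.0]
                    # (each entry is r + GAMMA * the value of its successor).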
bs, ba, br = np.vstack(buffer_s), np.vstack(buffer_a), np.array(discounted_r)[:, np.newaxis]
buffer_s, buffer_a, buffer_r = [], [], []
QUEUE.put(np.hstack((bs, ba, br))) # put data in the queue
if GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
ROLLING_EVENT.clear() # stop collecting data
UPDATE_EVENT.set() # globalPPO update
if GLOBAL_EP >= EP_MAX: # stop training
COORD.request_stop()
break
# record reward changes, plot later
if len(GLOBAL_RUNNING_R) == 0: GLOBAL_RUNNING_R.append(ep_r)
else: GLOBAL_RUNNING_R.append(GLOBAL_RUNNING_R[-1]*0.9+ep_r*0.1)
GLOBAL_EP += 1
print('{0:.1f}%'.format(GLOBAL_EP/EP_MAX*100), '|W%i' % self.wid, '|Ep_r: %.2f' % ep_r,)
if __name__ == '__main__':
GLOBAL_PPO = PPO()
UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
UPDATE_EVENT.clear() # not update now
ROLLING_EVENT.set() # start to roll out
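    # The two events form a simple handshake: workers roll out while
    # ROLLING_EVENT is set; once enough samples are queued they clear it
    # and set UPDATE_EVENT, PPO.update() trains, then flips the flags back.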
workers = [Worker(wid=i) for i in range(N_WORKER)]
GLOBAL_UPDATE_COUNTER, GLOBAL_EP = 0, 0
GLOBAL_RUNNING_R = []
COORD = tf.train.Coordinator()
QUEUE = queue.Queue() # workers putting data in this queue
threads = []
for worker in workers: # worker threads
t = threading.Thread(target=worker.work, args=())
t.start() # training
threads.append(t)
# add a PPO updating thread
threads.append(threading.Thread(target=GLOBAL_PPO.update,))
threads[-1].start()
COORD.join(threads)
# plot reward change and test
plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
    plt.xlabel('Episode')
    plt.ylabel('Moving reward')
    plt.ion()
    plt.show()
#env = gym.make('Pendulum-v0')
trading_fee = .007
time_fee = .00724
history_length = 1
generator = get_CSV_data(filename="./test_6.csv")
env = SpreadTrading(spread_coefficients=[1],
data_generator=generator,
trading_fee=trading_fee,
time_fee=time_fee,
history_length=history_length)
while True:
s = env.reset()
for t in range(3455):
env.render()
s = env.step(GLOBAL_PPO.choose_action(s))[0]
|
twisted_test.py
|
# Author: Ovidiu Predescu
# Date: July 2011
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# pylint: skip-file
"""
Unittest for the twisted-style reactor.
"""
from __future__ import absolute_import, division, print_function
import logging
import os
import shutil
import signal
import sys
import tempfile
import threading
import warnings
from salt.ext.tornado.escape import utf8
from salt.ext.tornado import gen
from salt.ext.tornado.httpclient import AsyncHTTPClient
from salt.ext.tornado.httpserver import HTTPServer
from salt.ext.tornado.ioloop import IOLoop
from salt.ext.tornado.platform.auto import set_close_exec
from salt.ext.tornado.platform.select import SelectIOLoop
from salt.ext.tornado.testing import bind_unused_port
from salt.ext.tornado.test.util import unittest
from salt.ext.tornado.util import import_object, PY3
from salt.ext.tornado.web import RequestHandler, Application
try:
import fcntl
from twisted.internet.defer import Deferred, inlineCallbacks, returnValue # type: ignore
from twisted.internet.interfaces import IReadDescriptor, IWriteDescriptor # type: ignore
from twisted.internet.protocol import Protocol # type: ignore
from twisted.python import log # type: ignore
from salt.ext.tornado.platform.twisted import TornadoReactor, TwistedIOLoop
from zope.interface import implementer # type: ignore
have_twisted = True
except ImportError:
have_twisted = False
# The core of Twisted 12.3.0 is available on python 3, but twisted.web is
# not, so test for it separately.
try:
from twisted.web.client import Agent, readBody # type: ignore
from twisted.web.resource import Resource # type: ignore
from twisted.web.server import Site # type: ignore
# As of Twisted 15.0.0, twisted.web is present but fails our
# tests due to internal str/bytes errors.
have_twisted_web = sys.version_info < (3,)
except ImportError:
have_twisted_web = False
if PY3:
import _thread as thread
else:
import thread
skipIfNoTwisted = unittest.skipUnless(have_twisted,
"twisted module not present")
skipIfPy26 = unittest.skipIf(sys.version_info < (2, 7),
"twisted incompatible with singledispatch in py26")
def save_signal_handlers():
saved = {}
for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGCHLD]:
saved[sig] = signal.getsignal(sig)
if "twisted" in repr(saved):
if not issubclass(IOLoop.configured_class(), TwistedIOLoop):
# when the global ioloop is twisted, we expect the signal
# handlers to be installed. Otherwise, it means we're not
# cleaning up after twisted properly.
raise Exception("twisted signal handlers already installed")
return saved
def restore_signal_handlers(saved):
for sig, handler in saved.items():
signal.signal(sig, handler)
class ReactorTestCase(unittest.TestCase):
def setUp(self):
self._saved_signals = save_signal_handlers()
self._io_loop = IOLoop()
self._reactor = TornadoReactor(self._io_loop)
def tearDown(self):
self._io_loop.close(all_fds=True)
restore_signal_handlers(self._saved_signals)
@skipIfNoTwisted
class ReactorWhenRunningTest(ReactorTestCase):
def test_whenRunning(self):
self._whenRunningCalled = False
self._anotherWhenRunningCalled = False
self._reactor.callWhenRunning(self.whenRunningCallback)
self._reactor.run()
self.assertTrue(self._whenRunningCalled)
self.assertTrue(self._anotherWhenRunningCalled)
def whenRunningCallback(self):
self._whenRunningCalled = True
self._reactor.callWhenRunning(self.anotherWhenRunningCallback)
self._reactor.stop()
def anotherWhenRunningCallback(self):
self._anotherWhenRunningCalled = True
@skipIfNoTwisted
class ReactorCallLaterTest(ReactorTestCase):
def test_callLater(self):
self._laterCalled = False
self._now = self._reactor.seconds()
self._timeout = 0.001
dc = self._reactor.callLater(self._timeout, self.callLaterCallback)
self.assertEqual(self._reactor.getDelayedCalls(), [dc])
self._reactor.run()
self.assertTrue(self._laterCalled)
self.assertTrue(self._called - self._now > self._timeout)
self.assertEqual(self._reactor.getDelayedCalls(), [])
def callLaterCallback(self):
self._laterCalled = True
self._called = self._reactor.seconds()
self._reactor.stop()
@skipIfNoTwisted
class ReactorTwoCallLaterTest(ReactorTestCase):
def test_callLater(self):
self._later1Called = False
self._later2Called = False
self._now = self._reactor.seconds()
self._timeout1 = 0.0005
dc1 = self._reactor.callLater(self._timeout1, self.callLaterCallback1)
self._timeout2 = 0.001
dc2 = self._reactor.callLater(self._timeout2, self.callLaterCallback2)
self.assertTrue(self._reactor.getDelayedCalls() == [dc1, dc2] or
self._reactor.getDelayedCalls() == [dc2, dc1])
self._reactor.run()
self.assertTrue(self._later1Called)
self.assertTrue(self._later2Called)
self.assertTrue(self._called1 - self._now > self._timeout1)
self.assertTrue(self._called2 - self._now > self._timeout2)
self.assertEqual(self._reactor.getDelayedCalls(), [])
def callLaterCallback1(self):
self._later1Called = True
self._called1 = self._reactor.seconds()
def callLaterCallback2(self):
self._later2Called = True
self._called2 = self._reactor.seconds()
self._reactor.stop()
@skipIfNoTwisted
class ReactorCallFromThreadTest(ReactorTestCase):
def setUp(self):
super(ReactorCallFromThreadTest, self).setUp()
self._mainThread = thread.get_ident()
def tearDown(self):
self._thread.join()
super(ReactorCallFromThreadTest, self).tearDown()
def _newThreadRun(self):
self.assertNotEqual(self._mainThread, thread.get_ident())
if hasattr(self._thread, 'ident'): # new in python 2.6
self.assertEqual(self._thread.ident, thread.get_ident())
self._reactor.callFromThread(self._fnCalledFromThread)
def _fnCalledFromThread(self):
self.assertEqual(self._mainThread, thread.get_ident())
self._reactor.stop()
def _whenRunningCallback(self):
self._thread = threading.Thread(target=self._newThreadRun)
self._thread.start()
def testCallFromThread(self):
self._reactor.callWhenRunning(self._whenRunningCallback)
self._reactor.run()
@skipIfNoTwisted
class ReactorCallInThread(ReactorTestCase):
def setUp(self):
super(ReactorCallInThread, self).setUp()
self._mainThread = thread.get_ident()
def _fnCalledInThread(self, *args, **kwargs):
self.assertNotEqual(thread.get_ident(), self._mainThread)
self._reactor.callFromThread(lambda: self._reactor.stop())
def _whenRunningCallback(self):
self._reactor.callInThread(self._fnCalledInThread)
def testCallInThread(self):
self._reactor.callWhenRunning(self._whenRunningCallback)
self._reactor.run()
if have_twisted:
@implementer(IReadDescriptor)
class Reader(object):
def __init__(self, fd, callback):
self._fd = fd
self._callback = callback
def logPrefix(self):
return "Reader"
def close(self):
self._fd.close()
def fileno(self):
return self._fd.fileno()
def readConnectionLost(self, reason):
self.close()
def connectionLost(self, reason):
self.close()
def doRead(self):
self._callback(self._fd)
@implementer(IWriteDescriptor)
class Writer(object):
def __init__(self, fd, callback):
self._fd = fd
self._callback = callback
def logPrefix(self):
return "Writer"
def close(self):
self._fd.close()
def fileno(self):
return self._fd.fileno()
def connectionLost(self, reason):
self.close()
def doWrite(self):
self._callback(self._fd)
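    # Reader and Writer are minimal IReadDescriptor / IWriteDescriptor
    # implementations wrapping one end of a pipe; ReactorReaderWriterTest
    # below registers them to exercise addReader/addWriter on the reactor.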
@skipIfNoTwisted
class ReactorReaderWriterTest(ReactorTestCase):
def _set_nonblocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def setUp(self):
super(ReactorReaderWriterTest, self).setUp()
r, w = os.pipe()
self._set_nonblocking(r)
self._set_nonblocking(w)
set_close_exec(r)
set_close_exec(w)
self._p1 = os.fdopen(r, "rb", 0)
self._p2 = os.fdopen(w, "wb", 0)
def tearDown(self):
super(ReactorReaderWriterTest, self).tearDown()
self._p1.close()
self._p2.close()
def _testReadWrite(self):
"""
        In this test the writer writes an 'x' to its fd. The reader
        reads it, checks the value, and ends the test.
"""
self.shouldWrite = True
def checkReadInput(fd):
            self.assertEqual(fd.read(1), b'x')
self._reactor.stop()
def writeOnce(fd):
if self.shouldWrite:
self.shouldWrite = False
fd.write(b'x')
self._reader = Reader(self._p1, checkReadInput)
self._writer = Writer(self._p2, writeOnce)
self._reactor.addWriter(self._writer)
# Test that adding the reader twice adds it only once to
# IOLoop.
self._reactor.addReader(self._reader)
self._reactor.addReader(self._reader)
def testReadWrite(self):
self._reactor.callWhenRunning(self._testReadWrite)
self._reactor.run()
def _testNoWriter(self):
"""
In this test we have no writer. Make sure the reader doesn't
read anything.
"""
def checkReadInput(fd):
self.fail("Must not be called.")
def stopTest():
# Close the writer here since the IOLoop doesn't know
# about it.
self._writer.close()
self._reactor.stop()
self._reader = Reader(self._p1, checkReadInput)
# We create a writer, but it should never be invoked.
self._writer = Writer(self._p2, lambda fd: fd.write('x'))
# Test that adding and removing the writer leaves us with no writer.
self._reactor.addWriter(self._writer)
self._reactor.removeWriter(self._writer)
# Test that adding and removing the reader doesn't cause
# unintended effects.
self._reactor.addReader(self._reader)
# Wake up after a moment and stop the test
self._reactor.callLater(0.001, stopTest)
def testNoWriter(self):
self._reactor.callWhenRunning(self._testNoWriter)
self._reactor.run()
# Test various combinations of twisted and tornado http servers,
# http clients, and event loop interfaces.
@skipIfNoTwisted
@unittest.skipIf(not have_twisted_web, 'twisted web not present')
class CompatibilityTests(unittest.TestCase):
def setUp(self):
self.saved_signals = save_signal_handlers()
self.io_loop = IOLoop()
self.io_loop.make_current()
self.reactor = TornadoReactor(self.io_loop)
def tearDown(self):
self.reactor.disconnectAll()
self.io_loop.clear_current()
self.io_loop.close(all_fds=True)
restore_signal_handlers(self.saved_signals)
def start_twisted_server(self):
class HelloResource(Resource):
isLeaf = True
def render_GET(self, request):
return "Hello from twisted!"
site = Site(HelloResource())
port = self.reactor.listenTCP(0, site, interface='127.0.0.1')
self.twisted_port = port.getHost().port
def start_tornado_server(self):
class HelloHandler(RequestHandler):
def get(self):
self.write("Hello from salt.ext.tornado!")
app = Application([('/', HelloHandler)],
log_function=lambda x: None)
server = HTTPServer(app, io_loop=self.io_loop)
sock, self.tornado_port = bind_unused_port()
server.add_sockets([sock])
def run_ioloop(self):
self.stop_loop = self.io_loop.stop
self.io_loop.start()
self.reactor.fireSystemEvent('shutdown')
def run_reactor(self):
self.stop_loop = self.reactor.stop
self.stop = self.reactor.stop
self.reactor.run()
def tornado_fetch(self, url, runner):
responses = []
client = AsyncHTTPClient(self.io_loop)
def callback(response):
responses.append(response)
self.stop_loop()
client.fetch(url, callback=callback)
runner()
self.assertEqual(len(responses), 1)
responses[0].rethrow()
return responses[0]
def twisted_fetch(self, url, runner):
# http://twistedmatrix.com/documents/current/web/howto/client.html
chunks = []
client = Agent(self.reactor)
d = client.request(b'GET', utf8(url))
class Accumulator(Protocol):
def __init__(self, finished):
self.finished = finished
def dataReceived(self, data):
chunks.append(data)
def connectionLost(self, reason):
self.finished.callback(None)
def callback(response):
finished = Deferred()
response.deliverBody(Accumulator(finished))
return finished
d.addCallback(callback)
def shutdown(failure):
if hasattr(self, 'stop_loop'):
self.stop_loop()
elif failure is not None:
# loop hasn't been initialized yet; try our best to
# get an error message out. (the runner() interaction
# should probably be refactored).
try:
failure.raiseException()
except:
logging.error('exception before starting loop', exc_info=True)
d.addBoth(shutdown)
runner()
self.assertTrue(chunks)
return ''.join(chunks)
def twisted_coroutine_fetch(self, url, runner):
body = [None]
@gen.coroutine
def f():
# This is simpler than the non-coroutine version, but it cheats
# by reading the body in one blob instead of streaming it with
# a Protocol.
client = Agent(self.reactor)
response = yield client.request(b'GET', utf8(url))
with warnings.catch_warnings():
# readBody has a buggy DeprecationWarning in Twisted 15.0:
# https://twistedmatrix.com/trac/changeset/43379
warnings.simplefilter('ignore', category=DeprecationWarning)
body[0] = yield readBody(response)
self.stop_loop()
self.io_loop.add_callback(f)
runner()
return body[0]
def testTwistedServerTornadoClientIOLoop(self):
self.start_twisted_server()
response = self.tornado_fetch(
'http://127.0.0.1:%d' % self.twisted_port, self.run_ioloop)
self.assertEqual(response.body, 'Hello from twisted!')
def testTwistedServerTornadoClientReactor(self):
self.start_twisted_server()
response = self.tornado_fetch(
'http://127.0.0.1:%d' % self.twisted_port, self.run_reactor)
self.assertEqual(response.body, 'Hello from twisted!')
def testTornadoServerTwistedClientIOLoop(self):
self.start_tornado_server()
response = self.twisted_fetch(
'http://127.0.0.1:%d' % self.tornado_port, self.run_ioloop)
self.assertEqual(response, 'Hello from salt.ext.tornado!')
def testTornadoServerTwistedClientReactor(self):
self.start_tornado_server()
response = self.twisted_fetch(
'http://127.0.0.1:%d' % self.tornado_port, self.run_reactor)
self.assertEqual(response, 'Hello from salt.ext.tornado!')
@skipIfPy26
def testTornadoServerTwistedCoroutineClientIOLoop(self):
self.start_tornado_server()
response = self.twisted_coroutine_fetch(
'http://127.0.0.1:%d' % self.tornado_port, self.run_ioloop)
self.assertEqual(response, 'Hello from salt.ext.tornado!')
@skipIfNoTwisted
@skipIfPy26
class ConvertDeferredTest(unittest.TestCase):
def test_success(self):
@inlineCallbacks
def fn():
if False:
# inlineCallbacks doesn't work with regular functions;
# must have a yield even if it's unreachable.
yield
returnValue(42)
f = gen.convert_yielded(fn())
self.assertEqual(f.result(), 42)
def test_failure(self):
@inlineCallbacks
def fn():
if False:
yield
1 / 0
f = gen.convert_yielded(fn())
with self.assertRaises(ZeroDivisionError):
f.result()
if have_twisted:
# Import and run as much of twisted's test suite as possible.
# This is unfortunately rather dependent on implementation details,
# but there doesn't appear to be a clean all-in-one conformance test
# suite for reactors.
#
# This is a list of all test suites using the ReactorBuilder
# available in Twisted 11.0.0 and 11.1.0 (and a blacklist of
# specific test methods to be disabled).
twisted_tests = {
'twisted.internet.test.test_core.ObjectModelIntegrationTest': [],
'twisted.internet.test.test_core.SystemEventTestsBuilder': [
'test_iterate', # deliberately not supported
# Fails on TwistedIOLoop and AsyncIOLoop.
'test_runAfterCrash',
],
'twisted.internet.test.test_fdset.ReactorFDSetTestsBuilder': [
"test_lostFileDescriptor", # incompatible with epoll and kqueue
],
'twisted.internet.test.test_process.ProcessTestsBuilder': [
# Only work as root. Twisted's "skip" functionality works
# with py27+, but not unittest2 on py26.
'test_changeGID',
'test_changeUID',
# This test sometimes fails with EPIPE on a call to
# kqueue.control. Happens consistently for me with
# trollius but not asyncio or other IOLoops.
'test_childConnectionLost',
],
# Process tests appear to work on OSX 10.7, but not 10.6
# 'twisted.internet.test.test_process.PTYProcessTestsBuilder': [
# 'test_systemCallUninterruptedByChildExit',
# ],
'twisted.internet.test.test_tcp.TCPClientTestsBuilder': [
'test_badContext', # ssl-related; see also SSLClientTestsMixin
],
'twisted.internet.test.test_tcp.TCPPortTestsBuilder': [
# These use link-local addresses and cause firewall prompts on mac
'test_buildProtocolIPv6AddressScopeID',
'test_portGetHostOnIPv6ScopeID',
'test_serverGetHostOnIPv6ScopeID',
'test_serverGetPeerOnIPv6ScopeID',
],
'twisted.internet.test.test_tcp.TCPConnectionTestsBuilder': [],
'twisted.internet.test.test_tcp.WriteSequenceTests': [],
'twisted.internet.test.test_tcp.AbortConnectionTestCase': [],
'twisted.internet.test.test_threads.ThreadTestsBuilder': [],
'twisted.internet.test.test_time.TimeTestsBuilder': [],
# Extra third-party dependencies (pyOpenSSL)
# 'twisted.internet.test.test_tls.SSLClientTestsMixin': [],
'twisted.internet.test.test_udp.UDPServerTestsBuilder': [],
'twisted.internet.test.test_unix.UNIXTestsBuilder': [
# Platform-specific. These tests would be skipped automatically
# if we were running twisted's own test runner.
'test_connectToLinuxAbstractNamespace',
'test_listenOnLinuxAbstractNamespace',
# These tests use twisted's sendmsg.c extension and sometimes
# fail with what looks like uninitialized memory errors
# (more common on pypy than cpython, but I've seen it on both)
'test_sendFileDescriptor',
'test_sendFileDescriptorTriggersPauseProducing',
'test_descriptorDeliveredBeforeBytes',
'test_avoidLeakingFileDescriptors',
],
'twisted.internet.test.test_unix.UNIXDatagramTestsBuilder': [
'test_listenOnLinuxAbstractNamespace',
],
'twisted.internet.test.test_unix.UNIXPortTestsBuilder': [],
}
if sys.version_info >= (3,):
# In Twisted 15.2.0 on Python 3.4, the process tests will try to run
# but fail, due in part to interactions between Tornado's strict
# warnings-as-errors policy and Twisted's own warning handling
# (it was not obvious how to configure the warnings module to
# reconcile the two), and partly due to what looks like a packaging
# error (process_cli.py missing). For now, just skip it.
del twisted_tests['twisted.internet.test.test_process.ProcessTestsBuilder']
for test_name, blacklist in twisted_tests.items():
try:
test_class = import_object(test_name)
except (ImportError, AttributeError):
continue
for test_func in blacklist: # type: ignore
if hasattr(test_class, test_func):
# The test_func may be defined in a mixin, so clobber
# it instead of delattr()
setattr(test_class, test_func, lambda self: None)
def make_test_subclass(test_class):
class TornadoTest(test_class): # type: ignore
_reactors = ["tornado.platform.twisted._TestReactor"]
def setUp(self):
# Twisted's tests expect to be run from a temporary
# directory; they create files in their working directory
# and don't always clean up after themselves.
self.__curdir = os.getcwd()
self.__tempdir = tempfile.mkdtemp()
os.chdir(self.__tempdir)
super(TornadoTest, self).setUp() # type: ignore
def tearDown(self):
super(TornadoTest, self).tearDown() # type: ignore
os.chdir(self.__curdir)
shutil.rmtree(self.__tempdir)
def flushWarnings(self, *args, **kwargs):
# This is a hack because Twisted and Tornado have
# differing approaches to warnings in tests.
# Tornado sets up a global set of warnings filters
# in runtests.py, while Twisted patches the filter
# list in each test. The net effect is that
# Twisted's tests run with Tornado's increased
# strictness (BytesWarning and ResourceWarning are
# enabled) but without our filter rules to ignore those
# warnings from Twisted code.
filtered = []
for w in super(TornadoTest, self).flushWarnings( # type: ignore
*args, **kwargs):
if w['category'] in (BytesWarning, ResourceWarning):
continue
filtered.append(w)
return filtered
def buildReactor(self):
self.__saved_signals = save_signal_handlers()
return test_class.buildReactor(self)
def unbuildReactor(self, reactor):
test_class.unbuildReactor(self, reactor)
# Clean up file descriptors (especially epoll/kqueue
# objects) eagerly instead of leaving them for the
# GC. Unfortunately we can't do this in reactor.stop
# since twisted expects to be able to unregister
# connections in a post-shutdown hook.
reactor._io_loop.close(all_fds=True)
restore_signal_handlers(self.__saved_signals)
TornadoTest.__name__ = test_class.__name__
return TornadoTest
test_subclass = make_test_subclass(test_class)
globals().update(test_subclass.makeTestCaseClasses())
# Since we're not using twisted's test runner, it's tricky to get
# logging set up well. Most of the time it's easiest to just
# leave it turned off, but while working on these tests you may want
# to uncomment one of the other lines instead.
log.defaultObserver.stop()
# import sys; log.startLogging(sys.stderr, setStdout=0)
# log.startLoggingWithObserver(log.PythonLoggingObserver().emit, setStdout=0)
# import logging; logging.getLogger('twisted').setLevel(logging.WARNING)
# Twisted recently introduced a new logger; disable that one too.
try:
from twisted.logger import globalLogBeginner # type: ignore
except ImportError:
pass
else:
globalLogBeginner.beginLoggingTo([], redirectStandardIO=False)
if have_twisted:
class LayeredTwistedIOLoop(TwistedIOLoop):
"""Layers a TwistedIOLoop on top of a TornadoReactor on a SelectIOLoop.
This is of course silly, but is useful for testing purposes to make
sure we're implementing both sides of the various interfaces
correctly. In some tests another TornadoReactor is layered on top
of the whole stack.
"""
def initialize(self, **kwargs):
# When configured to use LayeredTwistedIOLoop we can't easily
# get the next-best IOLoop implementation, so use the lowest common
# denominator.
self.real_io_loop = SelectIOLoop(make_current=False) # type: ignore
reactor = TornadoReactor(io_loop=self.real_io_loop)
super(LayeredTwistedIOLoop, self).initialize(reactor=reactor, **kwargs)
self.add_callback(self.make_current)
def close(self, all_fds=False):
super(LayeredTwistedIOLoop, self).close(all_fds=all_fds)
# HACK: This is the same thing that test_class.unbuildReactor does.
for reader in self.reactor._internalReaders:
self.reactor.removeReader(reader)
reader.connectionLost(None)
self.real_io_loop.close(all_fds=all_fds)
def stop(self):
# One of twisted's tests fails if I don't delay crash()
# until the reactor has started, but if I move this to
# TwistedIOLoop then the tests fail when I'm *not* running
# tornado-on-twisted-on-tornado. I'm clearly missing something
# about the startup/crash semantics, but since stop and crash
# are really only used in tests it doesn't really matter.
def f():
self.reactor.crash()
# Become current again on restart. This is needed to
# override real_io_loop's claim to being the current loop.
self.add_callback(self.make_current)
self.reactor.callWhenRunning(f)
if __name__ == "__main__":
unittest.main()
|
__init__.py
|
from __future__ import print_function, unicode_literals
import zmq
import zmq.auth
from zmq.auth.thread import ThreadAuthenticator
from zmq.utils.monitor import recv_monitor_message
import sys
import os
import json
import time
import logging
import multiprocessing
try:
import queue
except ImportError:
import Queue as queue
EVENT_MAP = {}
class SockProcess(object):
if (sys.version_info.major == 3):
desired_type = str
else:
desired_type = unicode
connected = False
identity = ""
endpoint = ""
socket = None
last_activity = 0
def __init__(self, certificate_path):
self.subs = {}
self.message_queue = []
self.keys_dir = certificate_path
if not os.path.exists(self.keys_dir):
logging.critical("Certificates are missing")
sys.exit(1)
def set_queues(self, input_queue, output_queue, log_queue):
self.input_queue = input_queue
self.output_queue = output_queue
self.log_queue = log_queue
def log(self, message):
print(message)
self.log_queue.put(message)
def create_socket(self):
self.log("Creating context")
self.context = zmq.Context()
self.log("Creating socket")
self.socket = self.context.socket(zmq.DEALER)
# Based on ironhouse.py
client_secret_file = os.path.join(self.keys_dir, "client.key_secret")
client_public, client_secret = zmq.auth.load_certificate(client_secret_file)
self.socket.curve_secretkey = client_secret
self.socket.curve_publickey = client_public
server_public_file = os.path.join(self.keys_dir, "server.key")
server_public, _ = zmq.auth.load_certificate(server_public_file)
# The client must know the server's public key to make a CURVE connection.
self.socket.curve_serverkey = server_public
self.log("Creating monitor")
self.monitor = self.socket.get_monitor_socket()
def _connect(self):
num = int(time.time())%100
iden = "{}_{}".format(self.identity, num)
self.log("Connecting as {}".format(iden))
if (sys.version_info.major == 3 or type(iden) is self.desired_type):
self.socket.identity = iden.encode('utf-8')
else:
self.socket.identity = iden
while True:
self.log("Requesting port")
qsock = self.context.socket(zmq.REQ)
client_secret_file = os.path.join(self.keys_dir, "client.key_secret")
client_public, client_secret = zmq.auth.load_certificate(client_secret_file)
qsock.curve_secretkey = client_secret
qsock.curve_publickey = client_public
server_public_file = os.path.join(self.keys_dir, "server.key")
server_public, _ = zmq.auth.load_certificate(server_public_file)
# The client must know the server's public key to make a CURVE connection.
qsock.curve_serverkey = server_public
qsock.set(zmq.LINGER, 1)
qsock.connect(self.endpoint.format("5141"))
qsock.send(b'')
if qsock.poll(timeout=1000):
port = int(qsock.recv())
self.log("Got port {}".format(port))
qsock.close()
break
else:
self.log("Timeout requesting port")
qsock.close()
self.socket.connect(self.endpoint.format(port))
self.log("Socket connect requested")
self.last_activity = time.time()
def connect(self, identity, endpoint):
self.identity = identity
self.endpoint = endpoint
self.reconnect()
def reconnect(self):
self.log("Reconnecting")
self.connected = False
if self.socket:
self.log("Closing socket")
self.socket.close(1)
self.log("Destroying context")
self.context.destroy(1)
self.create_socket()
self._connect()
def raw_send_multipart(self, safe_args):
if self.connected:
if (safe_args[0] != b'PONG'):
self.log('SENDING TO {}: {}'.format(safe_args[0], repr(safe_args[1:])[0:100]))
self.socket.send_multipart(safe_args)
else:
self.log('QUEUED FOR {}: {}'.format(safe_args[0], repr(safe_args[1:])[0:100]))
self.message_queue.append(safe_args)
def send_multipart(self, *args):
safe_args = [(a.encode('utf-8', 'backslashreplace') if (type(a) is self.desired_type) else a) for a in args]
self.raw_send_multipart(safe_args)
def send_SUB(self, channel):
channel = channel.lower()
if self.connected:
self.send_multipart('SUB', channel)
self.subs[channel] = True
def handle_PING(self, *args):
self.send_multipart('PONG')
def handle_PONG(self, *args):
pass
def handle_RECONNECT(self, *args):
self.log('Reconnect received!')
self.reconnect()
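    # Inbound frames are dispatched on their first element (see main_loop
    # below): PING is answered with PONG, PONG is ignored, RECONNECT tears
    # the socket down and rebuilds it, and anything else is forwarded to
    # the output queue.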
def main_loop(self, identity, endpoint):
print('In proc')
self.connect(identity, endpoint)
try:
while True:
# First the outgoing instructions...
try:
command = self.input_queue.get(False)
# Command is like ('SUB', channel)
handler = getattr(self, 'send_{}'.format(command[0].upper()), None)
if handler:
handler(*command[1:])
else:
self.send_multipart(*command)
except queue.Empty:
pass # Nothing to pull
# Now the incoming
nowish = time.time()
if (self.socket.poll(timeout=100)):
self.last_activity = nowish
data = self.socket.recv_multipart()
if (sys.version_info.major == 3):
safe_data = [d.decode('utf-8') for d in data]
else:
safe_data = [d for d in data]
if (safe_data[0] != 'PING'):
self.log(repr(safe_data)[0:100])
handler = getattr(self, 'handle_{}'.format(safe_data[0]), None)
if handler:
handler(*safe_data)
else:
self.output_queue.put(safe_data)
# Did the server go quiet?
if (nowish - 30 > self.last_activity):
self.log('No recent activity, reconnecting!')
self.reconnect()
# Check for useful events
                if (not self.monitor.closed and self.monitor.poll(timeout=100)):
evt = recv_monitor_message(self.monitor)
evt.update({'description': EVENT_MAP[evt['event']]})
if evt['event'] not in (zmq.EVENT_CONNECT_RETRIED,
zmq.EVENT_CONNECT_DELAYED,
zmq.EVENT_CLOSED):
# Completely ignore these 3 events because they spam too much.
self.log("Event: {}".format(evt))
if evt['event'] == zmq.EVENT_CONNECTED:
self.connected = True
self.send_multipart('CONNECT')
for c in self.subs:
self.send_multipart('SUB', c)
while self.message_queue:
self.raw_send_multipart(self.message_queue.pop(0))
if evt['event'] == zmq.EVENT_DISCONNECTED:
self.log('DISCONNECT')
self.reconnect()
if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
break
except zmq.ZMQError as e:
self.log('Exception!')
if e.errno == zmq.ETERM:
pass # Interrupted
else:
raise
self.log('Exiting thread!')
class Socket(object):
singleton = None
subs = None
def __init__(self, identity, endpoint, certificate_path, delayStart=False):
global EVENT_MAP
for name in dir(zmq):
if name.startswith('EVENT_'):
value = getattr(zmq, name)
EVENT_MAP[value] = name
m = multiprocessing.Manager()
self.manager = m
# For sanity the input and output queues are reversed on the other end
# Our input_queue is SockProcess's output_queue
# The swap happens in the reversal of the first two args passed to set_queues
self.input_queue = m.Queue()
self.output_queue = m.Queue()
self.log_queue = m.Queue()
self.sockproc = SockProcess(certificate_path)
self.sockproc.set_queues(self.output_queue, self.input_queue, self.log_queue)
        # On Python 3.3+ it would be better to pass daemon as a kwarg to this constructor
self.proc = multiprocessing.Process(
target=self.sockproc.main_loop,
args=(identity, endpoint))
        # But since this supports Python versions below 3.3, set the daemon flag like this
self.proc.daemon=True
if not delayStart:
self.proc.start()
Socket.singleton = self
self.subs = {}
    def start(self):
        self.proc.start()
###
def start_thread(self):
self.running = True
import threading
self._thread = threading.Thread(target=self.thread)
self._thread.setDaemon(True)
self._thread.start()
def stop_thread(self):
print('Stopping')
self.running = False
self._thread.join()
def twisted_call_safely(self, func, args):
if (args[0] != 'PING'):
print('(Twisted_call_safely) '+repr(args)[0:100])
self.reactor.callFromThread(func, *args)
def tornado_call_safely(self, func, args):
if (args[0] != 'PING'):
print('(Tornado_call_safely) '+repr(args)[0:100])
self.ioloop.add_callback(func, *args)
def direct_call_safely(self, func, args):
if (args[0] != 'PING'):
print('(Direct_call_safely) '+str(func)+' '+repr(args)[0:100])
func(*args)
def twisted_register(self, reactor):
self.reactor = reactor
self.call_safely = self.twisted_call_safely
self.start_thread()
def tornado_register(self, ioloop):
self.ioloop = ioloop
self.call_safely = self.tornado_call_safely
self.start_thread()
def foreground(self):
self.call_safely = self.direct_call_safely
self.running = True
self.thread()
###
@staticmethod
def get_sock():
return Socket.singleton
@staticmethod
def receiving(channel):
def baked_subscription(f):
Socket.singleton.subscribe(channel, f)
return f
return baked_subscription
@staticmethod
def chain(channel):
"""This is intended to be used with funchain"""
import funchain
base = funchain.Chain()
def baked_chain_link(cmd, channel, body):
try:
return base(body)
except funchain.AsyncCall:
# Won't finish immediately
return None
Socket.singleton.subscribe(channel, baked_chain_link)
return base
###
def handle_MSG(self, cmd, chan, body):
cb = self.subs.get(chan, None)
if cb:
cb(cmd, chan, json.loads(body))
def thread(self):
print('in thread')
while self.running:
try:
msg = self.log_queue.get(False)
print("(proc) "+msg)
except queue.Empty:
pass # Nothing to pull
            except EOFError:
                # the queue's manager process has gone away; stop reading
                print("!?")
                break
try:
command = self.input_queue.get(False)
# Command is like ('MSG', channel, jsonblob)
handler = getattr(self, 'handle_{}'.format(command[0].upper()), None)
if handler:
self.call_safely(handler, command)
except queue.Empty:
#print("+")
pass # Nothing to pull
except EOFError:
break
time.sleep(0.05)
print('Exiting thread!')
###
def send_multipart(self, *args):
self.output_queue.put(args)
def msg(self, channel, message):
self.send_multipart('MSG', channel, json.dumps(message))
def msgstat(self, count):
self.send_multipart('MSGSTAT', 'OK', count)
def subscribe(self, channel, callback):
channel = channel.lower()
self.send_multipart('SUB', channel)
self.subs[channel] = callback
def ping(self):
self.send_multipart('PING')
def pong(self):
self.send_multipart('PONG')
def connect(self, endpoint_fmt):
self.send_multipart('CONNECT')
def reconnect(self):
self.send_multipart('RECONNECT')
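# A minimal usage sketch (the identity, endpoint, and certificate path are
# hypothetical; the endpoint needs a '{}' port placeholder and a matching
# CURVE-enabled server must be running):
#
#   sock = Socket("worker1", "tcp://127.0.0.1:{}", "/path/to/certs")
#
#   @Socket.receiving("jobs")
#   def on_job(cmd, chan, body):
#       print(chan, body)
#
#   sock.foreground()  # or tornado_register(ioloop) / twisted_register(reactor)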
|
test.py
|
# -*- coding: utf8 -*-
import sys
import os
IS_PY3 = sys.version_info[0] == 3
IS_PY2 = not IS_PY3
MINOR_VER = sys.version_info[1]
# coverage doesn't work in python 3.1 and 3.2, which happen to be the same
# versions that lack u'' unicode literals, so one flag covers both checks
HAS_UNICODE_LITERAL = not (IS_PY3 and MINOR_VER in (1, 2))
cov = None
if HAS_UNICODE_LITERAL:
run_idx = int(os.environ.pop("SH_TEST_RUN_IDX", "0"))
first_run = run_idx == 0
try:
import coverage
except ImportError:
pass
else:
# for some reason, we can't run auto_data on the first run, or the coverage
# numbers get really screwed up
auto_data = True
if first_run:
auto_data = False
cov = coverage.Coverage(auto_data=auto_data)
if first_run:
cov.erase()
cov.start()
from os.path import exists, join, realpath, dirname, split
import unittest
try:
import unittest.mock
except ImportError:
HAS_MOCK = False
else:
HAS_MOCK = True
import tempfile
import warnings
import pty
import resource
import logging
import sys
from contextlib import contextmanager
import sh
import signal
import errno
import stat
import fcntl
import platform
from functools import wraps
import time
import inspect
# we have to use the real path because on osx, /tmp is a symlink to
# /private/tmp, and so assertions that gettempdir() == sh.pwd() will fail
tempdir = realpath(tempfile.gettempdir())
IS_MACOS = platform.system() in ("AIX", "Darwin")
# these 3 functions are helpers for modifying PYTHONPATH with a module's main
# directory
def append_pythonpath(env, path):
key = "PYTHONPATH"
pypath = [p for p in env.get(key, "").split(":") if p]
pypath.insert(0, path)
pypath = ":".join(pypath)
env[key] = pypath
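# For example:
#   env = {"PYTHONPATH": "a:b"}
#   append_pythonpath(env, "/x")  # env["PYTHONPATH"] becomes "/x:a:b"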
def get_module_import_dir(m):
mod_file = inspect.getsourcefile(m)
is_package = mod_file.endswith("__init__.py")
mod_dir = dirname(mod_file)
if is_package:
mod_dir, _ = split(mod_dir)
return mod_dir
def append_module_path(env, m):
append_pythonpath(env, get_module_import_dir(m))
if IS_PY3:
xrange = range
unicode = str
long = int
from io import StringIO
ioStringIO = StringIO
from io import BytesIO as cStringIO
iocStringIO = cStringIO
else:
from StringIO import StringIO
from cStringIO import StringIO as cStringIO
from io import StringIO as ioStringIO
from io import BytesIO as iocStringIO
THIS_DIR = dirname(os.path.abspath(__file__))
system_python = sh.Command(sys.executable)
# this is to ensure that our `python` helper here is able to import our local sh
# module, and not the system one
baked_env = os.environ.copy()
append_module_path(baked_env, sh)
python = system_python.bake(_env=baked_env)
if hasattr(logging, 'NullHandler'):
NullHandler = logging.NullHandler
else:
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
skipUnless = getattr(unittest, "skipUnless", None)
if not skipUnless:
    # a minimal skipUnless fallback for python2.6
def skipUnless(condition, reason):
def wrapper(test):
if condition:
return test
else:
@wraps(test)
def skip(*args, **kwargs):
return
return skip
return wrapper
skip_unless = skipUnless
def requires_progs(*progs):
missing = []
for prog in progs:
try:
sh.Command(prog)
except sh.CommandNotFound:
missing.append(prog)
friendly_missing = ", ".join(missing)
return skipUnless(len(missing) == 0, "Missing required system programs: %s"
% friendly_missing)
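# Used as a decorator, e.g. @requires_progs("sed", "tr") on a test method.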
requires_posix = skipUnless(os.name == "posix", "Requires POSIX")
requires_utf8 = skipUnless(sh.DEFAULT_ENCODING == "UTF-8", "System encoding must be UTF-8")
not_macos = skipUnless(not IS_MACOS, "Doesn't work on MacOS")
requires_py3 = skipUnless(IS_PY3, "Test only works on Python 3")
requires_py35 = skipUnless(IS_PY3 and MINOR_VER >= 5, "Test only works on Python 3.5 or higher")
def requires_poller(poller):
use_select = bool(int(os.environ.get("SH_TESTS_USE_SELECT", "0")))
cur_poller = "select" if use_select else "poll"
return skipUnless(cur_poller == poller, "Only enabled for select.%s" % cur_poller)
@contextmanager
def ulimit(key, new_soft):
soft, hard = resource.getrlimit(key)
resource.setrlimit(key, (new_soft, hard))
try:
yield
finally:
resource.setrlimit(key, (soft, hard))
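# Typical usage, temporarily lowering the soft limit on open files:
#   with ulimit(resource.RLIMIT_NOFILE, 128):
#       ...  # code exercised under the reduced limit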
def create_tmp_test(code, prefix="tmp", delete=True, **kwargs):
""" creates a temporary test file that lives on disk, on which we can run
python with sh """
py = tempfile.NamedTemporaryFile(prefix=prefix, delete=delete)
code = code.format(**kwargs)
if IS_PY3:
code = code.encode("UTF-8")
py.write(code)
py.flush()
# make the file executable
st = os.stat(py.name)
os.chmod(py.name, st.st_mode | stat.S_IEXEC)
# we don't explicitly close, because close will remove the file, and we
# don't want that until the test case is done. so we let the gc close it
# when it goes out of scope
return py
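# The pattern used throughout the tests below:
#   py = create_tmp_test("exit(0)")
#   python(py.name)  # run the generated script via the baked python command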
class BaseTests(unittest.TestCase):
def assert_oserror(self, num, fn, *args, **kwargs):
try:
fn(*args, **kwargs)
except OSError as e:
self.assertEqual(e.errno, num)
def assert_deprecated(self, fn, *args, **kwargs):
with warnings.catch_warnings(record=True) as w:
fn(*args, **kwargs)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
@requires_posix
class FunctionalTests(BaseTests):
def setUp(self):
self._environ = os.environ.copy()
def tearDown(self):
os.environ = self._environ
def test_print_command(self):
from sh import ls, which
actual_location = which("ls")
out = str(ls)
self.assertEqual(out, actual_location)
def test_unicode_arg(self):
from sh import echo
test = "漢字"
if not IS_PY3:
test = test.decode("utf8")
p = echo(test, _encoding="utf8")
output = p.strip()
self.assertEqual(test, output)
def test_unicode_exception(self):
from sh import ErrorReturnCode
py = create_tmp_test("exit(1)")
arg = "漢字"
native_arg = arg
if not IS_PY3:
arg = arg.decode("utf8")
try:
python(py.name, arg, _encoding="utf8")
except ErrorReturnCode as e:
self.assertTrue(native_arg in str(e))
else:
self.fail("exception wasn't raised")
def test_pipe_fd(self):
py = create_tmp_test("""print("hi world")""")
read_fd, write_fd = os.pipe()
python(py.name, _out=write_fd)
out = os.read(read_fd, 10)
self.assertEqual(out, b"hi world\n")
def test_trunc_exc(self):
py = create_tmp_test("""
import sys
sys.stdout.write("a" * 1000)
sys.stderr.write("b" * 1000)
exit(1)
""")
self.assertRaises(sh.ErrorReturnCode, python, py.name)
def test_number_arg(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
options, args = parser.parse_args()
print(args[0])
""")
out = python(py.name, 3).strip()
self.assertEqual(out, "3")
def test_empty_stdin_no_hang(self):
py = create_tmp_test("""
import sys
data = sys.stdin.read()
sys.stdout.write("no hang")
""")
out = python(py.name, _in="", _timeout=2)
self.assertEqual(out, "no hang")
out = python(py.name, _in=None, _timeout=2)
self.assertEqual(out, "no hang")
def test_exit_code(self):
from sh import ErrorReturnCode
py = create_tmp_test("""
exit(3)
""")
self.assertRaises(ErrorReturnCode, python, py.name)
def test_patched_glob(self):
from glob import glob
py = create_tmp_test("""
import sys
print(sys.argv[1:])
""")
files = glob("*.faowjefoajweofj")
out = python(py.name, files).strip()
self.assertEqual(out, "['*.faowjefoajweofj']")
@requires_py35
def test_patched_glob_with_recursive_argument(self):
from glob import glob
py = create_tmp_test("""
import sys
print(sys.argv[1:])
""")
files = glob("*.faowjefoajweofj", recursive=True)
out = python(py.name, files).strip()
self.assertEqual(out, "['*.faowjefoajweofj']")
def test_exit_code_with_hasattr(self):
from sh import ErrorReturnCode
py = create_tmp_test("""
exit(3)
""")
try:
out = python(py.name, _iter=True)
# hasattr can swallow exceptions
hasattr(out, 'something_not_there')
list(out)
self.assertEqual(out.exit_code, 3)
self.fail("Command exited with error, but no exception thrown")
except ErrorReturnCode as e:
pass
def test_exit_code_from_exception(self):
from sh import ErrorReturnCode
py = create_tmp_test("""
exit(3)
""")
self.assertRaises(ErrorReturnCode, python, py.name)
try:
python(py.name)
except Exception as e:
self.assertEqual(e.exit_code, 3)
def test_stdin_from_string(self):
from sh import sed
self.assertEqual(sed(_in="one test three", e="s/test/two/").strip(),
"one two three")
def test_ok_code(self):
from sh import ls, ErrorReturnCode_1, ErrorReturnCode_2
exc_to_test = ErrorReturnCode_2
code_to_pass = 2
if IS_MACOS:
exc_to_test = ErrorReturnCode_1
code_to_pass = 1
self.assertRaises(exc_to_test, ls, "/aofwje/garogjao4a/eoan3on")
ls("/aofwje/garogjao4a/eoan3on", _ok_code=code_to_pass)
ls("/aofwje/garogjao4a/eoan3on", _ok_code=[code_to_pass])
ls("/aofwje/garogjao4a/eoan3on", _ok_code=range(code_to_pass + 1))
def test_ok_code_none(self):
py = create_tmp_test("exit(0)")
python(py.name, _ok_code=None)
def test_quote_escaping(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
options, args = parser.parse_args()
print(args)
""")
out = python(py.name, "one two three").strip()
self.assertEqual(out, "['one two three']")
out = python(py.name, "one \"two three").strip()
self.assertEqual(out, "['one \"two three']")
out = python(py.name, "one", "two three").strip()
self.assertEqual(out, "['one', 'two three']")
out = python(py.name, "one", "two \"haha\" three").strip()
self.assertEqual(out, "['one', 'two \"haha\" three']")
out = python(py.name, "one two's three").strip()
self.assertEqual(out, "[\"one two's three\"]")
out = python(py.name, 'one two\'s three').strip()
self.assertEqual(out, "[\"one two's three\"]")
def test_multiple_pipes(self):
import time
py = create_tmp_test("""
import sys
import os
import time
for l in "andrew":
sys.stdout.write(l)
time.sleep(.2)
""")
inc_py = create_tmp_test("""
import sys
while True:
letter = sys.stdin.read(1)
if not letter:
break
sys.stdout.write(chr(ord(letter)+1))
""")
def inc(proc, *args, **kwargs):
return python(proc, "-u", inc_py.name, *args, **kwargs)
class Derp(object):
def __init__(self):
self.times = []
self.stdout = []
self.last_received = None
def agg(self, line):
self.stdout.append(line.strip())
now = time.time()
if self.last_received:
self.times.append(now - self.last_received)
self.last_received = now
derp = Derp()
p = inc(
inc(
inc(
python("-u", py.name, _piped=True),
_piped=True),
_piped=True),
_out=derp.agg)
p.wait()
self.assertEqual("".join(derp.stdout), "dqguhz")
self.assertTrue(all([t > .15 for t in derp.times]))
def test_manual_stdin_string(self):
from sh import tr
out = tr("[:lower:]", "[:upper:]", _in="andrew").strip()
self.assertEqual(out, "ANDREW")
def test_manual_stdin_iterable(self):
from sh import tr
test = ["testing\n", "herp\n", "derp\n"]
out = tr("[:lower:]", "[:upper:]", _in=test)
match = "".join([t.upper() for t in test])
self.assertEqual(out, match)
def test_manual_stdin_file(self):
from sh import tr
import tempfile
test_string = "testing\nherp\nderp\n"
stdin = tempfile.NamedTemporaryFile()
stdin.write(test_string.encode())
stdin.flush()
stdin.seek(0)
out = tr("[:lower:]", "[:upper:]", _in=stdin)
self.assertEqual(out, test_string.upper())
def test_manual_stdin_queue(self):
from sh import tr
try: from Queue import Queue, Empty
except ImportError: from queue import Queue, Empty
test = ["testing\n", "herp\n", "derp\n"]
q = Queue()
for t in test: q.put(t)
q.put(None) # EOF
out = tr("[:lower:]", "[:upper:]", _in=q)
match = "".join([t.upper() for t in test])
self.assertEqual(out, match)
def test_environment(self):
""" tests that environments variables that we pass into sh commands
exist in the environment, and on the sh module """
import os
# this is the environment we'll pass into our commands
env = {"HERP": "DERP"}
# first we test that the environment exists in our child process as
# we've set it
py = create_tmp_test("""
import os
for key in list(os.environ.keys()):
if key != "HERP":
del os.environ[key]
print(dict(os.environ))
""")
out = python(py.name, _env=env).strip()
self.assertEqual(out, "{'HERP': 'DERP'}")
py = create_tmp_test("""
import os, sys
sys.path.insert(0, os.getcwd())
import sh
for key in list(os.environ.keys()):
if key != "HERP":
del os.environ[key]
print(dict(HERP=sh.HERP))
""")
out = python(py.name, _env=env, _cwd=THIS_DIR).strip()
self.assertEqual(out, "{'HERP': 'DERP'}")
def test_which(self):
from sh import which, ls
self.assertEqual(which("fjoawjefojawe"), None)
self.assertEqual(which("ls"), str(ls))
def test_which_paths(self):
from sh import which
py = create_tmp_test("""
print("hi")
""")
test_path = dirname(py.name)
_, test_name = os.path.split(py.name)
found_path = which(test_name)
self.assertEqual(found_path, None)
found_path = which(test_name, [test_path])
self.assertEqual(found_path, py.name)
def test_no_close_fds(self):
# guarantee some extra fds in our parent process that don't close on exec. we have to explicitly do this
# because at some point (I believe python 3.4), python started being more stringent with closing fds to prevent
# security vulnerabilities. python 2.7, for example, doesn't set CLOEXEC on tempfile.TemporaryFile()s
#
# https://www.python.org/dev/peps/pep-0446/
tmp = [tempfile.TemporaryFile() for i in range(10)]
for t in tmp:
flags = fcntl.fcntl(t.fileno(), fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(t.fileno(), fcntl.F_SETFD, flags)
first_fd = tmp[0].fileno()
py = create_tmp_test("""
import os
print(len(os.listdir("/dev/fd")))
""")
out = python(py.name, _close_fds=False).strip()
        # pick some number greater than 4, since it's hard to know exactly how
        # many fds will be open/inherited in the child
self.assertTrue(int(out) > 7)
for t in tmp:
t.close()
def test_close_fds(self):
# guarantee some extra fds in our parent process that don't close on exec. we have to explicitly do this
# because at some point (I believe python 3.4), python started being more stringent with closing fds to prevent
# security vulnerabilities. python 2.7, for example, doesn't set CLOEXEC on tempfile.TemporaryFile()s
#
# https://www.python.org/dev/peps/pep-0446/
tmp = [tempfile.TemporaryFile() for i in range(10)]
for t in tmp:
flags = fcntl.fcntl(t.fileno(), fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(t.fileno(), fcntl.F_SETFD, flags)
py = create_tmp_test("""
import os
print(os.listdir("/dev/fd"))
""")
out = python(py.name).strip()
self.assertEqual(out, "['0', '1', '2', '3']")
for t in tmp:
t.close()
def test_pass_fds(self):
# guarantee some extra fds in our parent process that don't close on exec. we have to explicitly do this
# because at some point (I believe python 3.4), python started being more stringent with closing fds to prevent
# security vulnerabilities. python 2.7, for example, doesn't set CLOEXEC on tempfile.TemporaryFile()s
#
# https://www.python.org/dev/peps/pep-0446/
tmp = [tempfile.TemporaryFile() for i in range(10)]
for t in tmp:
flags = fcntl.fcntl(t.fileno(), fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(t.fileno(), fcntl.F_SETFD, flags)
last_fd = tmp[-1].fileno()
py = create_tmp_test("""
import os
print(os.listdir("/dev/fd"))
""")
out = python(py.name, _pass_fds=[last_fd]).strip()
inherited = [0, 1, 2, 3, last_fd]
inherited_str = [str(i) for i in inherited]
self.assertEqual(out, str(inherited_str))
for t in tmp:
t.close()
def test_no_arg(self):
import pwd
from sh import whoami
u1 = whoami().strip()
u2 = pwd.getpwuid(os.geteuid())[0]
self.assertEqual(u1, u2)
def test_incompatible_special_args(self):
from sh import ls
self.assertRaises(TypeError, ls, _iter=True, _piped=True)
def test_invalid_env(self):
from sh import ls
exc = TypeError
if IS_PY2 and MINOR_VER == 6:
exc = ValueError
self.assertRaises(exc, ls, _env="XXX")
self.assertRaises(exc, ls, _env={"foo": 123})
self.assertRaises(exc, ls, _env={123: "bar"})
def test_exception(self):
from sh import ErrorReturnCode_2
py = create_tmp_test("""
exit(2)
""")
self.assertRaises(ErrorReturnCode_2, python, py.name)
def test_piped_exception1(self):
from sh import ErrorReturnCode_2
py = create_tmp_test("""
import sys
sys.stdout.write("line1\\n")
sys.stdout.write("line2\\n")
exit(2)
""")
py2 = create_tmp_test("")
def fn():
list(python(python(py.name, _piped=True), "-u", py2.name, _iter=True))
self.assertRaises(ErrorReturnCode_2, fn)
def test_piped_exception2(self):
from sh import ErrorReturnCode_2
py = create_tmp_test("""
import sys
sys.stdout.write("line1\\n")
sys.stdout.write("line2\\n")
exit(2)
""")
py2 = create_tmp_test("")
def fn():
python(python(py.name, _piped=True), "-u", py2.name)
self.assertRaises(ErrorReturnCode_2, fn)
def test_command_not_found(self):
from sh import CommandNotFound
def do_import(): from sh import aowjgoawjoeijaowjellll
self.assertRaises(ImportError, do_import)
def do_import():
import sh
sh.awoefaowejfw
self.assertRaises(CommandNotFound, do_import)
def do_import():
import sh
sh.Command("ofajweofjawoe")
self.assertRaises(CommandNotFound, do_import)
def test_command_wrapper_equivalence(self):
from sh import Command, ls, which
self.assertEqual(Command(which("ls")), ls)
def test_doesnt_execute_directories(self):
save_path = os.environ['PATH']
bin_dir1 = tempfile.mkdtemp()
bin_dir2 = tempfile.mkdtemp()
gcc_dir1 = os.path.join(bin_dir1, 'gcc')
gcc_file2 = os.path.join(bin_dir2, 'gcc')
try:
os.environ['PATH'] = os.pathsep.join((bin_dir1, bin_dir2))
            # a directory named 'gcc'; it's executable, but it should not be
            # discovered by the internal which(1) clone
os.makedirs(gcc_dir1)
# an executable named gcc -- only this should be executed
bunk_header = '#!/bin/sh\necho $*'
with open(gcc_file2, "w") as h:
h.write(bunk_header)
os.chmod(gcc_file2, int(0o755))
import sh
from sh import gcc
if IS_PY3:
self.assertEqual(gcc._path,
gcc_file2.encode(sh.DEFAULT_ENCODING))
else:
self.assertEqual(gcc._path, gcc_file2)
self.assertEqual(gcc('no-error').stdout.strip(),
'no-error'.encode("ascii"))
finally:
os.environ['PATH'] = save_path
if exists(gcc_file2):
os.unlink(gcc_file2)
if exists(gcc_dir1):
os.rmdir(gcc_dir1)
if exists(bin_dir1):
os.rmdir(bin_dir1)
            if exists(bin_dir2):
os.rmdir(bin_dir2)
def test_multiple_args_short_option(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-l", dest="long_option")
options, args = parser.parse_args()
print(len(options.long_option.split()))
""")
num_args = int(python(py.name, l="one two three"))
self.assertEqual(num_args, 3)
num_args = int(python(py.name, "-l", "one's two's three's"))
self.assertEqual(num_args, 3)
def test_multiple_args_long_option(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-l", "--long-option", dest="long_option")
options, args = parser.parse_args()
print(len(options.long_option.split()))
""")
num_args = int(python(py.name, long_option="one two three",
nothing=False))
self.assertEqual(num_args, 3)
num_args = int(python(py.name, "--long-option", "one's two's three's"))
self.assertEqual(num_args, 3)
def test_short_bool_option(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-s", action="store_true", default=False, dest="short_option")
options, args = parser.parse_args()
print(options.short_option)
""")
self.assertTrue(python(py.name, s=True).strip() == "True")
self.assertTrue(python(py.name, s=False).strip() == "False")
self.assertTrue(python(py.name).strip() == "False")
def test_long_bool_option(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-l", "--long-option", action="store_true", default=False, dest="long_option")
options, args = parser.parse_args()
print(options.long_option)
""")
self.assertTrue(python(py.name, long_option=True).strip() == "True")
self.assertTrue(python(py.name).strip() == "False")
def test_composition(self):
from sh import ls, wc
c1 = int(wc(ls("-A1"), l=True))
c2 = len(os.listdir("."))
self.assertEqual(c1, c2)
def test_incremental_composition(self):
from sh import ls, wc
c1 = int(wc(ls("-A1", _piped=True), l=True).strip())
c2 = len(os.listdir("."))
self.assertEqual(c1, c2)
def test_short_option(self):
from sh import sh
s1 = sh(c="echo test").strip()
s2 = "test"
self.assertEqual(s1, s2)
def test_long_option(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-l", "--long-option", action="store", default="", dest="long_option")
options, args = parser.parse_args()
print(options.long_option.upper())
""")
self.assertTrue(python(py.name, long_option="testing").strip() == "TESTING")
self.assertTrue(python(py.name).strip() == "")
def test_raw_args(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--long_option", action="store", default=None,
dest="long_option1")
parser.add_option("--long-option", action="store", default=None,
dest="long_option2")
options, args = parser.parse_args()
if options.long_option1:
print(options.long_option1.upper())
else:
print(options.long_option2.upper())
""")
self.assertEqual(python(py.name,
{"long_option": "underscore"}).strip(), "UNDERSCORE")
self.assertEqual(python(py.name, long_option="hyphen").strip(), "HYPHEN")
def test_custom_separator(self):
py = create_tmp_test("""
import sys
print(sys.argv[1])
""")
opt = {"long-option": "underscore"}
correct = "--long-option=custom=underscore"
out = python(py.name, opt, _long_sep="=custom=").strip()
self.assertEqual(out, correct)
# test baking too
correct = "--long-option=baked=underscore"
python_baked = python.bake(py.name, opt, _long_sep="=baked=")
out = python_baked().strip()
self.assertEqual(out, correct)
def test_custom_separator_space(self):
py = create_tmp_test("""
import sys
print(str(sys.argv[1:]))
""")
opt = {"long-option": "space"}
correct = ["--long-option", "space"]
out = python(py.name, opt, _long_sep=" ").strip()
self.assertEqual(out, str(correct))
def test_custom_long_prefix(self):
py = create_tmp_test("""
import sys
print(sys.argv[1])
""")
out = python(py.name, {"long-option": "underscore"},
_long_prefix="-custom-").strip()
self.assertEqual(out, "-custom-long-option=underscore")
out = python(py.name, {"long-option": True},
_long_prefix="-custom-").strip()
self.assertEqual(out, "-custom-long-option")
# test baking too
out = python.bake(py.name, {"long-option": "underscore"},
_long_prefix="-baked-")().strip()
self.assertEqual(out, "-baked-long-option=underscore")
out = python.bake(py.name, {"long-option": True},
_long_prefix="-baked-")().strip()
self.assertEqual(out, "-baked-long-option")
def test_command_wrapper(self):
from sh import Command, which
ls = Command(which("ls"))
wc = Command(which("wc"))
c1 = int(wc(ls("-A1"), l=True))
c2 = len(os.listdir("."))
self.assertEqual(c1, c2)
def test_background(self):
from sh import sleep
import time
start = time.time()
sleep_time = .5
p = sleep(sleep_time, _bg=True)
now = time.time()
self.assertTrue(now - start < sleep_time)
p.wait()
now = time.time()
self.assertTrue(now - start > sleep_time)
def test_background_exception(self):
from sh import ls, ErrorReturnCode_1, ErrorReturnCode_2
p = ls("/ofawjeofj", _bg=True, _bg_exc=False) # should not raise
exc_to_test = ErrorReturnCode_2
if IS_MACOS: exc_to_test = ErrorReturnCode_1
self.assertRaises(exc_to_test, p.wait) # should raise
def test_with_context(self):
from sh import whoami
import getpass
py = create_tmp_test("""
import sys
import os
import subprocess
print("with_context")
subprocess.Popen(sys.argv[1:], shell=False).wait()
""")
cmd1 = python.bake(py.name, _with=True)
with cmd1:
out = whoami()
self.assertTrue("with_context" in out)
self.assertTrue(getpass.getuser() in out)
def test_with_context_args(self):
from sh import whoami
import getpass
py = create_tmp_test("""
import sys
import os
import subprocess
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-o", "--opt", action="store_true", default=False, dest="opt")
options, args = parser.parse_args()
if options.opt:
subprocess.Popen(args[0], shell=False).wait()
""")
with python(py.name, opt=True, _with=True):
out = whoami()
self.assertTrue(getpass.getuser() == out.strip())
with python(py.name, _with=True):
out = whoami()
self.assertTrue(out == "")
def test_binary_input(self):
py = create_tmp_test("""
import sys
data = sys.stdin.read()
sys.stdout.write(data)
""")
data = b'1234'
out = python(py.name, _in=data)
self.assertEqual(out, "1234")
def test_err_to_out(self):
py = create_tmp_test("""
import sys
import os
sys.stdout.write("stdout")
sys.stdout.flush()
sys.stderr.write("stderr")
sys.stderr.flush()
""")
stdout = python(py.name, _err_to_out=True)
self.assertEqual(stdout, "stdoutstderr")
def test_err_to_out_and_sys_stdout(self):
py = create_tmp_test("""
import sys
import os
sys.stdout.write("stdout")
sys.stdout.flush()
sys.stderr.write("stderr")
sys.stderr.flush()
""")
master, slave = os.pipe()
stdout = python(py.name, _err_to_out=True, _out=slave)
self.assertEqual(stdout, "")
self.assertEqual(os.read(master, 12), b"stdoutstderr")
def test_err_piped(self):
py = create_tmp_test("""
import sys
sys.stderr.write("stderr")
""")
py2 = create_tmp_test("""
import sys
while True:
line = sys.stdin.read()
if not line:
break
sys.stdout.write(line)
""")
out = python(python("-u", py.name, _piped="err"), "-u", py2.name)
self.assertEqual(out, "stderr")
def test_out_redirection(self):
import tempfile
py = create_tmp_test("""
import sys
import os
sys.stdout.write("stdout")
sys.stderr.write("stderr")
""")
file_obj = tempfile.NamedTemporaryFile()
out = python(py.name, _out=file_obj)
self.assertTrue(len(out) == 0)
file_obj.seek(0)
actual_out = file_obj.read()
file_obj.close()
self.assertTrue(len(actual_out) != 0)
# test with tee
file_obj = tempfile.NamedTemporaryFile()
out = python(py.name, _out=file_obj, _tee=True)
self.assertTrue(len(out) != 0)
file_obj.seek(0)
actual_out = file_obj.read()
file_obj.close()
self.assertTrue(len(actual_out) != 0)
def test_err_redirection(self):
import tempfile
py = create_tmp_test("""
import sys
import os
sys.stdout.write("stdout")
sys.stderr.write("stderr")
""")
file_obj = tempfile.NamedTemporaryFile()
p = python("-u", py.name, _err=file_obj)
file_obj.seek(0)
stderr = file_obj.read().decode()
file_obj.close()
self.assertTrue(p.stdout == b"stdout")
self.assertTrue(stderr == "stderr")
self.assertTrue(len(p.stderr) == 0)
# now with tee
file_obj = tempfile.NamedTemporaryFile()
p = python(py.name, _err=file_obj, _tee="err")
file_obj.seek(0)
stderr = file_obj.read().decode()
file_obj.close()
self.assertTrue(p.stdout == b"stdout")
self.assertTrue(stderr == "stderr")
self.assertTrue(len(p.stderr) != 0)
def test_tty_tee(self):
py = create_tmp_test("""
import sys
sys.stdout.write("stdout")
""")
read, write = pty.openpty()
out = python("-u", py.name, _out=write).stdout
tee = os.read(read, 6)
self.assertEqual(out, b"")
self.assertEqual(tee, b"stdout")
os.close(write)
os.close(read)
read, write = pty.openpty()
out = python("-u", py.name, _out=write, _tee=True).stdout
tee = os.read(read, 6)
self.assertEqual(out, b"stdout")
self.assertEqual(tee, b"stdout")
os.close(write)
os.close(read)
def test_err_redirection_actual_file(self):
import tempfile
file_obj = tempfile.NamedTemporaryFile()
py = create_tmp_test("""
import sys
import os
sys.stdout.write("stdout")
sys.stderr.write("stderr")
""")
stdout = python("-u", py.name, _err=file_obj.name).wait()
file_obj.seek(0)
stderr = file_obj.read().decode()
file_obj.close()
self.assertTrue(stdout == "stdout")
self.assertTrue(stderr == "stderr")
def test_subcommand_and_bake(self):
from sh import ls
import getpass
py = create_tmp_test("""
import sys
import os
import subprocess
print("subcommand")
subprocess.Popen(sys.argv[1:], shell=False).wait()
""")
cmd1 = python.bake(py.name)
out = cmd1.whoami()
self.assertTrue("subcommand" in out)
self.assertTrue(getpass.getuser() in out)
def test_multiple_bakes(self):
py = create_tmp_test("""
import sys
sys.stdout.write(str(sys.argv[1:]))
""")
out = python.bake(py.name).bake("bake1").bake("bake2")()
self.assertEqual("['bake1', 'bake2']", out)
def test_arg_preprocessor(self):
py = create_tmp_test("""
import sys
sys.stdout.write(str(sys.argv[1:]))
""")
def arg_preprocess(args, kwargs):
args.insert(0, "preprocessed")
kwargs["a-kwarg"] = 123
return args, kwargs
cmd = python.bake(py.name, _arg_preprocess=arg_preprocess)
out = cmd("arg")
self.assertEqual("['preprocessed', 'arg', '--a-kwarg=123']", out)
def test_bake_args_come_first(self):
from sh import ls
ls = ls.bake(h=True)
ran = ls("-la").ran
ft = ran.index("-h")
self.assertTrue("-la" in ran[ft:])
def test_output_equivalence(self):
from sh import whoami
iam1 = whoami()
iam2 = whoami()
self.assertEqual(iam1, iam2)
# https://github.com/amoffat/sh/pull/252
def test_stdout_pipe(self):
py = create_tmp_test(r"""
import sys
sys.stdout.write("foobar\n")
""")
read_fd, write_fd = os.pipe()
p = python(py.name, _out=write_fd, u=True)
        def alarm(signum, frame):
            self.fail("Timeout while reading from pipe")
import signal
signal.signal(signal.SIGALRM, alarm)
signal.alarm(3)
data = os.read(read_fd, 100)
self.assertEqual(b"foobar\n", data)
signal.alarm(0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
def test_stdout_callback(self):
py = create_tmp_test("""
import sys
import os
for i in range(5): print(i)
""")
stdout = []
def agg(line):
stdout.append(line)
p = python("-u", py.name, _out=agg)
p.wait()
self.assertTrue(len(stdout) == 5)
def test_stdout_callback_no_wait(self):
import time
py = create_tmp_test("""
import sys
import os
import time
for i in range(5):
print(i)
time.sleep(.5)
""")
stdout = []
def agg(line): stdout.append(line)
p = python("-u", py.name, _out=agg, _bg=True)
# we give a little pause to make sure that the NamedTemporaryFile
# exists when the python process actually starts
time.sleep(.5)
self.assertTrue(len(stdout) != 5)
def test_stdout_callback_line_buffered(self):
py = create_tmp_test("""
import sys
import os
for i in range(5): print("herpderp")
""")
stdout = []
def agg(line): stdout.append(line)
p = python("-u", py.name, _out=agg, _out_bufsize=1)
p.wait()
self.assertTrue(len(stdout) == 5)
def test_stdout_callback_line_unbuffered(self):
py = create_tmp_test("""
import sys
import os
for i in range(5): print("herpderp")
""")
stdout = []
def agg(char): stdout.append(char)
p = python("-u", py.name, _out=agg, _out_bufsize=0)
p.wait()
# + 5 newlines
self.assertTrue(len(stdout) == (len("herpderp") * 5 + 5))
def test_stdout_callback_buffered(self):
py = create_tmp_test("""
import sys
import os
for i in range(5): sys.stdout.write("herpderp")
""")
stdout = []
def agg(chunk): stdout.append(chunk)
p = python("-u", py.name, _out=agg, _out_bufsize=4)
p.wait()
self.assertTrue(len(stdout) == (len("herp") / 2 * 5))
def test_stdout_callback_with_input(self):
py = create_tmp_test("""
import sys
import os
IS_PY3 = sys.version_info[0] == 3
if IS_PY3: raw_input = input
for i in range(5): print(str(i))
derp = raw_input("herp? ")
print(derp)
""")
def agg(line, stdin):
if line.strip() == "4": stdin.put("derp\n")
p = python("-u", py.name, _out=agg, _tee=True)
p.wait()
self.assertTrue("derp" in p)
def test_stdout_callback_exit(self):
py = create_tmp_test("""
import sys
import os
for i in range(5): print(i)
""")
stdout = []
def agg(line):
line = line.strip()
stdout.append(line)
if line == "2": return True
p = python("-u", py.name, _out=agg, _tee=True)
p.wait()
self.assertTrue("4" in p)
self.assertTrue("4" not in stdout)
def test_stdout_callback_terminate(self):
import signal
py = create_tmp_test("""
import sys
import os
import time
for i in range(5):
print(i)
time.sleep(.5)
""")
stdout = []
def agg(line, stdin, process):
line = line.strip()
stdout.append(line)
if line == "3":
process.terminate()
return True
import sh
caught_signal = False
try:
p = python("-u", py.name, _out=agg, _bg=True)
p.wait()
except sh.SignalException_SIGTERM:
caught_signal = True
self.assertTrue(caught_signal)
self.assertEqual(p.process.exit_code, -signal.SIGTERM)
self.assertTrue("4" not in p)
self.assertTrue("4" not in stdout)
def test_stdout_callback_kill(self):
import signal
py = create_tmp_test("""
import sys
import os
import time
for i in range(5):
print(i)
time.sleep(.5)
""")
stdout = []
def agg(line, stdin, process):
line = line.strip()
stdout.append(line)
if line == "3":
process.kill()
return True
import sh
caught_signal = False
try:
p = python("-u", py.name, _out=agg, _bg=True)
p.wait()
except sh.SignalException_SIGKILL:
caught_signal = True
self.assertTrue(caught_signal)
self.assertEqual(p.process.exit_code, -signal.SIGKILL)
self.assertTrue("4" not in p)
self.assertTrue("4" not in stdout)
def test_general_signal(self):
import signal
from signal import SIGINT
py = create_tmp_test("""
import sys
import os
import time
import signal
def sig_handler(sig, frame):
print(10)
exit(0)
signal.signal(signal.SIGINT, sig_handler)
for i in range(5):
print(i)
sys.stdout.flush()
time.sleep(0.5)
""")
stdout = []
def agg(line, stdin, process):
line = line.strip()
stdout.append(line)
if line == "3":
process.signal(SIGINT)
return True
p = python(py.name, _out=agg, _tee=True)
p.wait()
self.assertEqual(p.process.exit_code, 0)
self.assertEqual(p, "0\n1\n2\n3\n10\n")
def test_iter_generator(self):
py = create_tmp_test("""
import sys
import os
import time
for i in range(42):
print(i)
sys.stdout.flush()
""")
out = []
for line in python(py.name, _iter=True):
out.append(int(line.strip()))
self.assertTrue(len(out) == 42 and sum(out) == 861)
def test_iter_unicode(self):
# issue https://github.com/amoffat/sh/issues/224
test_string = "\xe4\xbd\x95\xe4\xbd\x95\n" * 150 # len > buffer_s
txt = create_tmp_test(test_string)
for line in sh.cat(txt.name, _iter=True):
break
self.assertTrue(len(line) < 1024)
def test_nonblocking_iter(self):
from errno import EWOULDBLOCK
py = create_tmp_test("""
import time
import sys
time.sleep(1)
sys.stdout.write("stdout")
""")
count = 0
value = None
for line in python(py.name, _iter_noblock=True):
if line == EWOULDBLOCK:
count += 1
else:
value = line
self.assertTrue(count > 0)
self.assertEqual(value, "stdout")
py = create_tmp_test("""
import time
import sys
time.sleep(1)
sys.stderr.write("stderr")
""")
count = 0
value = None
for line in python(py.name, _iter_noblock="err"):
if line == EWOULDBLOCK:
count += 1
else:
value = line
self.assertTrue(count > 0)
self.assertEqual(value, "stderr")
def test_for_generator_to_err(self):
py = create_tmp_test("""
import sys
import os
for i in range(42):
sys.stderr.write(str(i)+"\\n")
""")
out = []
for line in python("-u", py.name, _iter="err"):
out.append(line)
self.assertTrue(len(out) == 42)
# verify that nothing is going to stdout
out = []
for line in python("-u", py.name, _iter="out"):
out.append(line)
self.assertTrue(len(out) == 0)
def test_sigpipe(self):
import sh
py1 = create_tmp_test("""
import sys
import os
import time
import signal
# by default, python disables SIGPIPE, in favor of using IOError exceptions, so
# let's put that back to the system default where we terminate with a signal
# exit code
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
for letter in "andrew":
time.sleep(0.6)
print(letter)
""")
py2 = create_tmp_test("""
import sys
import os
import time
while True:
line = sys.stdin.readline()
if not line:
break
print(line.strip().upper())
exit(0)
""")
p1 = python("-u", py1.name, _piped="out")
p2 = python(p1, "-u", py2.name)
# SIGPIPE should happen, but it shouldn't be an error, since _piped is
# truthful
self.assertEqual(-p1.exit_code, signal.SIGPIPE)
self.assertEqual(p2.exit_code, 0)
def test_piped_generator(self):
from sh import tr
from string import ascii_uppercase
import time
py1 = create_tmp_test("""
import sys
import os
import time
for letter in "andrew":
time.sleep(0.6)
print(letter)
""")
py2 = create_tmp_test("""
import sys
import os
import time
while True:
line = sys.stdin.readline()
if not line:
break
print(line.strip().upper())
""")
times = []
last_received = None
letters = ""
for line in python(python("-u", py1.name, _piped="out"), "-u",
py2.name, _iter=True):
if not letters:
start = time.time()
letters += line.strip()
now = time.time()
if last_received: times.append(now - last_received)
last_received = now
self.assertEqual("ANDREW", letters)
self.assertTrue(all([t > .3 for t in times]))
def test_generator_and_callback(self):
py = create_tmp_test("""
import sys
import os
for i in range(42):
sys.stderr.write(str(i * 2)+"\\n")
print(i)
""")
stderr = []
def agg(line): stderr.append(int(line.strip()))
out = []
for line in python("-u", py.name, _iter=True, _err=agg): out.append(line)
self.assertTrue(len(out) == 42)
self.assertTrue(sum(stderr) == 1722)
def test_cast_bg(self):
py = create_tmp_test("""
import sys
import time
time.sleep(0.5)
sys.stdout.write(sys.argv[1])
""")
self.assertEqual(int(python(py.name, "123", _bg=True)), 123)
        if not IS_PY3:
            # long() only exists on python 2
            self.assertEqual(long(python(py.name, "456", _bg=True)), 456)
self.assertEqual(float(python(py.name, "789", _bg=True)), 789.0)
def test_cmd_eq(self):
py = create_tmp_test("")
cmd1 = python.bake(py.name, "-u")
cmd2 = python.bake(py.name, "-u")
cmd3 = python.bake(py.name)
self.assertEqual(cmd1, cmd2)
self.assertNotEqual(cmd1, cmd3)
def test_fg(self):
py = create_tmp_test("exit(0)")
# notice we're using `system_python`, and not `python`. this is because
# `python` has an env baked into it, and we want `_env` to be None for
# coverage
system_python(py.name, _fg=True)
def test_fg_false(self):
""" https://github.com/amoffat/sh/issues/520 """
py = create_tmp_test("print('hello')")
buf = StringIO()
python(py.name, _fg=False, _out=buf)
self.assertEqual(buf.getvalue(), "hello\n")
def test_fg_true(self):
""" https://github.com/amoffat/sh/issues/520 """
py = create_tmp_test("print('hello')")
buf = StringIO()
self.assertRaises(TypeError, python, py.name, _fg=True, _out=buf)
def test_fg_env(self):
py = create_tmp_test("""
import os
code = int(os.environ.get("EXIT", "0"))
exit(code)
""")
env = os.environ.copy()
env["EXIT"] = "3"
self.assertRaises(sh.ErrorReturnCode_3, python, py.name, _fg=True,
_env=env)
def test_fg_alternative(self):
py = create_tmp_test("exit(0)")
python(py.name, _in=sys.stdin, _out=sys.stdout, _err=sys.stderr)
def test_fg_exc(self):
py = create_tmp_test("exit(1)")
self.assertRaises(sh.ErrorReturnCode_1, python, py.name, _fg=True)
def test_out_filename(self):
outfile = tempfile.NamedTemporaryFile()
py = create_tmp_test("print('output')")
python(py.name, _out=outfile.name)
outfile.seek(0)
self.assertEqual(b"output\n", outfile.read())
def test_bg_exit_code(self):
py = create_tmp_test("""
import time
time.sleep(1)
exit(49)
""")
p = python(py.name, _ok_code=49, _bg=True)
self.assertEqual(49, p.exit_code)
def test_cwd(self):
from sh import pwd
from os.path import realpath
self.assertEqual(str(pwd(_cwd="/tmp")), realpath("/tmp") + "\n")
self.assertEqual(str(pwd(_cwd="/etc")), realpath("/etc") + "\n")
def test_cwd_fg(self):
td = realpath(tempfile.mkdtemp())
py = create_tmp_test("""
import sh
import os
from os.path import realpath
orig = realpath(os.getcwd())
print(orig)
sh.pwd(_cwd="{newdir}", _fg=True)
print(realpath(os.getcwd()))
""".format(newdir=td))
orig, newdir, restored = python(py.name).strip().split("\n")
newdir = realpath(newdir)
self.assertEqual(newdir, td)
self.assertEqual(orig, restored)
self.assertNotEqual(orig, newdir)
os.rmdir(td)
def test_huge_piped_data(self):
from sh import tr
stdin = tempfile.NamedTemporaryFile()
data = "herpderp" * 4000 + "\n"
stdin.write(data.encode())
stdin.flush()
stdin.seek(0)
out = tr(tr("[:lower:]", "[:upper:]", _in=data), "[:upper:]", "[:lower:]")
self.assertTrue(out == data)
def test_tty_input(self):
py = create_tmp_test("""
import sys
import os
if os.isatty(sys.stdin.fileno()):
sys.stdout.write("password?\\n")
sys.stdout.flush()
pw = sys.stdin.readline().strip()
sys.stdout.write("%s\\n" % ("*" * len(pw)))
sys.stdout.flush()
else:
sys.stdout.write("no tty attached!\\n")
sys.stdout.flush()
""")
test_pw = "test123"
expected_stars = "*" * len(test_pw)
d = {}
def password_enterer(line, stdin):
line = line.strip()
if not line: return
if line == "password?":
stdin.put(test_pw + "\n")
elif line.startswith("*"):
d["stars"] = line
return True
pw_stars = python(py.name, _tty_in=True, _out=password_enterer)
pw_stars.wait()
self.assertEqual(d["stars"], expected_stars)
response = python(py.name)
self.assertEqual(response, "no tty attached!\n")
def test_tty_output(self):
py = create_tmp_test("""
import sys
import os
if os.isatty(sys.stdout.fileno()):
sys.stdout.write("tty attached")
sys.stdout.flush()
else:
sys.stdout.write("no tty attached")
sys.stdout.flush()
""")
out = python(py.name, _tty_out=True)
self.assertEqual(out, "tty attached")
out = python(py.name, _tty_out=False)
self.assertEqual(out, "no tty attached")
def test_stringio_output(self):
from sh import echo
out = StringIO()
echo("-n", "testing 123", _out=out)
self.assertEqual(out.getvalue(), "testing 123")
out = cStringIO()
echo("-n", "testing 123", _out=out)
self.assertEqual(out.getvalue().decode(), "testing 123")
out = ioStringIO()
echo("-n", "testing 123", _out=out)
self.assertEqual(out.getvalue(), "testing 123")
out = iocStringIO()
echo("-n", "testing 123", _out=out)
self.assertEqual(out.getvalue().decode(), "testing 123")
def test_stringio_input(self):
from sh import cat
input = StringIO()
input.write("herpderp")
input.seek(0)
out = cat(_in=input)
self.assertEqual(out, "herpderp")
def test_internal_bufsize(self):
from sh import cat
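        # _internal_bufsize caps how many chunks sh retains internally, so
        # with 1-byte chunks only the last 100 bytes of output survive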
output = cat(_in="a"*1000, _internal_bufsize=100, _out_bufsize=0)
self.assertEqual(len(output), 100)
output = cat(_in="a"*1000, _internal_bufsize=50, _out_bufsize=2)
self.assertEqual(len(output), 100)
def test_change_stdout_buffering(self):
py = create_tmp_test("""
import sys
import os
# this proves that we won't get the output into our callback until we send
# a newline
sys.stdout.write("switch ")
sys.stdout.flush()
sys.stdout.write("buffering\\n")
sys.stdout.flush()
sys.stdin.read(1)
sys.stdout.write("unbuffered")
sys.stdout.flush()
# this is to keep the output from being flushed by the process ending, which
# would ruin our test. we want to make sure we get the string "unbuffered"
# before the process ends, without writing a newline
sys.stdin.read(1)
""")
d = {
"newline_buffer_success": False,
"unbuffered_success": False,
}
def interact(line, stdin, process):
line = line.strip()
if not line: return
if line == "switch buffering":
d["newline_buffer_success"] = True
process.change_out_bufsize(0)
stdin.put("a")
elif line == "unbuffered":
stdin.put("b")
d["unbuffered_success"] = True
return True
# start with line buffered stdout
pw_stars = python("-u", py.name, _out=interact, _out_bufsize=1)
pw_stars.wait()
self.assertTrue(d["newline_buffer_success"])
self.assertTrue(d["unbuffered_success"])
def test_callable_interact(self):
py = create_tmp_test("""
import sys
sys.stdout.write("line1")
""")
class Callable(object):
def __init__(self):
self.line = None
def __call__(self, line):
self.line = line
cb = Callable()
python(py.name, _out=cb)
self.assertEqual(cb.line, "line1")
def test_encoding(self):
        # TODO: figure out the best way to test a different "_encoding"
        # special keyword argument; skipped for now
        return
def test_timeout(self):
import sh
from time import time
sleep_for = 3
timeout = 1
started = time()
try:
sh.sleep(sleep_for, _timeout=timeout).wait()
except sh.TimeoutException as e:
self.assertEqual(e.full_cmd, '/bin/sleep 3')
else:
self.fail("no timeout exception")
elapsed = time() - started
self.assertTrue(abs(elapsed - timeout) < 0.5)
def test_timeout_overstep(self):
started = time.time()
sh.sleep(1, _timeout=5)
elapsed = time.time() - started
self.assertTrue(abs(elapsed - 1) < 0.5)
def test_timeout_wait(self):
started = time.time()
p = sh.sleep(3, _bg=True)
self.assertRaises(sh.TimeoutException, p.wait, timeout=1)
def test_timeout_wait_overstep(self):
started = time.time()
p = sh.sleep(1, _bg=True)
p.wait(timeout=5)
def test_timeout_wait_negative(self):
started = time.time()
p = sh.sleep(3, _bg=True)
self.assertRaises(RuntimeError, p.wait, timeout=-3)
def test_binary_pipe(self):
binary = b'\xec;\xedr\xdbF'
py1 = create_tmp_test("""
import sys
import os
sys.stdout = os.fdopen(sys.stdout.fileno(), "wb", 0)
sys.stdout.write(b'\\xec;\\xedr\\xdbF')
""")
py2 = create_tmp_test("""
import sys
import os
sys.stdin = os.fdopen(sys.stdin.fileno(), "rb", 0)
sys.stdout = os.fdopen(sys.stdout.fileno(), "wb", 0)
sys.stdout.write(sys.stdin.read())
""")
out = python(python(py1.name), py2.name)
self.assertEqual(out.stdout, binary)
# designed to trigger the "... (%d more, please see e.stdout)" output
# of the ErrorReturnCode class
def test_failure_with_large_output(self):
from sh import ErrorReturnCode_1
py = create_tmp_test("""
print("andrewmoffat" * 1000)
exit(1)
""")
self.assertRaises(ErrorReturnCode_1, python, py.name)
    # designed to check that the ErrorReturnCode constructor does not raise
    # a UnicodeDecodeError
def test_non_ascii_error(self):
from sh import ls, ErrorReturnCode
test = "/á"
# coerce to unicode
if IS_PY3:
pass
else:
test = test.decode("utf8")
self.assertRaises(ErrorReturnCode, ls, test)
def test_no_out(self):
py = create_tmp_test("""
import sys
sys.stdout.write("stdout")
sys.stderr.write("stderr")
""")
p = python(py.name, _no_out=True)
self.assertEqual(p.stdout, b"")
self.assertEqual(p.stderr, b"stderr")
self.assertTrue(p.process._pipe_queue.empty())
def callback(line): pass
p = python(py.name, _out=callback)
self.assertEqual(p.stdout, b"")
self.assertEqual(p.stderr, b"stderr")
self.assertTrue(p.process._pipe_queue.empty())
p = python(py.name)
self.assertEqual(p.stdout, b"stdout")
self.assertEqual(p.stderr, b"stderr")
self.assertFalse(p.process._pipe_queue.empty())
def test_tty_stdin(self):
py = create_tmp_test("""
import sys
sys.stdout.write(sys.stdin.read())
sys.stdout.flush()
""")
out = python(py.name, _in="test\n", _tty_in=True)
self.assertEqual("test\n", out)
def test_no_err(self):
py = create_tmp_test("""
import sys
sys.stdout.write("stdout")
sys.stderr.write("stderr")
""")
p = python(py.name, _no_err=True)
self.assertEqual(p.stderr, b"")
self.assertEqual(p.stdout, b"stdout")
self.assertFalse(p.process._pipe_queue.empty())
def callback(line): pass
p = python(py.name, _err=callback)
self.assertEqual(p.stderr, b"")
self.assertEqual(p.stdout, b"stdout")
self.assertFalse(p.process._pipe_queue.empty())
p = python(py.name)
self.assertEqual(p.stderr, b"stderr")
self.assertEqual(p.stdout, b"stdout")
self.assertFalse(p.process._pipe_queue.empty())
def test_no_pipe(self):
from sh import ls
        # calling a command normally should fill up the pipe_queue
p = ls()
self.assertFalse(p.process._pipe_queue.empty())
# calling a command with a callback should not
def callback(line): pass
p = ls(_out=callback)
self.assertTrue(p.process._pipe_queue.empty())
        # calling a command normally with _no_pipe should not either
p = ls(_no_pipe=True)
self.assertTrue(p.process._pipe_queue.empty())
def test_decode_error_handling(self):
from functools import partial
py = create_tmp_test("""
# -*- coding: utf8 -*-
import sys
import os
sys.stdout = os.fdopen(sys.stdout.fileno(), 'wb')
IS_PY3 = sys.version_info[0] == 3
if IS_PY3:
sys.stdout.write(bytes("te漢字st", "utf8"))
else:
sys.stdout.write("te漢字st")
""")
fn = partial(python, py.name, _encoding="ascii")
def s(fn): str(fn())
self.assertRaises(UnicodeDecodeError, s, fn)
p = python(py.name, _encoding="ascii", _decode_errors="ignore")
self.assertEqual(p, "test")
def test_signal_exception(self):
from sh import SignalException_15
def throw_terminate_signal():
py = create_tmp_test("""
import time
while True: time.sleep(1)
""")
to_kill = python(py.name, _bg=True)
to_kill.terminate()
to_kill.wait()
self.assertRaises(SignalException_15, throw_terminate_signal)
def test_signal_group(self):
child = create_tmp_test("""
import time
time.sleep(3)
""")
parent = create_tmp_test("""
import sys
import sh
python = sh.Command(sys.executable)
p = python("{child_file}", _bg=True, _new_session=False)
print(p.pid)
print(p.process.pgid)
p.wait()
""", child_file=child.name)
def launch():
p = python(parent.name, _bg=True, _iter=True)
child_pid = int(next(p).strip())
child_pgid = int(next(p).strip())
parent_pid = p.pid
parent_pgid = p.process.pgid
return p, child_pid, child_pgid, parent_pid, parent_pgid
def assert_alive(pid):
os.kill(pid, 0)
def assert_dead(pid):
self.assert_oserror(errno.ESRCH, os.kill, pid, 0)
# first let's prove that calling regular SIGKILL on the parent does
# nothing to the child, since the child was launched in the same process
# group (_new_session=False) and the parent is not a controlling process
p, child_pid, child_pgid, parent_pid, parent_pgid = launch()
assert_alive(parent_pid)
assert_alive(child_pid)
p.kill()
time.sleep(0.1)
assert_dead(parent_pid)
assert_alive(child_pid)
self.assertRaises(sh.SignalException_SIGKILL, p.wait)
assert_dead(child_pid)
# now let's prove that killing the process group kills both the parent
# and the child
p, child_pid, child_pgid, parent_pid, parent_pgid = launch()
assert_alive(parent_pid)
assert_alive(child_pid)
p.kill_group()
time.sleep(0.1)
assert_dead(parent_pid)
assert_dead(child_pid)
def test_pushd(self):
""" test basic pushd functionality """
old_wd1 = sh.pwd().strip()
old_wd2 = os.getcwd()
self.assertEqual(old_wd1, old_wd2)
self.assertNotEqual(old_wd1, tempdir)
with sh.pushd(tempdir):
new_wd1 = sh.pwd().strip()
new_wd2 = os.getcwd()
old_wd3 = sh.pwd().strip()
old_wd4 = os.getcwd()
self.assertEqual(old_wd3, old_wd4)
self.assertEqual(old_wd1, old_wd3)
self.assertEqual(new_wd1, tempdir)
self.assertEqual(new_wd2, tempdir)
def test_pushd_cd(self):
""" test that pushd works like pushd/popd with built-in cd correctly """
import sh
from sh import mkdir
child = realpath(tempfile.mkdtemp())
try:
old_wd = os.getcwd()
with sh.pushd(tempdir):
self.assertEqual(tempdir, os.getcwd())
sh.cd(child)
self.assertEqual(child, os.getcwd())
self.assertEqual(old_wd, os.getcwd())
finally:
os.rmdir(child)
def test_cd_homedir(self):
orig = os.getcwd()
my_dir = os.path.expanduser("~")
sh.cd()
self.assertNotEqual(orig, os.getcwd())
self.assertEqual(my_dir, os.getcwd())
def test_non_existant_cwd(self):
from sh import ls
# sanity check
non_exist_dir = join(tempdir, "aowjgoahewro")
self.assertFalse(exists(non_exist_dir))
self.assertRaises(sh.ForkException, ls, _cwd=non_exist_dir)
# https://github.com/amoffat/sh/issues/176
def test_baked_command_can_be_printed(self):
from sh import ls
ll = ls.bake("-l")
self.assertTrue(str(ll).endswith("/ls -l"))
# https://github.com/amoffat/sh/issues/185
def test_done_callback(self):
import time
class Callback(object):
def __init__(self):
self.called = False
self.exit_code = None
self.success = None
def __call__(self, p, success, exit_code):
self.called = True
self.exit_code = exit_code
self.success = success
py = create_tmp_test("""
from time import time, sleep
sleep(1)
print(time())
""")
callback = Callback()
p = python(py.name, _done=callback, _bg=True)
# do a little setup to prove that a command with a _done callback is run
# in the background
wait_start = time.time()
p.wait()
wait_elapsed = time.time() - wait_start
self.assertTrue(callback.called)
self.assertTrue(abs(wait_elapsed - 1.0) < 1.0)
self.assertEqual(callback.exit_code, 0)
self.assertTrue(callback.success)
def test_fork_exc(self):
from sh import ForkException
py = create_tmp_test("")
def fail():
raise RuntimeError("nooo")
self.assertRaises(ForkException, python, py.name, _preexec_fn=fail)
def test_new_session(self):
from threading import Event
py = create_tmp_test("""
import os
import time
pid = os.getpid()
pgid = os.getpgid(pid)
sid = os.getsid(pid)
stuff = [pid, pgid, sid]
print(",".join([str(el) for el in stuff]))
time.sleep(0.5)
""")
event = Event()
def handle(line, stdin, p):
pid, pgid, sid = line.strip().split(",")
pid = int(pid)
pgid = int(pgid)
sid = int(sid)
self.assertEqual(p.pid, pid)
self.assertEqual(pid, pgid)
self.assertEqual(p.pgid, pgid)
self.assertEqual(pgid, p.get_pgid())
self.assertEqual(pid, sid)
self.assertEqual(sid, pgid)
self.assertEqual(p.sid, sid)
self.assertEqual(sid, p.get_sid())
event.set()
# new session
p = python(py.name, _out=handle)
p.wait()
self.assertTrue(event.is_set())
event.clear()
def handle(line, stdin, p):
pid, pgid, sid = line.strip().split(",")
pid = int(pid)
pgid = int(pgid)
sid = int(sid)
test_pid = os.getpgid(os.getpid())
self.assertEqual(p.pid, pid)
self.assertNotEqual(test_pid, pgid)
self.assertEqual(p.pgid, pgid)
self.assertEqual(pgid, p.get_pgid())
self.assertNotEqual(pid, sid)
self.assertNotEqual(sid, pgid)
self.assertEqual(p.sid, sid)
self.assertEqual(sid, p.get_sid())
event.set()
# no new session
p = python(py.name, _out=handle, _new_session=False)
p.wait()
self.assertTrue(event.is_set())
def test_done_cb_exc(self):
from sh import ErrorReturnCode
class Callback(object):
def __init__(self):
self.called = False
self.success = None
def __call__(self, p, success, exit_code):
self.success = success
self.called = True
py = create_tmp_test("exit(1)")
callback = Callback()
try:
p = python(py.name, _done=callback, _bg=True)
p.wait()
except ErrorReturnCode:
self.assertTrue(callback.called)
self.assertFalse(callback.success)
else:
self.fail("command should've thrown an exception")
def test_callable_stdin(self):
py = create_tmp_test("""
import sys
sys.stdout.write(sys.stdin.read())
""")
def create_stdin():
state = {"count": 0}
def stdin():
count = state["count"]
if count == 4:
return None
state["count"] += 1
return str(count)
return stdin
out = python(py.name, _in=create_stdin())
self.assertEqual("0123", out)
def test_stdin_unbuffered_bufsize(self):
import sh
from time import sleep
        # this tries to receive some known data and measures the time it takes
        # to receive it. since stdin is completely unbuffered, each chunk
        # should arrive as soon as the input generator yields it
py = create_tmp_test("""
import sys
from time import time
started = time()
data = sys.stdin.read(len("testing"))
waited = time() - started
sys.stdout.write(data + "\\n")
sys.stdout.write(str(waited) + "\\n")
started = time()
data = sys.stdin.read(len("done"))
waited = time() - started
sys.stdout.write(data + "\\n")
sys.stdout.write(str(waited) + "\\n")
sys.stdout.flush()
""")
def create_stdin():
yield "test"
sleep(1)
yield "ing"
sleep(1)
yield "done"
out = python(py.name, _in=create_stdin(), _in_bufsize=0)
word1, time1, word2, time2, _ = out.split("\n")
time1 = float(time1)
time2 = float(time2)
self.assertEqual(word1, "testing")
self.assertTrue(abs(1-time1) < 0.5)
self.assertEqual(word2, "done")
self.assertTrue(abs(1-time2) < 0.5)
def test_stdin_newline_bufsize(self):
import sh
from time import sleep
# this tries to receive some known data and measures the time it takes
# to receive it. since we're flushing by newline, we should only be
# able to receive the data when a newline is fed in
py = create_tmp_test("""
import sys
from time import time
started = time()
data = sys.stdin.read(len("testing\\n"))
waited = time() - started
sys.stdout.write(data)
sys.stdout.write(str(waited) + "\\n")
started = time()
data = sys.stdin.read(len("done\\n"))
waited = time() - started
sys.stdout.write(data)
sys.stdout.write(str(waited) + "\\n")
sys.stdout.flush()
""")
# we'll feed in text incrementally, sleeping strategically before
# sending a newline. we then measure the amount that we slept
# indirectly in the child process
def create_stdin():
yield "test"
sleep(1)
yield "ing\n"
sleep(1)
yield "done\n"
out = python(py.name, _in=create_stdin(), _in_bufsize=1)
word1, time1, word2, time2, _ = out.split("\n")
time1 = float(time1)
time2 = float(time2)
self.assertEqual(word1, "testing")
self.assertTrue(abs(1-time1) < 0.5)
self.assertEqual(word2, "done")
self.assertTrue(abs(1-time2) < 0.5)
def test_custom_timeout_signal(self):
from sh import TimeoutException
import signal
py = create_tmp_test("""
import time
time.sleep(3)
""")
try:
python(py.name, _timeout=1, _timeout_signal=signal.SIGQUIT)
except TimeoutException as e:
self.assertEqual(e.exit_code, signal.SIGQUIT)
else:
self.fail("we should have handled a TimeoutException")
def test_append_stdout(self):
py = create_tmp_test("""
import sys
num = sys.stdin.read()
sys.stdout.write(num)
""")
append_file = tempfile.NamedTemporaryFile(mode="a+b")
python(py.name, _in="1", _out=append_file)
python(py.name, _in="2", _out=append_file)
append_file.seek(0)
output = append_file.read()
self.assertEqual(b"12", output)
def test_shadowed_subcommand(self):
py = create_tmp_test("""
import sys
sys.stdout.write(sys.argv[1])
""")
out = python.bake(py.name).bake_()
self.assertEqual("bake", out)
def test_no_proc_no_attr(self):
py = create_tmp_test("")
with python(py.name) as p:
self.assertRaises(AttributeError, getattr, p, "exit_code")
def test_partially_applied_callback(self):
from functools import partial
py = create_tmp_test("""
for i in range(10):
print(i)
""")
output = []
def fn(foo, line):
output.append((foo, int(line.strip())))
log_line = partial(fn, "hello")
out = python(py.name, _out=log_line)
self.assertEqual(output, [("hello", i) for i in range(10)])
output = []
def fn(foo, line, stdin, proc):
output.append((foo, int(line.strip())))
log_line = partial(fn, "hello")
out = python(py.name, _out=log_line)
self.assertEqual(output, [("hello", i) for i in range(10)])
# https://github.com/amoffat/sh/issues/266
def test_grandchild_no_sighup(self):
import time
# child process that will write to a file if it receives a SIGHUP
child = create_tmp_test("""
import signal
import sys
import time
output_file = sys.argv[1]
with open(output_file, "w") as f:
def handle_sighup(signum, frame):
f.write("got signal %d" % signum)
sys.exit(signum)
signal.signal(signal.SIGHUP, handle_sighup)
time.sleep(2)
f.write("made it!\\n")
""")
# the parent that will terminate before the child writes to the output
# file, potentially causing a SIGHUP
parent = create_tmp_test("""
import os
import time
import sys
child_file = sys.argv[1]
output_file = sys.argv[2]
python_name = os.path.basename(sys.executable)
os.spawnlp(os.P_NOWAIT, python_name, python_name, child_file, output_file)
time.sleep(1) # give child a chance to set up
""")
output_file = tempfile.NamedTemporaryFile(delete=True)
python(parent.name, child.name, output_file.name)
time.sleep(3)
out = output_file.readlines()[0]
self.assertEqual(out, b"made it!\n")
def test_unchecked_producer_failure(self):
from sh import ErrorReturnCode_2
producer = create_tmp_test("""
import sys
for i in range(10):
print(i)
sys.exit(2)
""")
consumer = create_tmp_test("""
import sys
for line in sys.stdin:
pass
""")
direct_pipe = python(producer.name, _piped=True)
self.assertRaises(ErrorReturnCode_2, python, direct_pipe, consumer.name)
def test_unchecked_pipeline_failure(self):
# similar to test_unchecked_producer_failure, but this
# tests a multi-stage pipeline
from sh import ErrorReturnCode_2
producer = create_tmp_test("""
import sys
for i in range(10):
print(i)
sys.exit(2)
""")
middleman = create_tmp_test("""
import sys
for line in sys.stdin:
print("> " + line)
""")
consumer = create_tmp_test("""
import sys
for line in sys.stdin:
pass
""")
producer_normal_pipe = python(producer.name, _piped=True)
middleman_normal_pipe = python(producer_normal_pipe, middleman.name, _piped=True)
self.assertRaises(ErrorReturnCode_2, python, middleman_normal_pipe, consumer.name)
@skipUnless(HAS_MOCK, "requires unittest.mock")
class MockTests(BaseTests):
def test_patch_command_cls(self):
def fn():
cmd = sh.Command("afowejfow")
return cmd()
@unittest.mock.patch("sh.Command")
def test(Command):
Command().return_value = "some output"
return fn()
self.assertEqual(test(), "some output")
self.assertRaises(sh.CommandNotFound, fn)
def test_patch_command(self):
def fn():
return sh.afowejfow()
@unittest.mock.patch("sh.afowejfow", create=True)
def test(cmd):
cmd.return_value = "some output"
return fn()
self.assertEqual(test(), "some output")
self.assertRaises(sh.CommandNotFound, fn)
class MiscTests(BaseTests):
def test_pickling(self):
import pickle
py = create_tmp_test("""
import sys
sys.stdout.write("some output")
sys.stderr.write("some error")
exit(1)
""")
try:
python(py.name)
except sh.ErrorReturnCode as e:
restored = pickle.loads(pickle.dumps(e))
self.assertEqual(restored.stdout, b"some output")
self.assertEqual(restored.stderr, b"some error")
self.assertEqual(restored.exit_code, 1)
else:
self.fail("Didn't get an exception")
@requires_poller("poll")
def test_fd_over_1024(self):
py = create_tmp_test("""print("hi world")""")
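        # open enough pipes to push fd numbers past select()'s FD_SETSIZE
        # limit of 1024; only the poll() backend can service those fds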
with ulimit(resource.RLIMIT_NOFILE, 2048):
cutoff_fd = 1024
pipes = []
for i in xrange(cutoff_fd):
master, slave = os.pipe()
pipes.append((master, slave))
if slave >= cutoff_fd:
break
python(py.name)
for master, slave in pipes:
os.close(master)
os.close(slave)
def test_args_deprecated(self):
self.assertRaises(DeprecationWarning, sh.args, _env={})
def test_percent_doesnt_fail_logging(self):
""" test that a command name doesn't interfere with string formatting in
the internal loggers """
py = create_tmp_test("""
print("cool")
""")
out = python(py.name, "%")
out = python(py.name, "%%")
out = python(py.name, "%%%")
# TODO
# for some reason, i can't get a good stable baseline measured in this test
# on osx. so skip it for now if osx
@not_macos
@requires_progs("lsof")
def test_no_fd_leak(self):
import sh
import os
from itertools import product
# options whose combinations can possibly cause fd leaks
kwargs = {
"_tty_out": (True, False),
"_tty_in": (True, False),
"_err_to_out": (True, False),
}
def get_opts(possible_values):
all_opts = []
for opt, values in possible_values.items():
opt_collection = []
all_opts.append(opt_collection)
for val in values:
pair = (opt, val)
opt_collection.append(pair)
for combo in product(*all_opts):
opt_dict = {}
for key, val in combo:
opt_dict[key] = val
yield opt_dict
test_pid = os.getpid()
def get_num_fds():
lines = sh.lsof(p=test_pid).strip().split("\n")
def test(line):
line = line.upper()
return "CHR" in line or "PIPE" in line
lines = [line for line in lines if test(line)]
return len(lines) - 1
py = create_tmp_test("")
def test_command(**opts):
python(py.name, **opts)
        # run the command once first so that our fd baseline is stable
test_command()
baseline = get_num_fds()
for i in xrange(10):
test_command()
num_fds = get_num_fds()
self.assertEqual(baseline, num_fds)
for opts in get_opts(kwargs):
for i in xrange(2):
test_command(**opts)
num_fds = get_num_fds()
self.assertEqual(baseline, num_fds, (baseline, num_fds, opts))
def test_pushd_thread_safety(self):
import threading
import time
temp1 = realpath(tempfile.mkdtemp())
temp2 = realpath(tempfile.mkdtemp())
try:
results = [None, None]
def fn1():
with sh.pushd(temp1):
time.sleep(0.2)
results[0] = realpath(os.getcwd())
def fn2():
time.sleep(0.1)
with sh.pushd(temp2):
results[1] = realpath(os.getcwd())
time.sleep(0.3)
t1 = threading.Thread(name="t1", target=fn1)
t2 = threading.Thread(name="t2", target=fn2)
t1.start()
t2.start()
t1.join()
t2.join()
self.assertEqual(results, [temp1, temp2])
finally:
os.rmdir(temp1)
os.rmdir(temp2)
def test_stdin_nohang(self):
py = create_tmp_test("""
print("hi")
""")
read, write = os.pipe()
stdin = os.fdopen(read, "r")
python(py.name, _in=stdin)
@requires_utf8
def test_unicode_path(self):
from sh import Command
python_name = os.path.basename(sys.executable)
py = create_tmp_test("""#!/usr/bin/env {0}
# -*- coding: utf8 -*-
print("字")
""".format(python_name), prefix="字", delete=False)
try:
py.close()
os.chmod(py.name, int(0o755))
cmd = Command(py.name)
# all of these should behave just fine
str(cmd)
repr(cmd)
unicode(cmd)
running = cmd()
str(running)
repr(running)
unicode(running)
str(running.process)
repr(running.process)
unicode(running.process)
finally:
os.unlink(py.name)
# https://github.com/amoffat/sh/issues/121
def test_wraps(self):
from sh import ls
wraps(ls)(lambda f: True)
def test_signal_exception_aliases(self):
""" proves that signal exceptions with numbers and names are equivalent
"""
import signal
import sh
sig_name = "SignalException_%d" % signal.SIGQUIT
sig = getattr(sh, sig_name)
from sh import SignalException_SIGQUIT
self.assertEqual(sig, SignalException_SIGQUIT)
def test_change_log_message(self):
py = create_tmp_test("""
print("cool")
""")
def log_msg(cmd, call_args, pid=None):
return "Hi! I ran something"
buf = StringIO()
handler = logging.StreamHandler(buf)
logger = logging.getLogger("sh")
logger.setLevel(logging.INFO)
try:
logger.addHandler(handler)
python(py.name, "meow", "bark", _log_msg=log_msg)
finally:
logger.removeHandler(handler)
loglines = buf.getvalue().split("\n")
self.assertTrue(loglines, "Log handler captured no messages?")
self.assertTrue(loglines[0].startswith("Hi! I ran something"))
# https://github.com/amoffat/sh/issues/273
def test_stop_iteration_doesnt_block(self):
""" proves that calling calling next() on a stopped iterator doesn't
hang. """
import sh
py = create_tmp_test("""
print("cool")
""")
p = python(py.name, _iter=True)
for i in range(100):
try:
next(p)
except StopIteration:
pass
# https://github.com/amoffat/sh/issues/195
def test_threaded_with_contexts(self):
import sh
import threading
import time
py = create_tmp_test("""
import sys
a = sys.argv
res = (a[1], a[3])
sys.stdout.write(repr(res))
""")
p1 = python.bake("-u", py.name, 1)
p2 = python.bake("-u", py.name, 2)
results = [None, None]
def f1():
with p1:
time.sleep(1)
results[0] = str(system_python("one"))
def f2():
with p2:
results[1] = str(system_python("two"))
t1 = threading.Thread(target=f1)
t1.start()
t2 = threading.Thread(target=f2)
t2.start()
t1.join()
t2.join()
correct = [
"('1', 'one')",
"('2', 'two')",
]
self.assertEqual(results, correct)
# https://github.com/amoffat/sh/pull/292
def test_eintr(self):
import signal
def handler(num, frame): pass
signal.signal(signal.SIGALRM, handler)
py = create_tmp_test("""
import time
time.sleep(2)
""")
p = python(py.name, _bg=True)
signal.alarm(1)
p.wait()
class StreamBuffererTests(unittest.TestCase):
def test_unbuffered(self):
from sh import _disable_whitelist, StreamBufferer
b = StreamBufferer(0)
self.assertEqual(b.process(b"test"), [b"test"])
self.assertEqual(b.process(b"one"), [b"one"])
self.assertEqual(b.process(b""), [b""])
self.assertEqual(b.flush(), b"")
def test_newline_buffered(self):
from sh import _disable_whitelist, StreamBufferer
b = StreamBufferer(1)
self.assertEqual(b.process(b"testing\none\ntwo"), [b"testing\n", b"one\n"])
self.assertEqual(b.process(b"\nthree\nfour"), [b"two\n", b"three\n"])
self.assertEqual(b.flush(), b"four")
def test_chunk_buffered(self):
from sh import _disable_whitelist, StreamBufferer
b = StreamBufferer(10)
self.assertEqual(b.process(b"testing\none\ntwo"), [b"testing\non"])
self.assertEqual(b.process(b"\nthree\n"), [b"e\ntwo\nthre"])
self.assertEqual(b.flush(), b"e\n")
@requires_posix
class ExecutionContextTests(unittest.TestCase):
def test_basic(self):
import sh
out = StringIO()
_sh = sh(_out=out)
_sh.echo("-n", "TEST")
self.assertEqual("TEST", out.getvalue())
def test_no_interfere1(self):
import sh
out = StringIO()
_sh = sh(_out=out)
from _sh import echo
echo("-n", "TEST")
self.assertEqual("TEST", out.getvalue())
# Emptying the StringIO
out.seek(0)
out.truncate(0)
sh.echo("-n", "KO")
self.assertEqual("", out.getvalue())
def test_no_interfere2(self):
import sh
out = StringIO()
from sh import echo
_sh = sh(_out=out)
echo("-n", "TEST")
self.assertEqual("", out.getvalue())
def test_no_bad_name(self):
out = StringIO()
def fn():
import sh
sh = sh(_out=out)
self.assertRaises(RuntimeError, fn)
def test_set_in_parent_function(self):
import sh
out = StringIO()
_sh = sh(_out=out)
def nested1():
_sh.echo("-n", "TEST1")
def nested2():
import sh
sh.echo("-n", "TEST2")
nested1()
nested2()
self.assertEqual("TEST1", out.getvalue())
def test_reimport_no_interfere(self):
import sh
out = StringIO()
_sh = sh(_out=out)
        import _sh  # this reimports '_sh' via the eponymous local variable
_sh.echo("-n", "TEST")
self.assertEqual("TEST", out.getvalue())
def test_importer_detects_module_name(self):
import sh
_sh = sh()
omg = _sh
from omg import cat
def test_importer_only_works_with_sh(self):
def unallowed_import():
_os = os
from _os import path
self.assertRaises(ImportError, unallowed_import)
def test_reimport_from_cli(self):
# The REPL and CLI both need special handling to create an execution context that is safe to
# reimport
if IS_PY3:
cmdstr = '; '.join(('import sh, io, sys',
'out = io.StringIO()',
'_sh = sh(_out=out)',
'import _sh',
'_sh.echo("-n", "TEST")',
'sys.stderr.write(out.getvalue())',
))
else:
cmdstr = '; '.join(('import sh, StringIO, sys',
'out = StringIO.StringIO()',
'_sh = sh(_out=out)',
'import _sh',
'_sh.echo("-n", "TEST")',
'sys.stderr.write(out.getvalue())',
))
err = StringIO()
python('-c', cmdstr, _err=err)
self.assertEqual('TEST', err.getvalue())
if __name__ == "__main__":
root = logging.getLogger()
root.setLevel(logging.DEBUG)
root.addHandler(NullHandler())
test_kwargs = {}
if IS_PY2 and MINOR_VER != 6:
test_kwargs["failfast"] = True
test_kwargs["verbosity"] = 2
try:
        # if we're running a specific test, we can let the unittest framework
        # figure out that test and run it itself. it will also handle setting
# of the process if any tests error or fail
if len(sys.argv) > 1:
unittest.main(**test_kwargs)
# otherwise, it looks like we want to run all the tests
else:
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
test_kwargs["verbosity"] = 2
result = unittest.TextTestRunner(**test_kwargs).run(suite)
if not result.wasSuccessful():
exit(1)
finally:
if cov:
cov.stop()
cov.save()
|
multi_process_runner_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `multi_process_runner`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import threading
import time
from absl import logging
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.eager import test
def proc_func_that_adds_task_type_in_return_data():
return multi_worker_test_base.get_task_type()
def proc_func_that_errors():
raise ValueError('This is an error.')
def proc_func_that_does_nothing():
pass
def proc_func_that_adds_simple_return_data():
return 'dummy_data'
def proc_func_that_return_args_and_kwargs(*args, **kwargs):
return list(args) + list(kwargs.items())
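# e.g. proc_func_that_return_args_and_kwargs('a', 'b', c_k='c_v') returns
# ['a', 'b', ('c_k', 'c_v')], as asserted in
# test_multi_process_runner_args_passed_correctly below.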
def proc_func_with_barrier():
return multi_process_runner.barrier()
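# barrier() blocks until every process in the cluster spec reaches it;
# calling it from the main process instead of a subprocess raises ValueError
# (see test_barrier_called_in_main_process below).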
class MultiProcessRunnerTest(test.TestCase):
def _worker_idx(self):
config_task = json.loads(os.environ['TF_CONFIG'])['task']
return config_task['index']
def test_multi_process_runner(self):
mpr_result = multi_process_runner.run(
proc_func_that_adds_task_type_in_return_data,
multi_worker_test_base.create_cluster_spec(
num_workers=2, num_ps=3, has_eval=1))
job_count_dict = {'worker': 2, 'ps': 3, 'evaluator': 1}
for data in mpr_result.return_value:
job_count_dict[data] -= 1
self.assertEqual(job_count_dict['worker'], 0)
self.assertEqual(job_count_dict['ps'], 0)
self.assertEqual(job_count_dict['evaluator'], 0)
def test_multi_process_runner_error_propagates_from_subprocesses(self):
runner = multi_process_runner.MultiProcessRunner(
proc_func_that_errors,
multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
max_run_time=20)
runner.start()
with self.assertRaisesRegexp(ValueError, 'This is an error.'):
runner.join()
def test_multi_process_runner_queue_emptied_between_runs(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
return_value = multi_process_runner.run(
proc_func_that_adds_simple_return_data, cluster_spec).return_value
self.assertTrue(return_value)
self.assertEqual(return_value[0], 'dummy_data')
self.assertEqual(return_value[1], 'dummy_data')
return_value = multi_process_runner.run(proc_func_that_does_nothing,
cluster_spec).return_value
self.assertFalse(return_value)
def test_multi_process_runner_args_passed_correctly(self):
return_value = multi_process_runner.run(
proc_func_that_return_args_and_kwargs,
multi_worker_test_base.create_cluster_spec(num_workers=1),
args=('a', 'b'),
kwargs={
'c_k': 'c_v'
}).return_value
self.assertEqual(return_value[0][0], 'a')
self.assertEqual(return_value[0][1], 'b')
self.assertEqual(return_value[0][2], ('c_k', 'c_v'))
def test_stdout_captured(self):
def simple_print_func():
print('This is something printed.', flush=True)
return 'This is returned data.'
mpr_result = multi_process_runner.run(
simple_print_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
std_stream_results = mpr_result.stdout
return_value = mpr_result.return_value
self.assertIn('[worker-0]: This is something printed.\n',
std_stream_results)
self.assertIn('[worker-1]: This is something printed.\n',
std_stream_results)
self.assertIn('This is returned data.', return_value)
def test_process_that_exits(self):
def func_to_exit_in_5_sec():
logging.error('foo')
time.sleep(10)
logging.error('bar')
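    # max_run_time=5 terminates the subprocess mid-sleep, so 'foo' gets
    # logged but 'bar' never does (asserted below)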
mpr = multi_process_runner.MultiProcessRunner(
func_to_exit_in_5_sec,
multi_worker_test_base.create_cluster_spec(num_workers=1),
list_stdout=True,
max_run_time=5)
mpr.start()
stdout = mpr.join().stdout
self.assertLen([msg for msg in stdout if 'foo' in msg], 1)
self.assertLen([msg for msg in stdout if 'bar' in msg], 0)
def test_termination(self):
def proc_func():
for i in range(0, 10):
print(
'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)
time.sleep(5)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(5)
mpr.terminate('worker', 0)
std_stream_results = mpr.join().stdout
# Worker 0 is terminated in the middle, so it should not have iteration 9
# printed.
self.assertIn('[worker-0]: index 0, iteration 0\n', std_stream_results)
self.assertNotIn('[worker-0]: index 0, iteration 9\n',
std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 0\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 9\n', std_stream_results)
def test_termination_and_start_single_process(self):
def proc_func():
for i in range(0, 10):
print(
'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(3)
mpr.terminate('worker', 0)
mpr.start_single_process('worker', 0)
std_stream_results = mpr.join().stdout
# Worker 0 is terminated in the middle, but a new worker 0 is added, so it
# should still have iteration 9 printed. Moreover, iteration 0 of worker 0
# should happen twice.
self.assertLen(
[s for s in std_stream_results if 'index 0, iteration 0' in s], 2)
self.assertIn('[worker-0]: index 0, iteration 9\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 0\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 9\n', std_stream_results)
def test_streaming(self):
def proc_func():
for i in range(5):
logging.info('(logging) %s-%d, i: %d',
multi_worker_test_base.get_task_type(), self._worker_idx(),
i)
print(
'(print) {}-{}, i: {}'.format(
multi_worker_test_base.get_task_type(), self._worker_idx(), i),
flush=True)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=2, num_ps=2, has_eval=True),
list_stdout=True)
mpr._dependence_on_chief = False
mpr.start()
mpr.start_single_process('worker', 2)
mpr.start_single_process('ps', 2)
mpr_result = mpr.join()
list_to_assert = mpr_result.stdout
for job in ['chief', 'evaluator']:
for iteration in range(5):
self.assertTrue(
any('(logging) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
self.assertTrue(
any('(print) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
for job in ['worker', 'ps']:
for iteration in range(5):
for task in range(3):
self.assertTrue(
any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
self.assertTrue(
any('(print) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
task = 3
self.assertFalse(
any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
self.assertFalse(
any('(print) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
def test_start_in_process_as(self):
def proc_func():
for i in range(5):
logging.info('%s-%d, i: %d', multi_worker_test_base.get_task_type(),
self._worker_idx(), i)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
list_stdout=True)
def eval_func():
time.sleep(1)
mpr.start_single_process(task_type='evaluator', task_id=0)
eval_thread = threading.Thread(target=eval_func)
eval_thread.start()
mpr.start_in_process_as(as_task_type='chief', as_task_id=0)
eval_thread.join()
list_to_assert = mpr.join().stdout
for job in ['worker', 'evaluator']:
for iteration in range(5):
self.assertTrue(
any('{}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
def test_barrier(self):
multi_process_runner.run(
proc_func_with_barrier,
cluster_spec=multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
)
def test_barrier_called_in_main_process(self):
with self.assertRaises(ValueError):
multi_process_runner.barrier()
if __name__ == '__main__':
multi_process_runner.test_main()
|
IndexFiles_zhCN.py
|
# SJTU EE208
INDEX_DIR = "IndexFiles.index"
import sys, os, lucene, threading, time
from datetime import datetime
# from java.io import File
from java.nio.file import Paths
from org.apache.lucene.analysis.miscellaneous import LimitTokenCountAnalyzer
from org.apache.lucene.analysis.standard import StandardAnalyzer
from org.apache.lucene.analysis.core import WhitespaceAnalyzer
from org.apache.lucene.document import Document, Field, FieldType, StringField, TextField
from org.apache.lucene.index import FieldInfo, IndexWriter, IndexWriterConfig, IndexOptions
from org.apache.lucene.store import SimpleFSDirectory
from org.apache.lucene.util import Version
import jieba
import paddle
import re
from bs4 import BeautifulSoup
from urllib.parse import urlparse
# pip install paddlepaddle lxml
paddle.enable_static()
"""
This class is loosely based on the Lucene (java implementation) demo class
org.apache.lucene.demo.IndexFiles. It will take a directory as an argument
and will index all of the files in that directory and downward recursively.
It will index on the file path, the file name and the file contents. The
resulting Lucene index will be placed in the current directory and called
'index'.
"""
class Ticker(object):
def __init__(self):
self.tick = True
def run(self):
while self.tick:
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(1.0)
def get_self_url(content):
prefix = "<!- SELF_URL_TAG:"
suffix = " -->"
off1 = len(prefix)
off2 = - len(suffix)
pattern = re.compile(prefix + ".*?" + suffix)
res = re.search(pattern=pattern,string = content)
st,ed = res.span()[0],res.span()[1]
return content[st + off1:ed + off2]
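# A sketch of the tag convention assumed by get_self_url(): the crawler is
# expected to prepend a comment of the exact form
#   <!- SELF_URL_TAG:http://example.com/a.html -->
# to each saved page (the URL here is illustrative), so that
# get_self_url('<!- SELF_URL_TAG:http://example.com/a.html --><html>...')
# returns 'http://example.com/a.html'.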
def get_domain(url):
return urlparse(url).netloc
def get_title(content):
soup = BeautifulSoup(content,features="html.parser")
title_ele = soup.find("title")
title = title_ele.string
return title
def clean_html(content):
soup = BeautifulSoup(content,features="html.parser")
for script in soup(["script", "style"]): # 去除javascript https://stackoverflow.com/questions/328356/extracting-text-from-html-file-using-python
script.extract()
text = soup.get_text()
# break into lines and remove leading and trailing space on each
lines = (line.strip() for line in text.splitlines())
# drop blank lines
text = '\n'.join(line for line in lines if line)
return text
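# Hypothetical usage of the helpers above (the file path is illustrative):
#   raw = open('../../html/sample.html', encoding='utf-8').read()
#   print(get_title(raw), get_domain(get_self_url(raw)))
#   print(clean_html(raw)[:200])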
class IndexFiles(object):
"""Usage: python IndexFiles <doc_directory>"""
def __init__(self, root, storeDir):
if not os.path.exists(storeDir):
os.mkdir(storeDir)
# store = SimpleFSDirectory(File(storeDir).toPath())
store = SimpleFSDirectory(Paths.get(storeDir))
analyzer = WhitespaceAnalyzer()
analyzer = LimitTokenCountAnalyzer(analyzer, 1048576)
config = IndexWriterConfig(analyzer)
config.setOpenMode(IndexWriterConfig.OpenMode.CREATE)
writer = IndexWriter(store, config)
self.indexDocs(root, writer)
ticker = Ticker()
print('commit index')
threading.Thread(target=ticker.run).start()
writer.commit()
writer.close()
ticker.tick = False
print('done')
def indexDocs(self, root, writer):
t1 = FieldType()
t1.setStored(True)
t1.setTokenized(False)
t1.setIndexOptions(IndexOptions.NONE) # Not Indexed
t2 = FieldType()
t2.setStored(False)
t2.setTokenized(True)
t2.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) # Indexes documents, frequencies and positions.
for root, dirnames, filenames in os.walk(root):
for filename in filenames:
if not filename.endswith('.html'):
continue
print("adding", filename)
try:
path = os.path.join(root, filename)
file = open(path, encoding='utf-8')
contents = file.read()
#print(contents[:100])
page_url = get_self_url(contents)
page_domain = get_domain(page_url)
page_title = get_title(contents)
contents = clean_html(contents)
cut_words = jieba.cut_for_search(contents) # requires paddlepaddle. -> pip install paddlepaddle
contents = " ".join(cut_words)
#print(contents)
file.close()
doc = Document()
doc.add(Field("name", filename, t1))
doc.add(Field("path", path, t1))
doc.add(Field("url", page_url, t1))
doc.add(TextField("site",page_domain,Field.Store.YES)) # 不能用t1,要想搜索site必须把site设为indexed!
doc.add(Field("title", page_title, t1))
print(filename,path,page_url,page_domain,page_title)
if len(contents) > 0:
doc.add(Field("contents", contents, t2))
else:
print("warning: no content in %s" % filename)
writer.addDocument(doc)
except Exception as e:
print("Failed in indexDocs:", e)
if __name__ == '__main__':
    lucene.initVM()  # vmargs=['-Djava.awt.headless=true']
print('lucene', lucene.VERSION)
# import ipdb; ipdb.set_trace()
start = datetime.now()
try:
IndexFiles('../../html', "index_zhCN")
end = datetime.now()
print(end - start)
except Exception as e:
print("Failed: ", e)
raise e
|
temperature_server_IOC.py
|
"""Temperature controller server
The server communicates with Lightwave (previously known as the temperature controller IOC) and the Oasis IOC to synchronize temperature changes.
Authors: Valentyn Stadnydskyi, Friedrich Schotte
Date created: 2019-05-08
Date last modified: 2019-05-14
"""
__version__ = "0.1" # Friedrich Schotte: bug fixes
from logging import debug,warn,info,error
import os
from IOC import IOC
import traceback
from time import time,sleep
from numpy import empty, mean, std, zeros, abs, where, nan , isnan
import numpy.polynomial.polynomial as poly
from scipy.interpolate import interp1d
from CA import caget, caput
from CAServer import casput,casget,casdel
class Temperature_Server_IOC(object):
name = "temperature_server_IOC"
from persistent_property import persistent_property
prefix = persistent_property("prefix","NIH:TEMP")
SCAN = persistent_property("SCAN",0.5)
P_default = persistent_property("P_default",1.000)
I_default = persistent_property("I_default",0.316)
D_default = persistent_property("D_default",0.562)
oasis_slave = persistent_property("oasis_slave",1)
temperature_oasis_switch = persistent_property("T_threshold",83.0)
idle_temperature_oasis = persistent_property("idle_temperature_oasis",8.0)
temperature_oasis_limit_high = persistent_property("temperature_oasis_limit_high",45.0)
oasis_headstart_time = persistent_property("oasis_headstart_time",15.0)
lightwave_prefix = persistent_property("lightwave_prefix",'NIH:LIGHTWAVE')
oasis_prefix = persistent_property("oasis_prefix",'NIH:CHILLER')
set_point_update_period = persistent_property("set_point_update_period",0.5)
running = False
last_valid_reply = 0
was_online = False
ramping_cancelled = False
idle_temperature = 22.0
time_points = []
temp_points = []
def get_EPICS_enabled(self):
return self.running
def set_EPICS_enabled(self,value):
from thread import start_new_thread
if value:
if not self.running: start_new_thread(self.run,())
else: self.running = False
EPICS_enabled = property(get_EPICS_enabled,set_EPICS_enabled)
def startup(self):
from CAServer import casput,casmonitor
from CA import caput,camonitor
from numpy import nan
#self.P_default , self.I_default , self.D_default = 1.0,0.316,0.562
#print('startup with prefix = %r' %self.prefix)
casput(self.prefix+".SCAN",self.SCAN)
casput(self.prefix+".DESC",value = "Temperature server IOC: a System Layer server that orchestrates setting on Lightwave IOC and Oasis IOC.", update = False)
casput(self.prefix+".EGU",value = "C")
# Set defaults
casput(self.prefix+".VAL",value = nan)
casput(self.prefix+".VAL_ADV",value = nan)
casput(self.prefix+".RBV",value = nan)
casput(self.prefix+".P",value = nan)
casput(self.prefix+".I",value = nan)
casput(self.prefix+".TIME_POINTS",self.time_points)
casput(self.prefix+".TEMP_POINTS",self.temp_points)
casput(self.prefix+".FAULTS"," ")
casput(self.prefix+".DMOV",value = nan)
casput(self.prefix+".KILL",value = 'write password to kill the process')
casput(self.prefix+".P_default",value = self.P_default)
casput(self.prefix+".I_default",value = self.I_default)
casput(self.prefix+".D_default",value = self.D_default)
casput(self.prefix+".oasis_slave",value = self.oasis_slave)
casput(self.prefix+".temperature_oasis_switch",value = self.temperature_oasis_switch)
casput(self.prefix+".idle_temperature_oasis",value = self.idle_temperature_oasis)
casput(self.prefix+".temperature_oasis_limit_high",value = self.temperature_oasis_limit_high)
casput(self.prefix+".oasis_headstart_time",value = self.oasis_headstart_time)
casput(self.prefix+".lightwave_prefix",value = self.lightwave_prefix)
casput(self.prefix+".oasis_prefix",value = self.oasis_prefix)
casput(self.prefix+".set_point_update_period",value = self.set_point_update_period)
casput(self.prefix+".oasis_RBV",value = nan)
casput(self.prefix+".oasis_VAL",value = nan)
#PV with a list of all process variable registered at the current Channel Access Server
casput(self.prefix+".LIST_ALL_PVS",value = self.get_pv_list())
# Monitor client-writable PVs.
casmonitor(self.prefix+".VAL",callback=self.monitor)
casmonitor(self.prefix+".VAL_ADV",callback=self.monitor)
casmonitor(self.prefix+".TIME_POINTS",callback=self.monitor)
casmonitor(self.prefix+".TEMP_POINTS",callback=self.monitor)
casmonitor(self.prefix+".KILL",callback=self.monitor)
casmonitor(self.prefix+".P_default",callback=self.monitor)
casmonitor(self.prefix+".I_default",callback=self.monitor)
casmonitor(self.prefix+".D_default",callback=self.monitor)
casmonitor(self.prefix+".oasis_slave",callback=self.monitor)
casmonitor(self.prefix+".temperature_oasis_switch",callback=self.monitor)
casmonitor(self.prefix+".idle_temperature_oasis",callback=self.monitor)
casmonitor(self.prefix+".temperature_oasis_limit_high",callback=self.monitor)
casmonitor(self.prefix+".oasis_headstart_time",callback=self.monitor)
casmonitor(self.prefix+".lightwave_prefix",callback=self.monitor)
casmonitor(self.prefix+".oasis_prefix",callback=self.monitor)
casmonitor(self.prefix+".set_point_update_period",callback=self.monitor)
#############################################################################
        ## Monitor server-writable PVs that come from other servers
## Monitor Timing system IOC
from timing_system import timing_system
camonitor(timing_system.acquiring.PV_name,callback=self.on_acquire)
## Lightwave Temperature controller server
prefix = self.lightwave_prefix
camonitor(prefix+".VAL",callback=self.lightwave_monitor)
camonitor(prefix+".RBV",callback=self.lightwave_monitor)
camonitor(prefix+".P",callback=self.lightwave_monitor)
camonitor(prefix+".I",callback=self.lightwave_monitor)
camonitor(prefix+".DMOV",callback=self.lightwave_monitor)
## Oasis chiller server
prefix = self.oasis_prefix
camonitor(prefix+".VAL",callback=self.oasis_monitor)
camonitor(prefix+".RBV",callback=self.oasis_monitor)
## Create local circular buffers
from circular_buffer_LL import Server
self.buffers = {}
self.buffers['oasis_RBV'] = Server(size = (2,1*3600*2) , var_type = 'float64')
self.buffers['oasis_VAL'] = Server(size = (2,1*3600*2) , var_type = 'float64')
self.buffers['oasis_FAULTS'] = Server(size = (2,1*3600*2) , var_type = 'float64')
self.buffers['lightwave_RBV'] = Server(size = (2,1*3600*2) , var_type = 'float64')
self.buffers['lightwave_P'] = Server(size = (2,1*3600*2) , var_type = 'float64')
self.buffers['lightwave_I'] = Server(size = (2,1*3600*2) , var_type = 'float64')
self.buffers['lightwave_VAL'] = Server(size = (2,1*3600*2) , var_type = 'float64')
def update_once(self):
from CAServer import casput
from numpy import isfinite,isnan,nan
from time import time
        from time import sleep
pass
def run(self):
"""Run EPICS IOC"""
self.startup()
self.running = True
while self.running:
sleep(0.1)
self.running = False
def start(self):
"""Run EPCIS IOC in background"""
from threading import Thread
task = Thread(target=self.run,name="temperature_server_IOC.run")
task.daemon = True
task.start()
def shutdown(self):
from CAServer import casdel
print('SHUTDOWN command received')
self.running = False
casdel(self.prefix)
del self
def get_pv_list(self):
from CAServer import PVs
lst = list(PVs.keys())
#lst_new = []
#for item in lst:
# lst_new.append(item.replace(self.prefix,'').replace('.',''))
return lst#lst_new
def monitor(self,PV_name,value,char_value):
"""Process PV change requests"""
from CAServer import casput
from CA import caput
print("monitor: %s = %r" % (PV_name,value))
if PV_name == self.prefix+".VAL_ADV":
if self.get_set_lightwaveT() != value or self.get_set_oasisT() != self.temp_to_oasis(value):
self.set_T(value)
if PV_name == self.prefix+".VAL":
if self.get_set_lightwaveT() != value or self.get_set_oasisT() != self.temp_to_oasis(value):
self.set_adv_T(value)
if PV_name == self.prefix + ".oasis_VAL":
if self.get_set_oasisT() != value:
self.set_set_oasisT(value)
if PV_name == self.prefix + ".TIME_POINTS":
self.time_points = value
if PV_name == self.prefix + ".TEMP_POINTS":
self.temp_points = value
if PV_name == self.prefix + ".KILL":
if value == 'shutdown':
self.shutdown()
if PV_name == self.prefix + ".P_default":
self.P_default = value
self.set_PIDCOF((self.P_default,self.I_default,self.D_default))
if PV_name == self.prefix + ".I_default":
self.I_default = value
self.set_PIDCOF((self.P_default,self.I_default,self.D_default))
if PV_name == self.prefix + ".D_default":
self.D_default = value
self.set_PIDCOF((self.P_default,self.I_default,self.D_default))
if PV_name == self.prefix + ".oasis_slave":
self.oasis_slave = value
if PV_name == self.prefix + ".temperature_oasis_switch":
self.temperature_oasis_switch = value
if PV_name == self.prefix + ".idle_temperature_oasis":
self.idle_temperature_oasis = value
if PV_name == self.prefix + ".temperature_oasis_limit_high":
self.temperature_oasis_limit_high = value
if PV_name == self.prefix + ".oasis_headstart_time":
self.oasis_headstart_time = value
if PV_name == self.prefix + ".lightwave_prefix":
self.lightwave_prefix = value
if PV_name == self.prefix + ".oasis_prefix":
self.oasis_prefix = value
if PV_name == self.prefix + ".set_point_update_period":
self.set_point_update_period = value
def lightwave_monitor(self,PV_name,value,char_value):
#print('time: %r, PV_name = %r,value= %r,char_value = %r' %(time(),PV_name,value,char_value) )
from CA import cainfo
from CAServer import casput
prefix = self.lightwave_prefix
if PV_name == prefix+".VAL":
arr = empty((2,1))
arr[0] = cainfo(prefix+".VAL","timestamp")
arr[1] = float(value)
self.buffers['lightwave_VAL'].append(arr)
casput(self.prefix +'.VAL',value = float(value))
if PV_name == prefix+".RBV":
arr = empty((2,1))
arr[0] = cainfo(prefix+".RBV","timestamp")
arr[1] = float(value)
self.buffers['lightwave_RBV'].append(arr)
casput(self.prefix +'.RBV',value = float(value))
if PV_name == prefix+".P":
arr = empty((2,1))
arr[0] = cainfo(prefix+".P","timestamp")
arr[1] = float(value)
self.buffers['lightwave_P'].append(arr)
casput(self.prefix +'.P',value = float(value))
if PV_name == prefix+".I":
arr = empty((2,1))
arr[0] = cainfo(prefix+".I","timestamp")
arr[1] = float(value)
self.buffers['lightwave_I'].append(arr)
casput(self.prefix +'.I',value = float(value))
#Done Move PV
if PV_name == prefix+".DMOV":
casput(self.prefix +'.DMOV',value = float(value))
def oasis_monitor(self,PV_name,value,char_value):
#print('oasis_monitor: time: %r, PV_name = %r,value= %r,char_value = %r' %(time(),PV_name,value,char_value) )
from CA import cainfo
prefix = self.oasis_prefix
if PV_name == prefix+".VAL":
arr = empty((2,1))
arr[0] = cainfo(prefix+".VAL","timestamp")
arr[1] = float(value)
self.buffers['oasis_VAL'].append(arr)
casput(self.prefix +'.oasis_VAL',value = float(value))
if PV_name == prefix+".RBV":
arr = empty((2,1))
arr[0] = cainfo(prefix+".RBV","timestamp")
arr[1] = float(value)
self.buffers['oasis_RBV'].append(arr)
casput(self.prefix +'.oasis_RBV',value = float(value))
## Temperature trajectory
def on_acquire(self):
"""
        Starts the temperature ramp. Called as a camonitor callback when the
        timing system's 'acquiring' PV changes.
"""
print('on acquire')
self.ramping = self.acquiring
self.start_ramping()
def start_ramping(self):
"""
        Starts the run_ramping_once method in a separate thread.
"""
from thread import start_new_thread
start_new_thread(self.run_ramping_once,())
def run_ramping_once(self):
"""
        Runs the ramping trajectory defined by self.time_points and self.temp_points.
"""
from time_string import date_time
info("Ramp start time: %s" % date_time(self.start_time))
from time import time,sleep
from numpy import where, asarray
if len(self.temperatures) != 0:
max_set_T = max(self.temperatures)
min_set_T = min(self.temperatures)
else:
min_set_T = nan
max_set_T = nan
for (t,T, grad_T) in zip(self.times,self.temperatures,self.grad_temperatures):
dt = self.start_time + t - time()
if dt > 0:
sleep(dt)
current_setT = self.get_setT()
debug('t = %r, T = %r,dt = %r' %(t,T,dt))
if len(self.temp_points)>0:
self.set_ramp_T(T)
else:
info("The TEMP_POINTS list is empty. No temperature to set in the temperature trajectory.")
# if T == max_set_T or T == min_set_T:
# self.set_PIDCOF((self.P_default,self.I_default,self.D_default))
# else:
# (self.P_default,0.0,0.0)
# if grad_T > 0:
# self.set_PIDCOF((self.proportional_vs_sample_temperature(T,'up'),0.0,0.0))
# elif grad_T < 0:
# self.set_PIDCOF((self.proportional_vs_sample_temperature(T,'down'),0.0,0.0))
# else:
# self.set_PIDCOF((self.P_default,0.0,0.0))
try:
indices = where(self.times >= t+self.oasis_headstart_time)[0][0:1]
debug('current index in the trajectory = %r' %indices)
if len(indices) > 0:
idx = indices[0]
self.set_set_oasisT(self.oasis_temperatures[idx])
debug('time = %r, oasis T = %r' %(t,self.temp_to_oasis(self.temperatures[idx])))
except:
error(traceback.format_exc())
if self.ramping_cancelled: break
info("Ramp ended")
self.set_PIDCOF((self.P_default,self.I_default,self.D_default))
self.ramping_cancelled = False
self.ramping = False
@property
def acquiring(self):
from timing_system import timing_system
return timing_system.acquiring.value
@property
def start_time(self):
from numpy import nan
start_time = nan
from timing_system import timing_system
if timing_system.acquiring.value == 1:
from CA import cainfo
start_time = cainfo(timing_system.acquiring.PV_name,"timestamp")
return start_time
@property
def times(self):
"""
        Converts self.time_points to an array of time values spaced no more
        than self.set_point_update_period apart (the final point is kept).
"""
from numpy import arange,concatenate
min_dt = self.set_point_update_period
times = [[]]
for i in range(0,len(self.time_points)-1):
T0,T1 = self.time_points[i],self.time_points[i+1]
DT = T1-T0
N = max(int(DT/min_dt),1)
dt = DT/N
T = T0 + arange(0,N)*dt
times.append(T)
if len(self.time_points) > 0:
times.append([self.time_points[-1]])
times = concatenate(times)
return times
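    # Example with assumed values: set_point_update_period = 0.5 and
    # time_points = [0.0, 1.0] give times = [0.0, 0.5, 1.0]; every segment
    # is subdivided into steps no longer than the update period, and the
    # final time point is appended once.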
@property
def temperatures(self):
temperatures = []
time_points = self.time_points[0:self.N_points]
temp_points = self.temp_points[0:self.N_points]
if len(temp_points) > 1:
from scipy.interpolate import interp1d
f = interp1d(time_points,temp_points, kind='linear',bounds_error=False)
temperatures = f(self.times)
if len(temp_points) == 1:
from numpy import array
temperatures = array(temp_points)
return temperatures
@property
def grad_temperatures(self):
from numpy import gradient,array
temp_points = self.temp_points[0:self.N_points]
if len(temp_points) > 1:
grad = gradient(self.temperatures)
else:
grad = array([0])
return grad
@property
def oasis_temperatures(self):
from numpy import max
if len(self.temperatures) == 0:
t_oasis = []
else:
temp_points = self.temperatures
first_temp = self.temperatures[0]
max_temp = max(temp_points)
t_oasis = []
idx = 0
for temp in temp_points:
oasis_temp = self.temp_to_oasis(temp)
if max_temp >=self.temperature_oasis_switch:
if idx <=1:
t_oasis.append(oasis_temp)
elif idx > 1:
if temp > temp_points[idx-1] and temp_points[idx-1] > temp_points[idx-2]:
t_oasis.append(self.temperature_oasis_limit_high)
elif temp < temp_points[idx-1] and temp_points[idx-1] < temp_points[idx-2]:
t_oasis.append(self.idle_temperature_oasis)
else:
t_oasis.append(t_oasis[idx-2])
else:
t_oasis.append(oasis_temp)
idx +=1
return t_oasis
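    # Sketch of the rule above: when the trajectory peaks at or above
    # temperature_oasis_switch, the chiller set point is driven to the high
    # limit while the sample temperature is rising, to the idle value while
    # it is falling, and otherwise copies the value from two positions
    # earlier (the first two points use the plain temp_to_oasis mapping);
    # trajectories that stay below the switch use the plain mapping
    # throughout.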
@property
def oasis_times(self):
time_points = self.times
time_oasis = []
for time in time_points:
            time_oasis.append(time - self.oasis_headstart_time)
return time_oasis
@property
def N_points(self):
return min(len(self.time_points),len(self.temp_points))
def get_setT(self):
value = self.buffers['lightwave_VAL'].get_last_N(N = 1)[1,0]
return value
def set_setT(self,value):
debug("set_point = %r" % value)
value = float(value)
if self.get_setT() != value:
            self.set_set_lightwaveT(value)
            self.set_set_oasisT(self.temp_to_oasis(value))
setT = property(get_setT,set_setT)
def get_lightwaveT(self):
value = self.buffers['lightwave_RBV'].get_last_N(N = 1)[1,0]
return value
lightwaveT = property(get_lightwaveT)
def get_set_lightwaveT(self):
value = self.buffers['lightwave_VAL'].get_last_N(N = 1)[1,0]
return value
def set_set_lightwaveT(self,value):
from CA import caput, cawait
from numpy import isnan
        if not isnan(value):
caput(self.lightwave_prefix + '.VAL', value = float(value))
cawait(self.lightwave_prefix + '.VAL')
set_lightwaveT = property(get_set_lightwaveT,set_set_lightwaveT)
def get_oasisT(self):
value = self.buffers['oasis_RBV'].get_last_N(N = 1)[1,0]
return value
oasisT = property(get_oasisT)
def get_set_oasisT(self):
value = self.buffers['oasis_VAL'].get_last_N(N = 1)[1,0]
return value
def set_set_oasisT(self,value):
from CA import caput
from numpy import isnan
if self.get_set_oasisT() != float(value):
            if not isnan(value):
caput(self.oasis_prefix+'.VAL', value = float(value))
set_oasisT = property(get_set_oasisT,set_set_oasisT)
def set_T(self,value):
value = float(value)
if value != self.get_set_lightwaveT() or self.temp_to_oasis(value) != self.get_set_oasisT():
if self.oasis_slave:
self.set_set_oasisT(self.temp_to_oasis(value))
self.set_set_lightwaveT(value)
def set_ramp_T(self,value):
value = float(value)
if value != self.get_lightwaveT():
self.set_set_lightwaveT(value)
def set_adv_T(self,value):
value = float(value)
if value != self.get_lightwaveT() or self.temp_to_oasis(value) != self.get_set_oasisT() :
self.set_set_oasisT(self.temp_to_oasis(value))
self.set_PIDCOF((self.P_default,0.0,self.D_default))
self.set_set_lightwaveT(value)
info('set_set_lightwaveT %r at %r' %(value , time()))
info(abs(self.get_lightwaveT() - self.get_set_lightwaveT()))
if value >= self.temperature_oasis_switch:
t_diff = 3.0
else:
t_diff = 3.0
timeout = abs(self.get_lightwaveT() - self.get_set_lightwaveT())*1.5
t1 = time()
while abs(self.get_lightwaveT() - self.get_set_lightwaveT()) > t_diff:
sleep(0.05)
if time() - t1 > timeout:
break
self.set_PIDCOF((self.P_default,self.I_default,self.D_default))
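    # Note: set_adv_T zeroes the integral term during the jump
    # (PIDCOF = (P, 0, D)) and restores the full PID once the readback is
    # within t_diff degrees of the set point, or after a timeout of roughly
    # 1.5 s per degree of initial error.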
def set_PCOF(self,value):
from CA import caput, cawait
if self.get_PCOF() != value:
caput(self.lightwave_prefix + '.PCOF',value)
cawait(self.lightwave_prefix + '.PCOF')
def get_PCOF(self):
from CA import caget
value = caget(self.lightwave_prefix + '.PCOF')
return value
def set_ICOF(self,value):
from CA import caput, cawait
if self.get_ICOF() != value:
caput(self.lightwave_prefix + '.ICOF',value)
cawait(self.lightwave_prefix + '.ICOF')
def get_ICOF(self):
from CA import caget
value = caget(self.lightwave_prefix + '.ICOF')
return value
def set_DCOF(self,value):
from CA import caput,cawait
if self.get_DCOF() != value:
caput(self.lightwave_prefix + '.DCOF',value)
cawait(self.lightwave_prefix + '.DCOF')
def get_DCOF(self):
from CA import caget
value = caget(self.lightwave_prefix + '.DCOF')
return value
def set_PIDCOF(self,value):
from CA import caput,cawait
if self.get_PIDCOF() != value:
print('setting PIDCOF: %r -> %r' %(self.get_PIDCOF(),value))
caput(self.lightwave_prefix + '.PIDCOF',value)
cawait(self.lightwave_prefix + '.PIDCOF')
def get_PIDCOF(self):
from CA import caget
value = caget(self.lightwave_prefix + '.PIDCOF')
return value
def temp_to_oasis(self,T, mode = 'bistable'):
if mode == 'bistable':
if T >= self.temperature_oasis_switch:
t = self.temperature_oasis_limit_high
else:
t = self.idle_temperature_oasis
else:
oasis_min = t_min= self.idle_temperature_oasis
oasis_max = t_max = self.temperature_oasis_limit_high
T_max= 120.0
T_min= -16
            if T_min <= T <= T_max:
                t = ((T - T_min)/(T_max - T_min))*(t_max - t_min) + t_min
            elif T > T_max:
                t = self.temperature_oasis_limit_high
            else:
                t = self.idle_temperature_oasis
if self.oasis_slave:
return round(t,1)
else:
return self.idle_temperature_oasis
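    # Worked example (linear mode, using the values declared above):
    # with idle_temperature_oasis = 8.0, temperature_oasis_limit_high = 45.0,
    # T_min = -16 and T_max = 120, a sample temperature T = 52 maps to
    #   t = ((52 - (-16)) / (120 - (-16))) * (45.0 - 8.0) + 8.0 = 26.5
    # so temp_to_oasis(52, mode='linear') returns 26.5 when oasis_slave is
    # set.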
def proportional_vs_sample_temperature(self, temperature = 0.0, direction = ''):
T = temperature
if direction == 'down':
P = 4e-8*T**4 - 1e-5*T**3 + 0.0012*T**2 - 0.0723*T + 3.3001
elif direction == 'up':
P = 7e-9*T**4 - 3e-6*T**3 + 0.0004*T**2 - 0.0003*T + 1.6942
else:
P = self.P_default
return round(P,3)
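    # Example (the coefficients are empirical fits): for T = 0.0 the
    # polynomials give P = 3.3 for direction 'down' and P = 1.694 for 'up'
    # (after round(P, 3)); any other direction falls back to P_default.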
temperature_server_IOC = Temperature_Server_IOC()
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.INFO,
format="%(asctime)s %(levelname)s %(module)s.%(funcName)s: %(message)s",
)
from timing_sequencer import timing_sequencer
print("timing_sequencer.queue_active = %r" % timing_sequencer.queue_active)
print("timing_sequencer.queue_active = False # cancel acquistion")
print("timing_sequencer.queue_active = True # simulate acquistion")
print("timing_sequencer.queue_repeat_count = 0 # restart acquistion")
print("timing_sequencer.queue_active = True # simulate acquistion")
print("self.start_time = time(); self.start_ramping()")
self = temperature_server_IOC
##from matplotlib import pyplot as plt
self.time_points = [0.0,30.0,302.0,332.0,634.0,30.0+634.0,302.0+634.0,332.0+634.0,634.0+634.0]
self.temp_points = [-16,-16,120,120,-16,-16,120,120,-16]
##print("self.lightwave_dl.driver.feedback_loop.PID = (1.0, 0.300000012, 0.561999977)")
##print('plt.plot(self.times,self.temperatures); plt.plot(self.oasis_times,self.oasis_temperatures); plt.show()')
##plt.plot(self.times,self.temperatures); plt.plot(self.oasis_times,self.oasis_temperatures); plt.show()
|
plotmodel.py
|
from collections import defaultdict
import copy
import itertools
import threading
from ast import literal_eval
from PySide2.QtWidgets import QItemDelegate, QColorDialog, QLineEdit, QMessageBox
from PySide2.QtCore import QAbstractTableModel, QModelIndex, Qt, QSize, QEvent
from PySide2.QtGui import QColor
import openmc
import openmc.lib
import numpy as np
from .statepointmodel import StatePointModel
from .plot_colors import random_rgb, reset_seed
ID, NAME, COLOR, COLORLABEL, MASK, HIGHLIGHT = tuple(range(0, 6))
__VERSION__ = "0.2.1"
_VOID_REGION = -1
_NOT_FOUND = -2
_OVERLAP = -3
_MODEL_PROPERTIES = ('temperature', 'density')
_PROPERTY_INDICES = {'temperature': 0, 'density': 1}
_REACTION_UNITS = 'Reactions per Source Particle'
_FLUX_UNITS = 'Particle-cm per Source Particle'
_PRODUCTION_UNITS = 'Particles Produced per Source Particle'
_ENERGY_UNITS = 'eV per Source Particle'
_SPATIAL_FILTERS = (openmc.UniverseFilter,
openmc.MaterialFilter,
openmc.CellFilter,
openmc.MeshFilter)
_PRODUCTIONS = ('delayed-nu-fission', 'prompt-nu-fission', 'nu-fission',
'nu-scatter', 'H1-production', 'H2-production',
'H3-production', 'He3-production', 'He4-production')
_SCORE_UNITS = {p: _PRODUCTION_UNITS for p in _PRODUCTIONS}
_SCORE_UNITS['flux'] = 'Particle-cm/Particle'
_SCORE_UNITS['current'] = 'Particles per source Particle'
_SCORE_UNITS['events'] = 'Events per Source Particle'
_SCORE_UNITS['inverse-velocity'] = 'Particle-seconds per Source Particle'
_SCORE_UNITS['heating'] = _ENERGY_UNITS
_SCORE_UNITS['heating-local'] = _ENERGY_UNITS
_SCORE_UNITS['kappa-fission'] = _ENERGY_UNITS
_SCORE_UNITS['fission-q-prompt'] = _ENERGY_UNITS
_SCORE_UNITS['fission-q-recoverable'] = _ENERGY_UNITS
_SCORE_UNITS['decay-rate'] = 'Seconds^-1'
_SCORE_UNITS['damage-energy'] = _ENERGY_UNITS
_TALLY_VALUES = {'Mean': 'mean',
'Std. Dev.': 'std_dev',
'Rel. Error': 'rel_err'}
class PlotModel():
""" Geometry and plot settings for OpenMC Plot Explorer model
Attributes
----------
geom : openmc.Geometry instance
OpenMC Geometry of the model
modelCells : collections.OrderedDict
Dictionary mapping cell IDs to openmc.Cell instances
modelMaterials : collections.OrderedDict
Dictionary mapping material IDs to openmc.Material instances
ids : NumPy int array (v_res, h_res, 1)
Mapping of plot coordinates to cell/material ID by pixel
image : NumPy int array (v_res, h_res, 3)
The current RGB image data
statepoint : StatePointModel
Simulation data model used to display tally results
applied_filters : tuple of ints
IDs of the applied filters for the displayed tally
previousViews : list of PlotView instances
List of previously created plot view settings used to undo
changes made in plot explorer
subsequentViews : list of PlotView instances
List of undone plot view settings used to redo changes made
in plot explorer
defaultView : PlotView instance
Default settings for given geometry
currentView : PlotView instance
Currently displayed plot settings in plot explorer
activeView : PlotView instance
Active state of settings in plot explorer, which may or may not
have unapplied changes
"""
def __init__(self):
""" Initialize PlotModel class attributes """
# Retrieve OpenMC Cells/Materials
self.modelCells = openmc.lib.cells
self.modelMaterials = openmc.lib.materials
self.max_universe_levels = openmc.lib._coord_levels()
# Cell/Material ID by coordinates
self.ids = None
self.version = __VERSION__
# default statepoint value
self._statepoint = None
# default tally/filter info
self.appliedFilters = ()
self.appliedScores = ()
self.appliedNuclides = ()
# reset random number seed for consistent
# coloring when reloading a model
reset_seed()
self.previousViews = []
self.subsequentViews = []
self.defaultView = self.getDefaultView()
self.currentView = copy.deepcopy(self.defaultView)
self.activeView = copy.deepcopy(self.defaultView)
def openStatePoint(self, filename):
self.statepoint = StatePointModel(filename, open_file=True)
@property
def statepoint(self):
return self._statepoint
@statepoint.setter
def statepoint(self, statepoint):
if statepoint is None:
self._statepoint = None
elif isinstance(statepoint, StatePointModel):
self._statepoint = statepoint
elif isinstance(statepoint, str):
self._statepoint = StatePointModel(statepoint, open_file=True)
else:
raise TypeError("Invalid statepoint object")
if self._statepoint and not self._statepoint.is_open:
self._statepoint.open()
def getDefaultView(self):
""" Generates default PlotView instance for OpenMC geometry
Centers plot view origin in every dimension if possible. Defaults
to xy basis, with height and width to accomodate full size of
geometry. Defaults to (0, 0, 0) origin with width and heigth of
25 if geometry bounding box cannot be generated.
Returns
-------
default : PlotView instance
PlotView instance with default view settings
"""
lower_left, upper_right = openmc.lib.global_bounding_box()
# Check for valid bounding_box dimensions
if -np.inf not in lower_left[:2] and np.inf not in upper_right[:2]:
xcenter = (upper_right[0] + lower_left[0])/2
width = abs(upper_right[0] - lower_left[0]) * 1.005
ycenter = (upper_right[1] + lower_left[1])/2
height = abs(upper_right[1] - lower_left[1]) * 1.005
else:
xcenter, ycenter, width, height = (0.00, 0.00, 25, 25)
if lower_left[2] != -np.inf and upper_right[2] != np.inf:
zcenter = (upper_right[2] + lower_left[2])/2
else:
zcenter = 0.00
default = PlotView([xcenter, ycenter, zcenter], width, height)
return default
def resetColors(self):
""" Reset colors to those generated in the default view """
self.activeView.cells = self.defaultView.cells
self.activeView.materials = self.defaultView.materials
def generatePlot(self):
""" Spawn thread from which to generate new plot image """
t = threading.Thread(target=self.makePlot)
t.start()
t.join()
def makePlot(self):
""" Generate new plot image from active view settings
Creates corresponding .xml files from user-chosen settings.
Runs OpenMC in plot mode to generate new plot image.
"""
cv = self.currentView = copy.deepcopy(self.activeView)
ids = openmc.lib.id_map(cv)
props = openmc.lib.property_map(cv)
self.cell_ids = ids[:, :, 0]
self.mat_ids = ids[:, :, 1]
# set model ids based on domain
if cv.colorby == 'cell':
self.ids = self.cell_ids
domain = cv.cells
source = self.modelCells
else:
self.ids = self.mat_ids
domain = cv.materials
source = self.modelMaterials
# generate colors if not present
for cell_id, cell in cv.cells.items():
if cell.color is None:
cell.color = random_rgb()
for mat_id, mat in cv.materials.items():
if mat.color is None:
mat.color = random_rgb()
# construct image data
domain[_OVERLAP] = DomainView(_OVERLAP, "Overlap", cv.overlap_color)
domain[_NOT_FOUND] = DomainView(_NOT_FOUND, "Not Found", cv.domainBackground)
u, inv = np.unique(self.ids, return_inverse=True)
image = np.array([domain[id].color for id in u])[inv]
image.shape = (cv.v_res, cv.h_res, 3)
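        # np.unique(..., return_inverse=True) maps every pixel's id to its
        # domain color in a single vectorized lookup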
if cv.masking:
for id, dom in domain.items():
if dom.masked:
image[self.ids == int(id)] = cv.maskBackground
if cv.highlighting:
for id, dom in domain.items():
if dom.highlight:
image[self.ids == int(id)] = cv.highlightBackground
# set model image
self.image = image
# set model properties
self.properties = props
# tally data
self.tally_data = None
self.properties[self.properties < 0.0] = np.nan
self.temperatures = self.properties[..., _PROPERTY_INDICES['temperature']]
self.densities = self.properties[..., _PROPERTY_INDICES['density']]
minmax = {}
for prop in _MODEL_PROPERTIES:
idx = _PROPERTY_INDICES[prop]
prop_data = self.properties[:, :, idx]
minmax[prop] = (np.min(np.nan_to_num(prop_data)),
np.max(np.nan_to_num(prop_data)))
self.activeView.data_minmax = minmax
def undo(self):
""" Revert to previous PlotView instance. Re-generate plot image """
if self.previousViews:
self.subsequentViews.append(copy.deepcopy(self.currentView))
self.activeView = self.previousViews.pop()
self.generatePlot()
def redo(self):
""" Revert to subsequent PlotView instance. Re-generate plot image """
if self.subsequentViews:
self.storeCurrent()
self.activeView = self.subsequentViews.pop()
self.generatePlot()
def storeCurrent(self):
""" Add current view to previousViews list """
self.previousViews.append(copy.deepcopy(self.currentView))
def create_tally_image(self, view=None):
if view is None:
view = self.currentView
tally_id = view.selectedTally
scores = self.appliedScores
nuclides = self.appliedNuclides
tally_selected = view.selectedTally is not None
tally_visible = view.tallyDataVisible
visible_selection = scores and nuclides
if not tally_selected or not tally_visible or not visible_selection:
return (None, None, None, None, None)
tally = self.statepoint.tallies[tally_id]
tally_value = _TALLY_VALUES[view.tallyValue]
# check score units
units = {_SCORE_UNITS.get(score, _REACTION_UNITS) for score in scores}
if len(units) != 1:
msg_box = QMessageBox()
unit_str = " ".join(units)
msg = "The scores selected have incompatible units:\n"
for unit in units:
msg += " - {}\n".format(unit)
msg_box.setText(msg)
msg_box.setIcon(QMessageBox.Information)
msg_box.setStandardButtons(QMessageBox.Ok)
msg_box.exec_()
return (None, None, None, None, None)
units_out = list(units)[0]
if tally.contains_filter(openmc.MeshFilter):
if tally_value == 'rel_err':
# get both the std. dev. data and mean data
# to create the relative error data
mean_data = self._create_tally_mesh_image(tally,
'mean',
scores,
nuclides,
view)
std_dev_data = self._create_tally_mesh_image(tally,
'std_dev',
scores,
nuclides,
view)
image_data = 100 * np.divide(std_dev_data[0],
mean_data[0],
out=np.zeros_like(mean_data[0]),
                                             where=mean_data[0] != 0)
extents = mean_data[1]
data_min = np.min(image_data)
data_max = np.max(image_data)
return image_data, extents, data_min, data_max, '% error'
else:
image = self._create_tally_mesh_image(tally,
tally_value,
scores,
nuclides,
view)
return image + (units_out,)
else:
# same as above, get the std. dev. data
            # and mean data to produce the relative error data
if tally_value == 'rel_err':
mean_data = self._create_tally_domain_image(tally,
'mean',
scores,
nuclides,
view)
std_dev_data = self._create_tally_domain_image(tally,
'std_dev',
scores,
nuclides,
view)
image_data = 100 * np.divide(std_dev_data[0],
mean_data[0],
out=np.zeros_like(mean_data[0]),
                                             where=mean_data[0] != 0)
# adjust for NaNs in bins without tallies
image_data = np.nan_to_num(image_data,
nan=0.0,
posinf=0.0,
neginf=0.0)
extents = mean_data[1]
data_min = np.min(image_data)
data_max = np.max(image_data)
return image_data, extents, data_min, data_max, '% error'
else:
image = self._create_tally_domain_image(tally,
tally_value,
scores,
nuclides,
view)
return image + (units_out,)
def _create_tally_domain_image(self, tally, tally_value, scores, nuclides, view=None):
# data resources used throughout
if view is None:
view = self.currentView
data = tally.get_reshaped_data(tally_value)
data_out = np.full(self.ids.shape, -1.0)
def _do_op(array, tally_value, ax=0):
if tally_value == 'mean':
return np.sum(array, axis=ax)
elif tally_value == 'std_dev':
return np.sqrt(np.sum(array**2, axis=ax))
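        # (_do_op: means reduce by a plain sum, while standard deviations
        # combine in quadrature)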
# data structure for tracking which spatial
# filter bins are enabled
spatial_filter_bins = defaultdict(list)
n_spatial_filters = 0
for tally_filter in tally.filters:
if tally_filter in self.appliedFilters:
selected_bins = self.appliedFilters[tally_filter]
if type(tally_filter) in _SPATIAL_FILTERS:
spatial_filter_bins[tally_filter] = selected_bins
n_spatial_filters += 1
else:
slc = [slice(None)] * len(data.shape)
slc[n_spatial_filters] = selected_bins
slc = tuple(slc)
data = _do_op(data[slc], tally_value, n_spatial_filters)
else:
data[:, ...] = 0.0
data = _do_op(data, tally_value, n_spatial_filters)
# filter by selected scores
selected_scores = []
for idx, score in enumerate(tally.scores):
if score in scores:
selected_scores.append(idx)
data = _do_op(data[..., np.array(selected_scores)], tally_value, -1)
# filter by selected nuclides
selected_nuclides = []
for idx, nuclide in enumerate(tally.nuclides):
if nuclide in nuclides:
selected_nuclides.append(idx)
data = _do_op(data[..., np.array(selected_nuclides)], tally_value, -1)
# get data limits
data_min = np.min(data)
data_max = np.max(data)
# for all combinations of spatial bins, create a mask
# and set image data values
spatial_filters = list(spatial_filter_bins.keys())
spatial_bins = list(spatial_filter_bins.values())
for bin_indices in itertools.product(*spatial_bins):
# look up the tally value
tally_val = data[bin_indices]
if tally_val == 0.0:
continue
# generate a mask with the correct size
mask = np.full(self.ids.shape, True, dtype=bool)
for tally_filter, bin_idx in zip(spatial_filters, bin_indices):
bin = tally_filter.bins[bin_idx]
if isinstance(tally_filter, openmc.CellFilter):
mask &= self.cell_ids == bin
elif isinstance(tally_filter, openmc.MaterialFilter):
mask &= self.mat_ids == bin
elif isinstance(tally_filter, openmc.UniverseFilter):
# get the statepoint summary
univ_cells = self.statepoint.universes[bin].cells
for cell in univ_cells:
mask &= self.cell_ids == cell
# set image data values
data_out[mask] = tally_val
# mask out invalid values
image_data = np.ma.masked_where(data_out < 0.0, data_out)
return image_data, None, data_min, data_max
def _create_tally_mesh_image(self, tally, tally_value, scores, nuclides, view=None):
# some variables used throughout
if view is None:
            view = self.currentView
sp = self.statepoint
mesh = tally.find_filter(openmc.MeshFilter).mesh
def _do_op(array, tally_value, ax=0):
if tally_value == 'mean':
return np.sum(array, axis=ax)
elif tally_value == 'std_dev':
return np.sqrt(np.sum(array**2, axis=ax))
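        # (_do_op: as above, means sum linearly and std. devs combine in
        # quadrature)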
# start with reshaped data
data = tally.get_reshaped_data(tally_value)
# determine basis indices
if view.basis == 'xy':
h_ind = 0
v_ind = 1
ax = 2
elif view.basis == 'yz':
h_ind = 1
v_ind = 2
ax = 0
else:
h_ind = 0
v_ind = 2
ax = 1
# reduce data to the visible slice of the mesh values
k = int((view.origin[ax] - mesh.lower_left[ax]) // mesh.width[ax])
# setup slice
data_slice = [None, None, None]
data_slice[h_ind] = slice(mesh.dimension[h_ind])
data_slice[v_ind] = slice(mesh.dimension[v_ind])
data_slice[ax] = k
        if k < 0 or k >= mesh.dimension[ax]:
return (None, None, None, None)
# move mesh axes to the end of the filters
filter_idx = [type(filter) for filter in tally.filters].index(openmc.MeshFilter)
data = np.moveaxis(data, filter_idx, -1)
# reshape data (with zyx ordering for mesh data)
data = data.reshape(data.shape[:-1] + tuple(mesh.dimension[::-1]))
data = data[..., data_slice[2], data_slice[1], data_slice[0]]
# sum over the rest of the tally filters
for tally_filter in tally.filters:
if type(tally_filter) == openmc.MeshFilter:
continue
if tally_filter in self.appliedFilters:
selected_bins = self.appliedFilters[tally_filter]
# sum filter data for the selected bins
data = data[np.array(selected_bins)].sum(axis=0)
else:
# if the filter is completely unselected,
                # set all of its data to zero and remove the axis
data[:, ...] = 0.0
data = _do_op(data, tally_value)
# filter by selected nuclides
if not nuclides:
data = 0.0
selected_nuclides = []
for idx, nuclide in enumerate(tally.nuclides):
if nuclide in nuclides:
selected_nuclides.append(idx)
data = _do_op(data[np.array(selected_nuclides)], tally_value)
# filter by selected scores
if not scores:
data = 0.0
selected_scores = []
for idx, score in enumerate(tally.scores):
if score in scores:
selected_scores.append(idx)
data = _do_op(data[np.array(selected_scores)], tally_value)
# get dataset's min/max
data_min = np.min(data)
data_max = np.max(data)
# set image data, reverse y-axis
image_data = data[::-1, ...]
# return data extents (in cm) for the tally
extents = [mesh.lower_left[h_ind], mesh.upper_right[h_ind],
mesh.lower_left[v_ind], mesh.upper_right[v_ind]]
return image_data, extents, data_min, data_max
class PlotView(openmc.lib.plot._PlotBase):
""" View settings for OpenMC plot.
Parameters
----------
origin : 3-tuple of floats
Origin (center) of plot view
width: float
Width of plot view in model units
height : float
Height of plot view in model units
Attributes
----------
origin : 3-tuple of floats
Origin (center) of plot view
width : float
Width of the plot view in model units
height : float
Height of the plot view in model units
h_res : int
Horizontal resolution of plot image
v_res : int
Vertical resolution of plot image
aspectLock : bool
Indication of whether aspect lock should be maintained to
prevent image stretching/warping
basis : {'xy', 'xz', 'yz'}
The basis directions for the plot
colorby : {'cell', 'material', 'temperature', 'density'}
Indication of whether the plot should be colored by cell or material
masking : bool
Indication of whether cell/material masking is active
maskBackground : 3-tuple of int
RGB color to apply to masked cells/materials
highlighting: bool
Indication of whether cell/material highlighting is active
highlightBackground : 3-tuple of int
RGB color to apply to non-highlighted cells/materials
highlightAlpha : float between 0 and 1
Alpha value for highlight background color
highlightSeed : int
Random number seed used to generate color scheme when highlighting
is active
domainBackground : 3-tuple of int
RGB color to apply to plot background
color_overlaps : bool
Indicator of whether or not overlaps will be shown
overlap_color : 3-tuple of int
RGB color to apply for cell overlap regions
cells : Dict of DomainView instances
Dictionary of cell view settings by ID
materials : Dict of DomainView instances
Dictionary of material view settings by ID
domainAlpha : float between 0 and 1
Alpha value of the geometry plot
    domainVisible : bool
Controls visibility of geometry
outlines: bool
Controls visibility of geometry outlines
tallyDataColormap : str
Name of the colormap used for tally data
tallyDataVisible : bool
Indicator for whether or not the tally data is visible
tallyDataAlpha : float
Value of the tally image alpha
tallyDataIndicator : bool
Indicates whether or not the data indicator is active on the tally colorbar
tallyDataMin : float
Minimum scale value for tally data
tallyDataMax : float
Maximum scale value for tally data
tallyDataLogScale : bool
Indicator of logarithmic scale for tally data
tallyMaskZeroValues : bool
Indicates whether or not zero values in tally data should be masked
clipTallyData : bool
Indicates whether or not tally data is clipped by the colorbar min/max
tallyValue : str
Indicator for what type of value is displayed in plots.
tallyContours : bool
Indicates whether or not tallies are displayed as contours
tallyContourLevels : str
Number of contours levels or explicit level values
selectedTally : str
Label of the currently selected tally
"""
def __init__(self, origin, width, height):
""" Initialize PlotView attributes """
super().__init__()
# View Parameters
self.level = -1
self.origin = origin
self.width = width
self.height = height
self.h_res = 1000
self.v_res = 1000
self.aspectLock = True
self.basis = 'xy'
# Geometry Plot
self.colorby = 'material'
self.masking = True
self.maskBackground = (0, 0, 0)
self.highlighting = False
self.highlightBackground = (80, 80, 80)
self.highlightAlpha = 0.5
self.highlightSeed = 1
self.domainBackground = (50, 50, 50)
self.overlap_color = (255, 0, 0)
self.domainAlpha = 1.0
self.domainVisible = True
self.outlines = False
self.colormaps = {'temperature': 'Oranges', 'density': 'Greys'}
# set defaults for color dialog
self.data_minmax = {prop: (0.0, 0.0) for prop in _MODEL_PROPERTIES}
self.user_minmax = {prop: (0.0, 0.0) for prop in _MODEL_PROPERTIES}
self.use_custom_minmax = {prop: False for prop in _MODEL_PROPERTIES}
self.data_indicator_enabled = {prop: False for prop in _MODEL_PROPERTIES}
self.color_scale_log = {prop: False for prop in _MODEL_PROPERTIES}
# Get model domain info
self.cells = self.getDomains('cell')
self.materials = self.getDomains('material')
# Tally Viz Settings
self.tallyDataColormap = 'spectral'
self.tallyDataVisible = True
self.tallyDataAlpha = 1.0
self.tallyDataIndicator = False
self.tallyDataUserMinMax = False
self.tallyDataMin = 0.0
self.tallyDataMax = np.inf
self.tallyDataLogScale = False
self.tallyMaskZeroValues = False
self.clipTallyData = False
self.tallyValue = "Mean"
self.tallyContours = False
self.tallyContourLevels = ""
self.selectedTally = None
def __hash__(self):
return hash(self.__dict__.__str__() + self.__str__())
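# Note: hashing the stringified __dict__ means the hash reflects all mutable
# view state; presumably this is used to detect when any plot setting changed
# (e.g. to decide whether a new plot image must be generated).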
@staticmethod
def getDomains(domain_type):
""" Return dictionary of domain settings.
Retrieve cell or material ID numbers and names from .xml files
and convert to DomainView instances with default view settings.
Parameters
----------
domain_type : {'cell', 'material'}
Type of domain to retrieve for dictionary
Returns
-------
domains : Dictionary of DomainView instances
Dictionary of cell/material DomainView instances keyed by ID
"""
if domain_type not in ('cell', 'material'):
raise ValueError("Domain type, {}, requested is neither "
"'cell' nor 'material'.".format(domain_type))
lib_domain = None
if domain_type == 'cell':
lib_domain = openmc.lib.cells
elif domain_type == 'material':
lib_domain = openmc.lib.materials
domains = {}
for domain, domain_obj in lib_domain.items():
name = domain_obj.name
domains[domain] = DomainView(domain, name, random_rgb())
# always add void to a material domain at the end
if domain_type == 'material':
void_id = _VOID_REGION
domains[void_id] = DomainView(void_id, "VOID",
(255, 255, 255),
False,
False)
return domains
def getDataLimits(self):
return self.data_minmax
def getColorLimits(self, property):
if self.use_custom_minmax[property]:
return self.user_minmax[property]
else:
return self.data_minmax[property]
@property
def llc(self):
if self.basis == 'xy':
x = self.origin[0] - self.width / 2.0
y = self.origin[1] - self.height / 2.0
z = self.origin[2]
elif self.basis == 'yz':
x = self.origin[0]
y = self.origin[1] - self.width / 2.0
z = self.origin[2] - self.height / 2.0
else:
x = self.origin[0] - self.width / 2.0
y = self.origin[1]
z = self.origin[2] - self.height / 2.0
return x, y, z
@property
def urc(self):
if self.basis == 'xy':
x = self.origin[0] + self.width / 2.0
y = self.origin[1] + self.height / 2.0
z = self.origin[2]
elif self.basis == 'yz':
x = self.origin[0]
y = self.origin[1] + self.width / 2.0
z = self.origin[2] + self.height / 2.0
else:
x = self.origin[0] + self.width / 2.0
y = self.origin[1]
z = self.origin[2] + self.height / 2.0
return x, y, z
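# Worked example (illustrative): with basis='xy', origin=(10, 20, 5),
# width=4 and height=2, llc evaluates to (8.0, 19.0, 5) and urc to
# (12.0, 21.0, 5); the out-of-plane coordinate (z here) passes through.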
def adopt_plotbase(self, view):
"""
Applies only the geometric aspects of a view to the current view
Parameters
----------
view : PlotView
View to take parameters from
"""
self.origin = view.origin
self.width = view.width
self.height = view.height
self.h_res = view.h_res
self.v_res = view.v_res
self.basis = view.basis
class DomainView():
""" Represents view settings for OpenMC cell or material.
Parameters
----------
id : int
Unique identifier for cell/material
name : str
Name of cell/material
color : 3-tuple of int or str
RGB or SVG color of cell/material (defaults to None)
masked : bool
Indication of whether cell/material should be masked
(defaults to False)
highlight : bool
Indication of whether cell/material should be highlighted
(defaults to False)
"""
def __init__(self, id, name, color=None, masked=False, highlight=False):
""" Initialize DomainView instance """
self.id = id
self.name = name
self.color = color
self.masked = masked
self.highlight = highlight
def __repr__(self):
return ("id: {} \nname: {} \ncolor: {} \
\nmask: {} \nhighlight: {}\n\n".format(self.id,
self.name,
self.color,
self.masked,
self.highlight))
def __eq__(self, other):
if isinstance(other, DomainView):
return self.__dict__ == other.__dict__
return NotImplemented
class DomainTableModel(QAbstractTableModel):
""" Abstract Table Model of cell/material view attributes """
def __init__(self, domains):
super().__init__()
self.domains = [dom for dom in domains.values()]
def rowCount(self, index=QModelIndex()):
return len(self.domains)
def columnCount(self, index=QModelIndex()):
return 6
def data(self, index, role=Qt.DisplayRole):
if not index.isValid() or not (0 <= index.row() < len(self.domains)):
return None
domain = self.domains[index.row()]
column = index.column()
if role == Qt.DisplayRole:
if column == ID:
return domain.id
elif column == NAME:
return domain.name if domain.name is not None else '--'
elif column == COLOR:
return '' if domain.color is not None else '+'
elif column == COLORLABEL:
return str(domain.color) if domain.color is not None else '--'
elif column == MASK:
return None
elif column == HIGHLIGHT:
return None
elif role == Qt.ToolTipRole:
if column == NAME:
return 'Double-click to edit'
elif column in (COLOR, COLORLABEL):
return 'Double-click to edit \nRight-click to clear'
elif column in (MASK, HIGHLIGHT):
return 'Click to toggle'
elif role == Qt.TextAlignmentRole:
if column in (MASK, HIGHLIGHT, COLOR):
return int(Qt.AlignCenter | Qt.AlignVCenter)
else:
return int(Qt.AlignLeft | Qt.AlignVCenter)
elif role == Qt.BackgroundColorRole:
color = domain.color
if column == COLOR:
if isinstance(color, tuple):
return QColor.fromRgb(*color)
elif isinstance(color, str):
return QColor.fromRgb(*openmc.plots._SVG_COLORS[color])
elif role == Qt.CheckStateRole:
if column == MASK:
return Qt.Checked if domain.masked else Qt.Unchecked
elif column == HIGHLIGHT:
return Qt.Checked if domain.highlight else Qt.Unchecked
return None
def headerData(self, section, orientation, role=Qt.DisplayRole):
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return int(Qt.AlignLeft | Qt.AlignVCenter)
return int(Qt.AlignRight | Qt.AlignVCenter)
elif role == Qt.DisplayRole:
if orientation == Qt.Horizontal:
headers = ['ID', 'Name', 'Color',
'SVG/RGB', 'Mask', 'Highlight']
return headers[section]
return int(section + 1)
return None
def flags(self, index):
if not index.isValid():
return Qt.ItemIsEnabled
elif index.column() in (MASK, HIGHLIGHT):
return Qt.ItemFlags(Qt.ItemIsEnabled | Qt.ItemIsUserCheckable |
Qt.ItemIsSelectable)
elif index.column() in (NAME, COLORLABEL):
return Qt.ItemFlags(Qt.ItemIsEnabled | Qt.ItemIsEditable |
Qt.ItemIsSelectable)
elif index.column() == COLOR:
return Qt.ItemFlags(Qt.ItemIsEnabled | Qt.ItemIsEditable)
else:
return Qt.ItemFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
def setData(self, index, value, role=Qt.EditRole):
if not index.isValid() or not (0 <= index.row() < len(self.domains)):
return False
domain = self.domains[index.row()]
column = index.column()
if column == NAME:
domain.name = value if value else None
elif column == COLOR:
domain.color = value
elif column == COLORLABEL:
domain.color = value
elif column == MASK:
if role == Qt.CheckStateRole:
domain.masked = True if value == Qt.Checked else False
elif column == HIGHLIGHT:
if role == Qt.CheckStateRole:
domain.highlight = True if value == Qt.Checked else False
self.dataChanged.emit(index, index)
return True
class DomainDelegate(QItemDelegate):
def __init__(self, parent=None):
super().__init__(parent)
def sizeHint(self, option, index):
fm = option.fontMetrics
column = index.column()
if column == ID:
return QSize(fm.width("XXXXXX"), fm.height())
elif column == COLOR:
return QSize(fm.width("XXXXXX"), fm.height())
elif column == COLORLABEL:
return QSize(fm.width("X(XXX, XXX, XXX)X"), fm.height())
elif column == MASK:
return QSize(fm.width("XXXX"), fm.height())
else:
return QItemDelegate.sizeHint(self, option, index)
def createEditor(self, parent, option, index):
if index.column() == COLOR:
dialog = QColorDialog(parent)
return dialog
elif index.column() == COLORLABEL:
return QLineEdit(parent)
else:
return QItemDelegate.createEditor(self, parent, option, index)
def setEditorData(self, editor, index):
if index.column() == COLOR:
color = index.data(Qt.BackgroundColorRole)
color = 'white' if color is None else color
editor.setCurrentColor(color)
elif index.column() in (NAME, COLORLABEL):
text = index.data(Qt.DisplayRole)
if text != '--':
editor.setText(text)
def editorEvent(self, event, model, option, index):
if index.column() in (COLOR, COLORLABEL):
if not int(index.flags() & Qt.ItemIsEditable) > 0:
return False
if event.type() == QEvent.MouseButtonRelease \
and event.button() == Qt.RightButton:
self.setModelData(None, model, index)
return True
return False
else:
return QItemDelegate.editorEvent(self, event, model, option, index)
def setModelData(self, editor, model, index):
row = index.row()
column = index.column()
if column == COLOR and editor is None:
model.setData(index, None, Qt.BackgroundColorRole)
model.setData(model.index(row, column+1), None, Qt.DisplayRole)
elif column == COLOR:
color = editor.currentColor()
if color != QColor():
color = color.getRgb()[:3]
model.setData(index, color, Qt.BackgroundColorRole)
model.setData(model.index(row, column+1),
color,
Qt.DisplayRole)
elif column == COLORLABEL:
if editor is None:
model.setData(model.index(row, column-1),
None,
Qt.BackgroundColorRole)
model.setData(index, None, Qt.DisplayRole)
elif editor.text().lower() in openmc.plots._SVG_COLORS:
svg = editor.text().lower()
color = openmc.plots._SVG_COLORS[svg]
model.setData(model.index(row, column-1),
color,
Qt.BackgroundColorRole)
model.setData(index, svg, Qt.DisplayRole)
else:
try:
rgb = literal_eval(editor.text())
except (ValueError, SyntaxError):
return None
if not isinstance(rgb, tuple) or len(rgb) != 3:
return None
for val in rgb:
if not isinstance(val, int) or not 0 <= val <= 255:
return None
model.setData(model.index(row, column-1),
rgb,
Qt.BackgroundColorRole)
model.setData(index, rgb, Qt.DisplayRole)
else:
QItemDelegate.setModelData(self, editor, model, index)
|
plant.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created: November 27, 2018
@author: zhangzd
"""
import cv2  # OpenCV, for camera capture and image encoding
import requests  # HTTP requests to the Baidu AI endpoints
import json  # parse JSON responses
import threading  # run recognition requests off the capture loop
import time  # throttle the request frequency
import base64  # base64-encode captured frames
import numpy as np  # array handling for OpenCV images
from PIL import Image, ImageDraw, ImageFont  # draw CJK text onto frames
access_token = ""  # Baidu AI access token, filled in by main()
API_KEY = "YNoddBhqFuPn0gsrqG6lxk7A"  # Baidu AI API key
SECRET_KEY = "p5dyxaxz2XiHCtnL1HagsQBzOBv5nEzA"  # Baidu AI secret key
frame = None  # the latest captured camera frame
now_time = 0  # timestamp of the most recent recognition request
plant_info = None  # the most recent recognition result
def cvimg_to_b64(img):
"""
图片转换函数,将二进制图片转换为base64加密格式
"""
try:
image = cv2.imencode('.jpg', img)[1] #将图片格式转换(编码)成流数据,赋值到内存缓存中
base64_data = str(base64.b64encode(image))[2:-1] #将图片加密成base64格式的数据
return base64_data #返回加密后的结果
except Exception as e:
return "error"
def get_ai_access_token():
"""
Fetch an OAuth access token from the Baidu AI open platform.
"""
url = "https://aip.baidubce.com/oauth/2.0/token?grant_type=" + \
"client_credentials&client_id=%s&client_secret=%s" % (API_KEY, SECRET_KEY)
try:
response = requests.get(url)
res_text = response.text
res_json = json.loads(res_text)
return str(res_json["access_token"])
except Exception:
return "error"
def get_plant(img64):
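"""
Call the Baidu plant-recognition API with a base64-encoded image and return the parsed JSON (or "error").
"""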
url = "https://aip.baidubce.com/rest/2.0/image-classify/v1/plant"
url = url + "?access_token=" + access_token
data = {
"image": img64, "type": 'plant'
}
try:
response = requests.post(url, data=data)
res_text = response.content.decode("utf-8")
res_json = json.loads(res_text)
return res_json
except Exception:
return "error"
def post_request(frame, nt):
"""
Send the frame to the recognition service and extract the useful data from the result.
"""
global plant_info
if time.time() - nt > 3:  # only send a request if more than 3 seconds have elapsed
global now_time  # declare now_time as a global
now_time = time.time()  # reset now_time to the current timestamp
img64 = cvimg_to_b64(frame)  # base64-encode the current frame
res = get_plant(img64)  # call the plant-recognition API
if "error_msg" in res:
if res["error_msg"] == 'Open api daily request limit reached':
raise Exception('Open api daily request limit reached')
if "error" not in res:  # proceed only if recognition did not fail
try:
plant_info = res["result"]  # store the recognition result
except Exception:
pass
return  # leave the function
def put_Text(cvimg, text, location, size=30):
"""
Draw the recognition info onto the frame (PIL is used so CJK text renders correctly).
"""
cvimg = Image.fromarray(cv2.cvtColor(cvimg, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(cvimg)
fontText = ImageFont.truetype("./simsun.ttc", size, encoding="utf-8")
draw.text(location, text, (255, 0, 0), font=fontText)
cvimg = cv2.cvtColor(np.array(cvimg), cv2.COLOR_RGB2BGR)
return cvimg
def main():
"""
Program entry point.
"""
token = get_ai_access_token()
if token != "error":
global access_token
access_token = token
cap = cv2.VideoCapture(0)  # open the default camera
global now_time  # declare now_time as a global
now_time = time.time()  # initialize now_time with the current timestamp
while True:  # read frames from the camera forever
ret, frame = cap.read()  # grab one frame
if ret:  # frame read successfully
# scale the frame up to a 1280x800 window
frame1 = cv2.resize(frame, (1280, 800), interpolation=cv2.INTER_LINEAR)
# hand the frame to a worker thread for recognition
t = threading.Thread(target=post_request, args=(frame, now_time), name='POST_REQUEST')
t.start()  # start the worker thread
if not plant_info or plant_info[0]["name"] == "非植物":  # "非植物" means "not a plant"
frame1 = put_Text(frame1, "Waiting...", (50, 50))  # show a waiting message
elif plant_info[0]["name"] != "非植物":
print(plant_info[0])  # log the recognized plant info
try:
# draw the score and name onto the frame
frame1 = put_Text(frame1, str(plant_info[0]["score"])[:4], (150, 0 * 70 + 50))
frame1 = put_Text(frame1, str(plant_info[0]["name"]), (320, 0 * 70 + 50))
frame1 = put_Text(frame1, "score:", (50, 0 * 70 + 50))
frame1 = put_Text(frame1, "name:", (250, 0 * 70 + 50))
except Exception:
pass
cv2.imshow('Magic Image', frame1)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
|
part1.py
|
#!/usr/bin/env python3
import sys
from program import Program
import threading
class Robot:
def __init__(self, program):
self.__program = program
self.__pos = (0, 0)
self.__dir = (0, 1)
self.__painted_zone = set()
self.__white_zone = set([(0, 0)])
def execute(self):
self.__threads = [
threading.Thread(target=self.__program.execute),
threading.Thread(target=self.feed),
]
for thread in self.__threads:
thread.start()
for thread in self.__threads:
thread.join()
print(len(self.__painted_zone))
def print(self):
x_range = (-10, 50)
y_range = (-10, 5)
for y in range(y_range[1], y_range[0] - 1, -1):
for x in range(x_range[0], x_range[1] + 1):
pos = (x, y)
value = "."
if pos == self.__pos:
if self.__dir == (1, 0):
value = ">"
elif self.__dir == (0, 1):
value = "^"
elif self.__dir == (-1, 0):
value = "<"
else:
value = "v"
elif pos in self.__white_zone:
value = "#"
print(value, end="")
print("")
print("====================")
def action(self):
self.print()
color = 0
if self.__pos in self.__white_zone:
color = 1
print("send", color)
self.__program.send_input(color)
action = next(self.__program)
rotation = next(self.__program)
if action == 1:
self.__white_zone.add(self.__pos)
print("paint it white")
else:
self.__white_zone.discard(self.__pos)
self.__painted_zone.add(self.__pos)
self.update_direction(rotation)
self.move()
def move(self):
print("move from ", self.__pos, "with", self.__dir, end="")
self.__pos = (self.__pos[0] + self.__dir[0], self.__pos[1] + self.__dir[1])
print("to", self.__pos)
def update_direction(self, rotation):
if rotation == 0: # Turn 90 left
print("Turn left", self.__pos, self.__dir)
if self.__dir == (1, 0):
self.__dir = (0, 1)
elif self.__dir == (0, 1):
self.__dir = (-1, 0)
elif self.__dir == (-1, 0):
self.__dir = (0, -1)
else:
self.__dir = (1, 0)
else: # Turn 90 right
print("Turn right", self.__pos, self.__dir)
if self.__dir == (1, 0):
self.__dir = (0, -1)
elif self.__dir == (0, -1):
self.__dir = (-1, 0)
elif self.__dir == (-1, 0):
self.__dir = (0, 1)
else:
self.__dir = (1, 0)
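# The two chains above implement 90-degree rotations of the direction
# vector: a left turn maps (dx, dy) -> (-dy, dx) and a right turn maps
# (dx, dy) -> (dy, -dx); each if/elif branch spells out one case.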
def feed(self):
while self.__program.is_running():
self.action()
def main():
data = sys.stdin.readline()
r = Robot(Program(data, 1))
r.execute()
if __name__ == "__main__":
main()
|
test_for_multithreading.py
|
from threading import Thread
def test():
global QA  # the global declaration must come before the import that binds QA
import QUANTAXIS as QA
QA.QA_util_log_info('指数日线')  # log message: "index daily bars"
if __name__ == '__main__':
t = Thread(target=test, args=())
t.start()
t.join()
import QUANTAXIS as QA
QA.QA_util_log_info('指数日线')
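# Note: the same import and log call run twice on purpose, once inside a
# worker thread and once in the main thread, to check that importing
# QUANTAXIS from a non-main thread behaves like a main-thread import.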
|
__init__.py
|
import math
import threading
import time
import uuid
from contextlib import contextmanager
import redis
from .utils import gen_lock_name, subscribe, get_list_args
class Redisz:
# ------------------------------------------ sys ------------------------------------------
def __init__(self, url, **kwargs):
"""
描述:
初始化redis, url可以指定redis的账号,密码,地址,端口,数据库等信息, 也可以通过关键字参数指定其他连接属性.
参数:
url:str -redis地址url,格式如下
-redis://[[username]:[password]]@localhost:6379/0
-rediss://[[username]:[password]]@localhost:6379/0
-unix://[[username]:[password]]@/path/to/socket.sock?db=0
示例:
rdz = redisz.Redisz('redis://127.0.0.1', decode_responses=True)
rdz = redisz.Redisz('redis://127.0.0.1:6379')
rdz = redisz.Redisz('redis://127.0.0.1:6379/0')
"""
if not url.lower().startswith(("redis://", "rediss://", "unix://")):
url = 'redis://' + url
if 'decode_responses' not in kwargs:
kwargs['decode_responses'] = True
self.redis_ins = redis.Redis(connection_pool=redis.ConnectionPool.from_url(url, **kwargs))
def get_redis(self):
"""
描述:
返回redis.Redis对象, 使用redis.Redis方法操作redis.
返回:
redis - redis.Redis对象
示例:
rds = rdz.get_redis()
rdz.set('test:name', 'Zhang Tao')
"""
return self.redis_ins
def get_redis_pipeline(self, transaction=True):
"""
描述:
返回Pipeline流水线对象, 通过Pipeline可以实现事务和减少交互次数
参数:
transaction -是否是事务性流水线, 如果只需要流水线, 不需要事务, 可以设置transaction=False
返回:
pipeline:Pipeline -流水线对象
示例:
pipe = rdz.get_redis_pipeline()
pipe.set('test:name', 'Zhang Tao')
pipe.hset('test:taozh', 'name', 'Zhang Tao')
pipe.sadd('test:letters', 'a', 'b', 'c')
pipe.execute() # 虽然多次操作, 但是客户端只会提交一次
"""
return self.get_redis().pipeline(transaction)
@contextmanager
def redis_pipeline(self, transaction=True):
"""
描述:
Redis pipeline对象上下文管理器, 通过pipeline可以事物/非事物【流水线】的方式对redis进行操作
通过流水线可以减少客户端和服务器端的交互次数(一次提交)
如果是事务性流水线, 当多个客户端同时处理数据时, 可以保证当前调用不会被其他客户端打扰
参数:
transaction:bool -是否是事务性流水线, 如果只需要流水线, 不需要事务, 可以设置transaction=False
返回:
pipe:Pipeline -Pipeline流水线对象
示例:
with rdz.redis_pipeline(False) as pipe:
pipe.set('test:name', 'Zhang Tao') #虽然多次操作, 但是客户端只会提交一次
pipe.hset('test:taozh', 'name', 'Zhang Tao')
pipe.sadd('test:letters', 'a', 'b', 'c')
"""
pipe = self.get_redis().pipeline(transaction)
try:
if transaction is True:
pipe.multi()
yield pipe
pipe.execute()
except Exception as e:
raise e
# ------------------------------------------ global ------------------------------------------
def get_type(self, name):
"""
描述:
返回name对应的键值类型
参数:
name:string -要检测的键名
返回:
type:str -name键名对应的键值类型, 有如下类型string/list/hash/set/zset/none(不存在)
示例:
rdz.get_type('test:string') # string
rdz.get_type('test:list') # list
rdz.get_type('test:hash') # hash
rdz.get_type('test:set') # set
rdz.get_type('test:zset') # zset
rdz.get_type('test:not-exist') # none
"""
return self.get_redis().type(name)
def exists(self, names, *args, return_number=False):
"""
描述:
返回由names指定的一个或多个名字键值对是否存在, 如果都存在返回True, 有一个不存在则返回False
参数:
names -要检测的键名或键名list
*args -通过位置参数传递的多个检测键名
return_number:bool -return_number=True, 返回指定names中存在的数量
返回:
is_exist:bool/int -如果都存在返回True, 有一个不存在则返回False, 如果return_number=True, 则返回指定names中存在的数量
示例:
rdz.exists('test:name') # 单个检测
rdz.exists('test:name', 'test:age') # 以关键字参数的方式检测多个
rdz.exists(['test:name', 'test:age']) # 以列表的方式检测多个
rdz.exists(['test:name', 'test:age'], return_number) # 返回存在的个数
"""
names = get_list_args(names, args)
result = self.get_redis().exists(*names)
if return_number is True:
return result
return result == len(names)
def keys(self, pattern="*", **kwargs):
"""
描述:
获取redis中所有的键名称列表, 可以根据pattern进行过滤
返回:
names:list -所有键名的列表
参数:
pattern:str -过滤选项, 有如下可选项
h?llo -matches hello, hallo and hxllo
h*llo -matches hllo and heeeello
h[ae]llo -matches hello and hallo, but not hillo
h[^e]llo -matches hallo, hbllo, ... but not hello
h[a-b]llo -matches hallo and hbllo
示例:
rdz.keys() # 返回所有键名列表
rdz.keys('test:*') # 返回以test:开头的键名列表
"""
return self.get_redis().keys(pattern=pattern, **kwargs)
def delete(self, names, *args):
"""
描述:
删除一个或多个name对应的键值, 并返回删除成功的数量
参数:
names:str|list -要删除的键值
返回:
count:int -删除成功的数量
示例:
rdz.delete('test:n1') # 单个删除
rdz.delete('test:n1', 'test:n2') # 关键字参数多个删除
rdz.delete(['test:n1', 'test:n2']) # 列表多个删除
rdz.delete(['test:n1', 'test:n2'], 'test:n3') # 列表和关键字参数一起使用
"""
names = get_list_args(names, args)
if len(names) == 0:
return 0
return self.get_redis().delete(*names)
def rename(self, src, dst, nx=False):
"""
描述:
将src重命名为dst, 将dst_nx设为True可以确保在dst键值不存在时才进行重命名, 默认直接重命名
参数:
src:str -要重命名的键名, 如果不存在则会引发异常
dst:str -新的键名
nx:bool -nx设置True, 只有dst键名不存在时才会重命名
返回:
result:bool -如果操作成功返回True, 否则返回False
示例:
rdz.rename('old_name', 'new_name')
rdz.rename('old_name', 'new_name', nx=True) # 只有当new_name不存在时, 才会进行重命名操作
"""
if nx is True:
return self.get_redis().renamenx(src, dst)
return self.get_redis().rename(src, dst)
def ttl(self, name):
"""
描述:
以秒为单位, 返回指定键值的剩余存在时间(time to live)
参数:
name:string -redis的健名
返回:
time:int -指定键值的剩余存在时间(秒)
--如果指定键值不存在, 返回-2
--如果指定键值存在, 但没有过期时间, 返回-1
示例:
rdz.ttl('test:list') # 90, 剩余90秒
rdz.ttl('test:set') # -1, test:set存在, 但是没有过期时间
rdz.ttl('test:not-exist') # -2, test:not-exist键值不存在
"""
return self.get_redis().ttl(name)
def expire(self, name, seconds):
"""
描述:
为键值设置超时时间, 超时以后自动删除对应的键值对
请注意超时时间只能对整个键值进行设置, 不能对于键值中的子项进行设置
参数:
name:string -要设置超时的键名
seconds:int -超时时间, 单位是秒
返回:
result:bool -如果设置成功返回True, 否则返回False(键值不存在)
示例:
rdz.expire('test:exist', 10) # 10秒以后移除test:ex键值
rdz.expire('test:not-exist', 10) # not-exist不存在, 返回False
"""
return self.get_redis().expire(name, seconds)
def expireat(self, name, when):
"""
描述:
为键值设置超时时间点, 超时以后自动删除对应的键值对
请注意:
-如果设置的是当前时间之前的时间点, 则键值会被【立刻】删除
-超时时间只能对整个键值进行设置, 不能对于键值中的子项进行设置
参数:
name:string -要设置超时的键名
when:int -超时时间点(unix timestamp)
返回:
result:bool -如果设置成功返回True, 否则返回False(键值不存在)
示例:
rdz.expireat('test:ex', 1648252800) # 1648252800=2022年3月26日0点
"""
return self.get_redis().expireat(name, when)
def persist(self, name):
"""
描述:
移除指定键值的过期时间
参数:
name:string -redis的健名
返回:
result:bool -如果移除成功返回True, 如果失败返回False(键值不存在)
示例:
rdz.persist('test:list') # True
rdz.persist('test:not-exist') # false
"""
return self.get_redis().persist(name)
def sort(self, name, start=None, num=None, by=None, get=None, desc=False, alpha=False, store=None, groups=False):
"""
描述:
对列表、集合或有序集合中的元素进行排序, 默认按数字从小到大排序, 类似于sql中的order by语句,
可以实现如下功能
-根据降序而不是默认的升序来排列元素
-将元素看作是数字进行排序
-将元素看作是二进制字符串进行排序
-使用被排序元素之外的其他值作为权重来进行排序
-可以从输入的列表、集合、有序集合以外的其他地方进行取值
参数:
name:string -redis的健名
start:int -对【已排序】的数据进行分页过滤, 和num结合使用
num:int -对【已排序】的数据进行分页过滤, 和start结合使用
by: -使用其他外部键对项目进行加权和排序, 使用“*”指示项目值在键中的位置, 设置by=nosort禁止排序
get: -从外部键返回项目, 而不是排序数据本身, 使用“*”指示项目值在键中的位置
desc:bool -设置desc=True, 按从大到小排序
alpha:bool -按字符排序而不是按数字排序, 如果要排序的值不是数字, 请设置alpha=True, 否则会引发异常
store:string -按排序结果存储到指定的键值
groups:bool -如果groups=True,并且get函数返回至少两个元素, 排序将返回一个元组列表, 每个元组包含从参数中获取的值“get”。
返回:
sorted:list|int -排序成功的元素列表, 如果设置了store, 则返回元素个数
示例:
# test:sort=[6, 88, 112, 18, 36]
# test:sort-weight={'d-6': 1, 'd-88': 2, 'd-112': 3, 'd-18': 4, 'd-36': 5, }
rdz.sort('test:sort') # ['6', '18', '36', '88', '112'], 默认按数字进行升序排列
rdz.sort('test:sort', desc=True) # ['112', '88', '36', '18', '6'], 降序排列
rdz.sort('test:sort', alpha=True) # ['112', '18', '36', '6', '88'], 按字母排序
rdz.sort('test:sort', start=1, num=3) # ['18', '36', '88'], 截取从第一个开始的三个元素
rdz.sort('test:sort', store='test:sort-1') # 返回5, test:sort-1=['6', '18', '36', '88', '112']
# test:obj-ids=[1, 3, 2]
# test:obj-1={'name': '1a', 'weight': 33}
# test:obj-2={'name': '2b', 'weight': 22}
# test:obj-3={'name': '3c', 'weight': 11}
rdz.sort('test:obj-ids', by='test:obj-*->weight') # ['3', '2', '1'], 根据id找到其对应的对象中的属性(weight), 然后通过对象属性进行排序
rdz.sort('test:obj-ids', by='test:obj-*->weight', get='test:obj-*->name') # ['3c', '2b', '1a'], 根据id找到对象的属性进行排序, 然后返回对象的name属性
rdz.sort('test:obj-ids', get='test:obj-*->name') # ['1a', '2b', '3c'], 对test:obj-ids进行排序以后([1,2,3]), 然后根据排序以后的id依次从对象中获取指定的属性并返回
rdz.sort('test:obj-ids', by='nosort', get='test:obj-*->name') # ['1a', '3c', '2b'], 特殊应用, 不排序,只根据id返回对象的属性并返回
"""
return self.get_redis().sort(name, start=start, num=num, by=by, get=get, desc=desc, alpha=alpha, store=store, groups=groups)
# ------------------------------------------ str ------------------------------------------
def str_set(self, name, value, **kwargs):
"""
描述:
设置值, 默认情况, name对应的键值不存在则创建, 存在则替换,
值的类型可以是: 字符串/整数/浮点数
参数:
name:string -redis中的键名
value:str/int/float -要设置的值
kwargs可选参数如下:
ex:int -过期时间(秒
px:int -过期时间(毫秒)
nx:bool -如果设置为True, 则只有name不存在时, 当前set操作才执行
xx:bool -如果设置为True, 则只有name存在时, 当前set操作才执行
返回:
result:bool - 如果设置成功返回True, 否则返回False
示例:
rdz.str_set('test:name', 'Zhang Tao')
rdz.str_set('test:age', 18)
rdz.str_set('test:email', 'taozh@cisco.com')
"""
result = self.get_redis().set(name, value, **kwargs)
if result is None:
return False
return result
def str_get(self, name):
"""
描述:
返回name对应的字符串类型键值, 如果键值不存在则返回None
参数:
name:string -redis中的键名
返回:
value:string -字符串键值, 如果不存在返回None
示例:
#test:name='Zhang Tao'
rdz.str_get('test:name') # Zhang Tao
rdz.str_get('test:not-exist') # None
"""
return self.get_redis().get(name)
def str_mset(self, mapping):
"""
描述:
批量设置多个字符串类型的键值
参数:
mapping:dict -包含多个键值的字典
返回:
result:bool -设置成功返回True
示例:
rdz.str_mset({'test:name': 'Zhang Tao', 'test:age': '18', 'test:email': 'taozh@cisco.com'})
"""
return self.get_redis().mset(mapping)
def str_mget(self, names, *args):
"""
描述:
批量获取字符串类型键值list,
参数:
names:list -要获取的键名列表
返回:
values:list -获取到的键值列表, 如果只有一个name, 返回结果也是list
示例:
# test:name = 'Zhang Tao', test:ag= '18', test:email= 'taozh@cisco.com'}
rdz.str_mget('test:name') # ['Zhang Tao']
rdz.str_mget('test:name', 'test:age') # ['Zhang Tao', '18']
rdz.str_mget(['test:name', 'test:age'], 'test:email') # ['Zhang Tao', '18', 'taozh@cisco.com']
rdz.str_mget('test:name', 'test:not-exist') # ['Zhang Tao', None]
"""
return self.get_redis().mget(names, *args)
def str_append(self, name, value):
"""
描述:
在name对应的字符串类型键值后面追加内容, name对应的字符串类型键值不存在则创建并赋值
参数:
name:string -redis的键名
value:string/int/float -要追加的内容
返回:
length:int -添加成功的字符串【字节】长度(一个汉字三个字节)
示例:
# 'test:email'='taozh@cisco.com' --15
rdz.str_append('test:email', None) # 15
rdz.str_append('test:email', '.cn') # 18, test:email-> taozh@cisco.com.cn
rdz.str_append('test:not-exist', '.cn') # 3, test:not-exist-> .cn
"""
if value is None:
return self.str_len(name)
return self.get_redis().append(name, value)
def str_getset(self, name, value):
"""
描述:
设置新值并获取原来的值, 如果name对应的键值不存在则创建, 并返回None
参数:
name:string -redis的键名
value:string|int|float -要设置的新值
返回:
old_value:string -原来的值, 如果简直不存在, 则返回None
示例:
#test:age=18
rdz.str_getset('test:age', 19) # 返回18, test:age -> 19
rdz.str_getset('test:not-exist', 'new value') # 返回None, test:not-exist -> new value
"""
return self.get_redis().getset(name, value)
def str_setrange(self, name, offset, value):
"""
描述:
修改字符串内容, 从指定字符串字节索引开始向后替换, 新值太长时, 则向后添加
替换时【包含】offset索引处的字符
请注意是【字节】非字符, 一个汉字3个字节
参数:
name:string -redis的键名
offset:int -替换开始位置的索引
value:string -要替换的字符
返回:
length:int -修改成功以后的【字节】长度
示例:
# test:email=taozh@cisco.com
rdz.str_setrange('test:email', 6, '1982@gmail.com') # 20, test:email -> taozh1982@cisco.com
# test:study=好好学习
rdz.str_setrange('test:study', 6, '工作') # 12, test:study -> 好好工作, 一个汉字3个字节, 所以从6开始
"""
return self.get_redis().setrange(name, offset, value)
def str_getrange(self, name, start, end):
"""
描述:
根据【字节】索引获取获取子串, 子串既包括start又包括end索引处的字节
start和end可以为负数, 最后一个字符的索引是-1, 倒数第二个字符的索引是-2, 以此类推
请注意是【字节】非字符, 一个汉字3个字节
返回:
result:string -子字符串
参数:
name:string -redis的键名
start:int -开始字节索引
end:int -结束字节索引
示例:
# test:email=taozh@cisco.com
rdz.str_getrange('test:email', 0, 4) # taozh, 索引0-4的5个字节
rdz.str_getrange('test:email', -3, -1) # com, 索引-2 - -1的2个字节
# test:study=好好学习
rdz.str_getrange('test:study', 0, 2) # 好, 索引0-2的3个字节, 一个汉字3个字节
"""
return self.get_redis().getrange(name, start, end)
def str_len(self, name):
"""
描述:
返回name对应值的字节长度(一个汉字3个字节, 如果键值不存在, 返回0
参数:
name:str -redis的键名
返回:
length:int -键值的字节长度, 如果不存则, 则返回0
示例:
# test:email=taozh@cisco.com
rdz.str_len('test:email') # 15
# test:zh=好好学习
rdz.str_len('test:zh') # 12, 3*4=12个字节
rdz.str_len('test:not-exist') # 0
"""
return self.get_redis().strlen(name)
def str_incr(self, name, amount=1):
"""
描述:
自增name对应的键值, 返回结果为自增以后的值
当name不存在时, 则创建键值并赋值为amount
amount必须是【整数】, 可以为负数, 负数表示自减
如果name对应的键值不是整数(包括浮点数), 会引发异常
参数:
name:string -redis的键名
amount:int -增加的数值
返回
value:int -自增以后的值
示例:
# test:count=18
rdz.str_incr('test:count') # 19
rdz.str_incr('test:count', 2) # 21
rdz.str_incr('test:count', -1) # 20
rdz.str_incr('test:not-exist') # 1, test:not-exist不存在, 创建test:not-exist, 并赋值为1
rdz.str_incr('test:email') # test:email不是整数, 引发ResponseError异常
rdz.str_incr('test:float-1.1') # test:float-1.1不是整数, 引发ResponseError异常
"""
return self.get_redis().incrby(name, amount=amount)
def str_decr(self, name, amount=1):
"""
描述:
自减name对应的值, 返回结果为自减以后的值
当name不存在时, 则创建键值并赋值为-amount(-1)
amount必须是【整数】, 可以为负数, 负数表示自增
如果name对应的键值不是整数(包括浮点数), 会引发异常
参数:
name:string -redis的键名
amount:int -减去的数值
返回
value:int -自减以后的值
示例:
# test:count=10
rdz.str_decr('test:count') # 9
rdz.str_decr('test:count', 2) # 7
rdz.str_decr('test:count', -1) # 8
rdz.str_decr('test:not-exist') # -1, test:not-exist不存在, 创建test:not-exist, 并赋值-1
rdz.str_decr('test:email') # test:email不是整数, 引发异常
rdz.str_decr('test:float-1.1') # test:float-1.1不是整数, 引发异常
"""
return self.get_redis().decrby(name, amount=amount)
def str_incrfloat(self, name, amount=1.0):
"""
Float version of the increment operation; see str_incr for usage.
"""
return self.get_redis().incrbyfloat(name, amount=amount)
def str_decrfloat(self, name, amount=1.0):
"""
Float version of the decrement operation; see str_decr for usage.
"""
return self.get_redis().incrbyfloat(name, amount=-amount)
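# Illustrative usage of the float counters (assuming test:price holds a
# numeric string):
#   rdz.str_set('test:price', 10.5)
#   rdz.str_incrfloat('test:price', 0.25)   # 10.75
#   rdz.str_decrfloat('test:price', 0.75)   # 10.0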
# ------------------------------------------ list ------------------------------------------
def list_push(self, name, values, *args, left=False, xx=False):
"""
描述:
向指定列表中增加一个或多个值, 默认情况, 如果列表不存在则新建并赋值
默认添加到列表的右边, 如果left参数为True, 则添加到列表的左边
如果xx为True, 则只有指定列表存在时才会添加, 并且一次只能加一个值
参数:
name:string -redis的键名
values:list -要添加的元素列表
*args -也可以通过位置参数的方式添加多个元素
left:bool -设置left=True, 会将元素添加到列表的左侧, 请注意添加的多个值在列表中的顺序跟传递参数的顺序【相反】(请参考示例)
xx:bool -设置xx为True, 只会将【一个】元素添加到已有列表, 如果列表不存在, 则不会创建
返回:
length:int - 整个列表的长度, 如果xx=True, 但是列表不存在则返回0
示例:
rdz.list_push('test:numbers', 3, 4) # 2, test:numbers -> ['3', '4']
rdz.list_push('test:numbers', [2, 1], left=True) # 4, test:numbers -> ['1', '2', '3', '4'], 请注意1和2的顺序
rdz.list_push('test:numbers', [5, 6], 7) # 7, test:numbers -> ['1', '2', '3', '4', '5', '6', '7']
rdz.list_push('test:not-exist', 1, xx=True) # 0, test:not-exist不存在, 因为xx为True, 所以不会新建list
"""
values = get_list_args(values, args)
r = self.get_redis()
if left is True:
if xx is True:
return r.lpushx(name, *values)
return r.lpush(name, *values)
else:
if xx is True:
return r.rpushx(name, *values)
return r.rpush(name, *values)
def list_insert(self, name, ref_value, value, before=False):
"""
描述:
在指定列表的指定参考值前或后插入一个新值, where参数指定插入的位置(before/after), 默认插入到指定值的后边
如果指定列表不存在, 则不做处理
参数:
name:string -redis的键名
ref_value:sting|int|float -参考值, 如果为None, 则会引发异常
*args -也可以通过位置参数的方式添加多个元素
left:bool -设置left=True, 会将元素添加到列表的左侧, 请注意添加的多个值在列表中的顺序跟传递参数的顺序【相反】(请参考示例)
返回:
length:int
-如果插入成功, 返回整个列表的长度
-如果列表不存在, 返回0
-如果ref_value在列表中不存在, 返回-1
示例:
# test:numbers = ['1', '3', '5']
rdz.list_insert('test:numbers', 1, 2) # 把2插入到1后边, test:numbers -> ['1', '2', '3', '5']
rdz.list_insert('test:numbers', 5, 4, before=True) # 把4插入到5前, test:numbers -> ['1', '2', '3', '4', '5']
rdz.list_insert('test:numbers', 10, 11) # 返回-1, 不做处理
rdz.list_insert('test:not-exist', 1, 2) # 返回0, 不做处理
"""
where = 'after'
if before is True:
where = 'before'
return self.get_redis().linsert(name, where, ref_value, value)
def list_set(self, name, index, value):
"""
描述:
对指定列表中的指定索引位置重新赋值
如果列表不存在/index超出范围/value不是字符串, 都会引发异常
参数:
name:string -redis的键名
index:int -重新赋值的索引, 支持负数(-1表示最后一个元素索引), 必须是列表范围内的索引, 否则会引发异常
value:string|int|float -新值, 除string|int|float以外的类型, 都会引发异常
返回:
result:bool -如果赋值成功返回True
示例:
# test:numbers = ['1', 'b', 'c']
rdz.list_set('test:numbers', 1, 2) # 把第一个元素('b')替换成2, test:numbers -> ['1', '2', 'c']
rdz.list_set('test:numbers', -1, 3) # 把最后一个元素('c')替换成3, test:numbers -> ['1', '2', '3']
"""
return self.get_redis().lset(name, index, value)
def list_pop(self, name, count=None, left=False):
"""
描述:
将指定列表的右侧第一个元素移除并返回, 可以通过left参数设置从左侧移除
-如果列表不存在, 无论count是几个, 返回None
-如果列表没有值, 无论count是几个, 返回None
-如果count=0, 返回None
-如果count>1时, 如果列表个数小于count, 返回由移除元素组成的列表, 即便只移除了一个元素, 返回的也是列表
参数:
name:string -redis的键名
count:int -要移除的个数
left:bool -默认从右侧移除元素, 如果left=True, 则从左侧移除元素
返回:
item:string|list - 移除的元素或元素组成的列表
示例:
# test:numbers = ['1', '2', '3', '4', '5', '6']
rdz.list_pop('test:numbers', 0) # 返回None
rdz.list_pop('test:numbers') # 移除最右侧的元素, 返回结果为6, test:numbers -> ['1', '2', '3', '4', '5'])
rdz.list_pop('test:numbers',2) # 移除最右侧的2个元素, 返回结果为['5', '4'], test:numbers -> ['1', '2', '3']
rdz.list_pop('test:numbers',left=True) # 返回结果为1, test:numbers -> ['2', '3']
rdz.list_pop('test:numbers',3) # 返回结果为['3', '2'], test:numbers -> []
rdz.list_pop('test:not-exist') # 返回None
"""
r = self.get_redis()
if left is True:
return r.lpop(name, count=count)
else:
return r.rpop(name, count=count)
def list_rem(self, name, value, count=1):
"""
描述:
在指定列表中删除指定的值, 默认删除第一个等于value的值, 返回值为删除的值个数
count指定了删除个数和删除方向
count > 0: 从左向右删除指定个数的等于value的值
count < 0: 从右向左删除指定个数的等于value的值
count = 0: 删除所有等于value的元素
参数:
name:string -redis的键名
value:string|int|float -要删除的值
count:int - 删除的个数和删除方向
返回:
count:int -删除的个数, 如果列表不存在或value在列表中不存在, 返回0
示例:
# test:numbers = ['1', '2', '3', '4', '5', '6', '5', '4', '3', '2', '1']
rdz.list_rem('test:numbers', 1) # 1, 从左向右删除第一个1 -> ['2', '3', '4', '5', '6', '5', '4', '3', '2', '1']
rdz.list_rem('test:numbers', 2, -1) # 1, 从后向前删除第一个2 -> ['2', '3', '4', '5', '6', '5', '4', '3', '1']
rdz.list_rem('test:numbers', 4, 0) # 2, 删除所有的 -> ['2', '3', '5', '6', '5', '3', '1']
rdz.list_rem('test:numbers', 10) # 值在列表中不存在, 返回0
rdz.list_rem('test:not-exist', 10) # 列表不存在, 返回0
"""
return self.get_redis().lrem(name, count, value)
def list_trim(self, name, start, end):
"""
描述:
在指定列表中移除【没有】在start-end索引之间的值, start和end索引处的值不会被移除
只保留start<=索引<=end的值, 如果start>end或者start<0, list所有的值都会被移除
参数:
name:string -redis的键名
start:int -开始索引, start索引处的值不会被移除
end:int -结束索引, end索引处的值不会被移除
返回:
result:bool -True
示例:
# test:numbers = ['1', '2', '3', '4', '5', '6']
rdz.list_trim('test:numbers', 1, 3) # 把索引在1-3以外的值移除, test:numbers -> ['2', '3', '4']
rdz.list_trim('test:numbers', -1, 1) # start<0, test:numbers -> []
rdz.list_trim('test:numbers', 3, 1) # start>end, test:numbers -> []
"""
return self.get_redis().ltrim(name, start, end)
def list_index(self, name, index):
"""
描述:
在指定列表中根据索引获取列表元素
如果列表不存在/index超出了列表范围, 返回None
参数:
name:string -redis的键名
index:int -索引, 支持负数, 最后一个元素的索引是-1
返回:
result:string -索引处的元素, 如果列表不存在/index超出了列表范围, 返回None
示例:
# test:numbers = ['1', '2', '3', '4', '5', '6']
rdz.list_index('test:numbers', 1) # 索引为1的值为:'2'
rdz.list_index('test:numbers', -1) # 索引为-1的值为:'6'
rdz.list_index('test:numbers', 10) # index超出了列表范围:None
rdz.list_index('test:not-exist', 0) # 列表不存:None
"""
return self.get_redis().lindex(name, index)
def list_len(self, name):
"""
描述:
获取指定列表的长度
如果列表不存在, 返回0
参数:
name:string -redis的键名
返回:
length:init -列表的长度, 如果列表不存在, 返回0
示例:
# test:numbers = ['1', '2', '3', '4', '5', '6']
list_len('test:numbers') # 6
list_len('test:not-exist') # 0
"""
return self.get_redis().llen(name)
def list_range(self, name, start, end):
"""
描述:
返回指定列表在start和end范围内的数据list
如果指定列表不存在, 返回[]
返回list中包含start和end索引处的数据项
如果start或end超出了列表的索引范围, 只会返回列表索引范围内的数据列表
如果要返回所有数据项, 可以通过start=0 & end=-1进行获取
参数:
name:string -redis的键名
start:int -开始索引
end:int -结束索引
返回:
result:list -列表在start和end范围内数据组成的列表
示例:
# test:numbers = ['1', '2', '3', '4', '5', '6']
rdz.list_range('test:numbers', 0, 2) # 包含第0个和第2个数据, ['1', '2', '3']
rdz.list_range('test:numbers', 0, -1) # -1表示最后一个数据项的索引, ['1', '2', '3', '4', '5', '6']
rdz.list_range('test:numbers', 0, 100) # 只会返回范围内的数据, ['1', '2', '3', '4', '5', '6']
rdz.list_range('test:not-exist', 0, -1) # []
"""
return self.get_redis().lrange(name, start, end)
def list_iter(self, name):
"""
描述:
利用yield封装创建生成器, 对指定列表元素进行增量迭代, 数据量较大时比较有用, 避免将数据全部取出把内存撑爆
参数:
name:string -redis的键名
示例:
for item in rdz.list_iter('test:numbers'): # 遍历列表
print(item)
"""
r = self.get_redis()
count = r.llen(name)
for index in range(count):
yield r.lindex(name, index)
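# Design note: list_iter issues one LINDEX round trip per element, and LINDEX
# itself is O(n) for positions away from the ends, so this is quadratic for
# long lists. A batched variant using LRANGE (a sketch, not part of the
# original API) could look like:
#   def list_iter_batched(self, name, batch=100):
#       start = 0
#       while True:
#           chunk = self.get_redis().lrange(name, start, start + batch - 1)
#           if not chunk:
#               break
#           yield from chunk
#           start += batch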
def list_bpop(self, names, timeout=0, left=False):
"""
描述:
从names对应的一个或多个列表中依次移除元素, 返回结果是一个包含name和移除数据的元组('test:numbers', '2')
如果指定了多个列表, 则【依次】移除, 先移除第一个列表中的元素, 如果第一个列表的元素都被移除了, 再移除第二个列表中的元素, 依次类推
默认按照从右向左的方向进行移除, 可以通过left参数指定从左向右的方向进行移除
如果指定的列表不存在或都为空, 则会【阻塞】指定的时间(秒)直到数据存入, 如果time=0表示一直阻塞直到数据出现
参数:
name:string -redis的键名
timeout:int -列表为空时, 阻塞的的时间, 单位是秒, 如果time=0表示一直阻塞直到任一列表中出现数据, 必须是>=0的整数, 否则time不起作用
left:bool -left=True, 从左向右逐个移除元素
返回:
result:tuple -包含name和移除数据的元组, 如果阻塞指定时间以后没有数据, 返回None
示例:
# test:numbers1 = [1, 2], test:numbers2 = [3, 4],
# 从右向左依次移除
rdz.list_bpop(['test:numbers1', 'test:numbers2']) # ('test:numbers1', '2')
rdz.list_bpop(['test:numbers1', 'test:numbers2']) # ('test:numbers1', '1')
rdz.list_bpop(['test:numbers1', 'test:numbers2']) # ('test:numbers2', '4')
rdz.list_bpop(['test:numbers1', 'test:numbers2']) # ('test:numbers2', '3')
rdz.list_bpop(['test:numbers1', 'test:numbers2'], 2) # 阻塞等待两秒, 如果数据没有出现, 则向下运行
#从左向右依次移除
rdz.list_bpop(['test:numbers1', 'test:numbers2'], left=True) # ('test:numbers1', '1')
rdz.list_bpop(['test:numbers1', 'test:numbers2'], left=True) # ('test:numbers1', '2')
rdz.list_bpop(['test:numbers1', 'test:numbers2'], left=True) # ('test:numbers2', '3')
rdz.list_bpop(['test:numbers1', 'test:numbers2'], left=True) # ('test:numbers2', '4')
rdz.list_bpop(['test:numbers1', 'test:numbers2']) # 一直阻塞等待数据出现
"""
r = self.get_redis()
if left is True:
return r.blpop(names, timeout=timeout)
else:
return r.brpop(names, timeout=timeout)
def list_rpoplpush(self, src, dst, timeout=None):
"""
描述:
从一个列表的右侧移除一个元素并将其添加到另一个列表的左侧, 并将值返回
如果对应的列表中值不存在, 则会阻塞指定的时间(秒)直到数据存入, 如果time=0表示一直阻塞直到数据出现
参数:
src:string -源列表的键名
dst:string -目的列表的键名
timeout:int -源列表为空时, 阻塞的的时间, 单位是秒, None表示不阻塞, 0表示一直阻塞直到任一列表中出现数据,
返回:
item:string - 移动的值, 如果阻塞指定时间以后没有数据, 返回None
示例:
#test:numbers1 = [1, 2], test:numbers2 = [3, 4],
rdz.list_rpoplpush('test:numbers1','test:numbers2') # 返回2, test:numbers1 = ['1'], test:numbers2 = ['2', '3', '4']
rdz.list_rpoplpush('test:numbers1','test:numbers2') # 返回1, test:numbers1 = [], test:numbers2 = ['1', '2', '3', '4']
rdz.list_rpoplpush('test:numbers1', 'test:numbers2', 2) # 阻塞2s等待test:numbers1中的出现数据
rdz.list_rpoplpush('test:numbers1', 'test:numbers2', 0) # 一直阻塞直到test:numbers1中的出现数据
"""
r = self.get_redis()
if timeout is not None:
return r.brpoplpush(src, dst, timeout=timeout)
return r.rpoplpush(src, dst)
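# Design note: RPOPLPUSH/BRPOPLPUSH is the classic Redis building block for a
# "reliable queue": a worker atomically moves an item from the pending list to
# its own processing list, e.g. (illustrative)
#   item = rdz.list_rpoplpush('queue:pending', 'queue:processing', 0)
# and deletes it from 'queue:processing' only after the work succeeds.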
# ------------------------------------------ hash ------------------------------------------
def hash_set(self, name, key=None, value=None, mapping=None, nx=False):
"""
描述:
设置指定散列中的键值对, 默认情况, 如果指定散列不存在, 则创建并赋值, 否则修改已有散列
可以通过mapping一次设置多个键值对(初始化)
如果nx==True, 则只有当key(不是name)不存在时, set操作才执行, 而且nx操作只支持单个值的设置(key-value), 不支持mapping的设置方式
参数:
name:string -redis的键名
key:string -要设置的key
value:string|int|float -要设置的value
mapping:dict -多个键值对组成的dict
nx:bool - nx=True, 只有key在mapping中不存在时才设置
返回:
count:int -设置成功的键值数量
示例:
rdz.hash_set('test:taozh', 'name', 'Zhang Tao') # 创建散列 -> {'name': 'Zhang Tao'}
rdz.hash_set('test:taozh', mapping={'age': 18, 'email': 'taozh@cisco.com'}) # 一次设置多个键值 -> {'name': 'Zhang Tao', 'age': '18', 'email': 'taozh@cisco.com'}
rdz.hash_set('test:taozh', 'email', 'zht@cisco.com', nx=True) # email已经存在, set操作无效 -> {'name': 'Zhang Tao', 'age': '18', 'email': 'taozh@cisco.com'}
rdz.hash_set('test:taozh', 'company', 'cisco', nx=True) # company不存在, set操作有效 -> {'name': 'Zhang Tao', 'age': '18', 'email': 'taozh@cisco.com', 'company': 'cisco'}
"""
r = self.get_redis()
if nx is True:
return r.hsetnx(name, key, value)
return r.hset(name, key=key, value=value, mapping=mapping)
def hash_get(self, name, key):
"""
描述:
获取指定散列中指定key的键值, 如果散列不存在/key不存在, 返回None
参数:
name:string -redis的键名
key:string -要获取的key
返回:
value:string -key对应的键值, 如果散列不存在/key不存在, 返回None
示例:
# test:taozh={'name': 'Zhang Tao', 'age': '18', 'email': 'taozh@cisco.com'}
rdz.hash_get('test:taozh', 'email') # taozh@cisco.com
rdz.hash_get('test:taozh', 'city') # None
"""
return self.get_redis().hget(name, key)
def hash_mset(self, name, mapping):
"""
描述:
在指定散列中批量设置键值对, 等价于hash_set(name,mapping={...}), 如果散列不存在则创建并赋值, 存在则修改
参数:
name:string -redis的键名
mapping:dict -批量设置的键值对
返回:
result:bool -True
示例:
# test:taozh = {'name': 'Zhang Tao'}
rdz.hash_mset('test:taozh', {'age': 18, 'email': 'taozh@cisco.com'}) # test:taozh -> {'name': 'Zhang Tao', 'age': '18', 'email': 'taozh@cisco.com'}
rdz.hash_mset('test:zht', {'name': 'Zhang Tao', 'age': '18'}) # 不存在则创建 test:zht={'name': 'Zhang Tao', 'age': '18'}
"""
# return self.get_redis().hmset(name, mapping) # hmset deprecated
return self.get_redis().hset(name, mapping=mapping)
def hash_mget(self, name, keys, *args):
"""
描述:
在指定散列中获取多个key的键值
可以在keys中指定要获取的key列表, 也可以通过位置参数指定, 两者也可以混用
返回结果为包含值的列表, 如果散列不存在/key不存在, 列表中的值为None
参数:
name:string -redis的键名
keys:list -key列表
args -通过位置参数传递的一个/多个key
返回:
values:list -返回的value列表, 如果散列不存在/key不存在, 列表中的值为None
示例:
#test:taozh={'name': 'Zhang Tao', 'age': '18', 'email': 'taozh@cisco.com', 'city': 'shanghai'}
rdz.hash_mget('test:taozh', 'name') # ['Zhang Tao']
rdz.hash_mget('test:taozh', 'name', 'age') # ['Zhang Tao', '18']
rdz.hash_mget('test:taozh', ['name', 'age']) # ['Zhang Tao', '18']
rdz.hash_mget('test:taozh', ['name', 'age'], 'email') # ['Zhang Tao', '18', 'taozh@cisco.com']
rdz.hash_mget('test:taozh', 'key-nx') # [None]
"""
return self.get_redis().hmget(name, keys, *args)
def hash_del(self, name, keys, *args):
"""
描述:
将指定散列中一个或多个指定key的键值对删除, 并返回删除成功的个数
如果散列不存在, 返回0
参数:
name:string -redis的键名
keys:list -要设置的多个key
args -也可以通过位置参数传递要删除一个/多个的key
返回:
count:int -删除【成功】的个数,
示例:
# test:kv={'k1': '1', 'k2': '2', 'k3': '3', 'k4': '4', 'k5': '5', 'k6': '6', 'k7': '7'}
rdz.hash_del('test:kv', 'k1', 'k2') # 返回2, -> {'k3': '3', 'k4': '4', 'k5': '5', 'k6': '6', 'k7': '7'}
rdz.hash_del('test:kv', ['k3', 'k4']) # 返回2, -> {'k5': '5', 'k6': '6', 'k7': '7'}
rdz.hash_del('test:kv', ['k5','k-nx']) # 返回1, 因为k-nx不存在, 只删除了k5, -> {'k6': '6', 'k7': '7'}
"""
keys = get_list_args(keys, args)
return self.get_redis().hdel(name, *keys)
def hash_getall(self, name):
"""
描述:
获取指定散列的所有键值, 如果散列不存在则返回{}
参数:
name:string -redis的键名
返回:
map:dict -所有的键值dict
示例:
# test:taozh={'name': 'Zhang Tao', 'age': '18', 'email': 'taozh@cisco.com'}
rdz.hash_getall('test:taozh') # {'name': 'Zhang Tao', 'age': '18', 'email': 'taozh@cisco.com'}
rdz.hash_getall('test:hash-nx') # {}
"""
return self.get_redis().hgetall(name)
def hash_exists(self, name, key):
"""
描述:
检查指定散列中是否存在指定的key
如果散列中存在key对应的键值返回True, 如果不存在返回False, 如果散列不存在返回False
参数:
name:string -redis的键名
key:string -指定的key
返回:
is_exist:bool -如果散列中存在key对应的键值返回True, 如果不存在返回False, 如果散列不存在返回False
示例:
# test:taozh={'name': 'Zhang Tao', 'age': '18', 'email': 'taozh@cisco.com'}
rdz.hash_exists('test:taozh', 'name') # True
rdz.hash_exists('test:taozh', 'city') # False, key不存在
rdz.hash_exists('test:not-exist', 'name') # False, 散列不存在
"""
return self.get_redis().hexists(name, key)
def hash_len(self, name):
"""
描述:
获取指定散列中键值对的个数, 如果散列不存在, 则返回0
参数:
name:string -redis的键名
返回:
length:int - 键值个数
示例:
# test:taozh={'name': 'Zhang Tao', 'age': '18', 'email': 'taozh@cisco.com'}
rdz.hash_len('test:taozh') # 3
rdz.hash_len('test:not-exist') # 0
"""
return self.get_redis().hlen(name)
def hash_keys(self, name):
"""
描述:
获取指定散列中所有的key的值列表, 如果散列不存在则返回[]
参数:
name:string -redis的键名
返回:
keys:list -散列中所有key的list, 如果散列不存在则返回[]
示例:
# test:taozh={'name': 'Zhang Tao', 'age': '18', 'email': 'taozh@cisco.com'}
rdz.hash_keys('test:taozh') # ['name', 'age', 'email']
rdz.hash_keys('test:not-exist') # []
"""
return self.get_redis().hkeys(name)
def hash_values(self, name):
"""
描述:
获取指定散列中所有value的list, 如果散列不存在, 则返回[]
参数:
name:string -redis的键名
返回:
values:list -散列中所有value的list, 如果散列不存在则返回[]
示例:
#test:taozh={'name': 'Zhang Tao', 'age': '18', 'email': 'taozh@cisco.com'}
rdz.hash_values('test:taozh') # ['Zhang Tao', '18', 'taozh@cisco.com']
rdz.hash_values('test:hash-nx') # []
"""
return self.get_redis().hvals(name)
def hash_incr(self, name, key, amount=1):
"""用法请参考str_incr"""
return self.get_redis().hincrby(name, key, amount=amount)
def hash_decr(self, name, key, amount=1):
"""用法请参考str_decr"""
return self.get_redis().hincrby(name, key, amount=-amount)
def hash_incrfloat(self, name, key, amount=1.0):
"""用法请参考str_incrfloat
"""
return self.get_redis().hincrbyfloat(name, key, amount=amount)
def hash_decrfloat(self, name, key, amount=1.0):
"""
用法请参考str_decrfloat
"""
return self.get_redis().hincrbyfloat(name, key, amount=-amount)
def hash_scan(self, name, cursor=0, match=None, count=None):
"""
描述:
基于游标的迭代器, 以【分片】的方式【批量】获取数据, 对于数据量较大的数据非常有用, 可以避免取出全部数据把内存撑爆
每次调用时, 返回一个更新的游标cursor和分片数据【字典】组成的元组
match是匹配条件, 可以通过匹配条件对散列的key进行过滤
但是请注意,【match是在检索以后应用的】, 如果每次检索出来的集合包含较少满足条件的数据, 在大多数迭代数据可能都是空
count选项是每次分片的数据长度, 默认是10,
请注意, 即便设置了count, 也【不能确保每次取出来的数据长度】, 真实的长度可能会【大于或等于】设置的值, 甚至会一次全部取出
参数:
name:string -redis的键名
cursor:int -迭代器的游标
match:string -pattern匹配条件, 有如下可选项Å
h?llo -matches hello, hallo and hxllo
h*llo -matches hllo and heeeello
h[ae]llo -matches hello and hallo, but not hillo
h[^e]llo -matches hallo, hbllo, ... but not hello
h[a-b]llo -matches hallo and hbllo
count:int -每次分片的数据长度, 默认是10
返回:
cursor:int - 更新的游标cursor
data:dict -分片数据字典组成的元组
示例:
# 添加测试数据
for i in range(10000):
rdz.hash_set('test:xxx', i, i)
cursor = 0
count = 0
while True:
cursor, data = rdz.hash_scan('test:xxx', cursor=cursor,count=20) # data为包含分片数据的dict ->{'k188': 'v188', 'k938': 'v938',...}
print(cursor, data)
count+=1
if cursor == 0:
break
print(count) # 迭代了大约490次左右
"""
return self.get_redis().hscan(name, cursor=cursor, match=match, count=count)
def hash_scan_iter(self, name, match=None, count=None):
"""
描述:
以迭代器的方式分批去redis中批量获取散列数据, 每个迭代对象为由key和value组成的元组, 数据量较大的数据非常有用
和hash_scan的主要区别是: hash_scan_iter【不需要记住游标】的位置, 迭代即可
参数:
name:string -redis的键名
match:string -pattern匹配条件, 有如下可选项
h?llo -matches hello, hallo and hxllo
h*llo -matches hllo and heeeello
h[ae]llo -matches hello and hallo, but not hillo
h[^e]llo -matches hallo, hbllo, ... but not hello
h[a-b]llo -matches hallo and hbllo
count:int -每次分片的数据长度, 默认是10
返回:
iter -迭代器
示例:
# 添加测试数据
for i in range(10000):
rdz.hash_set('test:xxx', i, i)
for item in rdz.hash_scan_iter('test:xxx'):
print(item) # ('k368', 368.0)
"""
return self.get_redis().hscan_iter(name, match=match, count=count)
# ------------------------------------------ set ------------------------------------------
def set_add(self, name, values, *args):
"""
描述:
向指定集合中添加一个或多个元素, 如果集合不存在则新建并赋值, 返回结果为添加成功的元素个数
可以通过列表或位置参数指定要添加的元素, 两者可以混用
参数:
name:string -redis的键名
values:list -要添加的元素列表
args -通过位置参数传递的一个/多个元素
返回:
success_count:int -添加成功的数量
示例:
rdz.set_add('test:letters', 'a', 'b', 'c') # 3, 创建集合并赋值, test:letters={'a', 'b', 'c'}
rdz.set_add('test:letters', ['b', 'c', 'd']) # 1, 添加成功了'd', test:letters -> {'a', 'b', 'c', 'd'}
rdz.set_add('test:letters', ['c', 'd'], 'e', 'f') # 2, 添加成功了'e'+'f', test:letters -> {'a', 'b', 'c', 'd', 'e', 'f'}
"""
values = get_list_args(values, args)
return self.get_redis().sadd(name, *values)
def set_rem(self, name, values, *args):
"""
描述:
从指定集合中删除指定的元素, 返回结果为删除成功的元素个数
参数:
name:string -redis的键名
values:list -要删除的值list
args -通过位置参数指定要删除的一个/多个值
返回:
success_count:int -删除成功的元素个数, 如果集合不存在, 返回0
示例:
# test:letters = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'}
rdz.set_rem('test:letters', 'a', 'b') # 2, test:letters -> {c', 'd', 'e', 'f', 'g', 'h'}
rdz.set_rem('test:letters', ['c', 'd']) # 2, test:letters-> {'e', 'f', 'g', 'h'}
rdz.set_rem('test:letters', ['e', 'f'], 'g', 'x') # 3, test:letters-> {'h'}, x不存在, 所以结果为3
"""
values = get_list_args(values, args)
return self.get_redis().srem(name, *values)
def set_pop(self, name, count=None):
"""
描述:
从指定集合随机移除一个/多个元素, 并将其返回, 因为集合是无序的, 所以删除是【随机】的
只要设置了count, 返回的肯定是列表, 根据情况, 可能返回的是空列表
- 如果count=0, 返回一个空的列表[]
- 如果count>0, 返回一个移除元素组成的列表, 如果集合不存在, 也会返回一个空的列表[]
参数:
name:string -redis的键名
count:int -要移除的元素数, 取值范围是>=0的整数
返回:
item:string|list -移除的元素, 只要设置了count(>=0), 返回的都是包含移除元素的列表, count=0/1或集合不存在会返回[]
示例:
# test:letters = {'a', 'b', 'c', 'd', 'e'}
rdz.set_pop('test:letters') # 返回'e', test:letters -> {'a', 'd', 'b', 'c'}
rdz.set_pop('test:letters', 2) # 返回['a', 'b'], test:letters -> {'d', 'c'}
"""
return self.get_redis().spop(name, count=count)
def set_card(self, name):
"""
描述
获取指定集合的元素个数
参数:
name:string -redis的键名
返回:
count:int -元素个数, 如果集合不存在, 返回0
示例:
# test:letters = {'a', 'b', 'c', 'd', 'e'}
rdz.set_card('test:letters') # 5
rdz.set_card('test:not-exist') # 0
"""
return self.get_redis().scard(name)
def set_members(self, name):
"""
描述:
获取指定集合中所有元素组成的set, 如果集合不存在返回一个空set对象
因为集合是无序的, 所以每次取出的set元素顺序可能都是不一样的
参数:
name:string -redis的键名
返回:
members:set -所有集合元素组成的set
示例:
#test:letters = {'a', 'b', 'c', 'd', 'e'}
rdz.set_members('test:letters') # {'a', 'b', 'c', 'd', 'e'}
rdz.set_members('test:not-exist') # set()
"""
return self.get_redis().smembers(name)
def set_ismember(self, name, value):
"""
描述:
检查value是否是指定集合的元素
参数:
name:string -redis的键名
value:string|int|float -要检查的元素
返回:
is:bool -如果value是集合的成员, 返回True, 否则返回False, 集合不存在也返回False
示例:
# test:letters = {'a', 'b', 'c', 'd', 'e'}
rdz.set_ismember('test:letters', 'a') # True
rdz.set_ismember('test:letters', 'x') # False
rdz.set_ismember('test:not-exist', 'a') # False
"""
return self.get_redis().sismember(name, value)
def set_scan(self, name, cursor=0, match=None, count=None):
"""
描述:
基于游标的迭代器, 以【分片】的方式【批量】获取数据, 对于数据量较大的数据非常有用, 可以避免取出全部数据把内存撑爆
每次调用时, 返回一个更新的游标cursor和分片数据【列表】组成的元组
match是匹配条件, 可以通过匹配条件对集合的值进行过滤
但是请注意,【match是在检索以后应用的】, 如果每次检索出来的集合包含较少满足条件的数据, 在大多数迭代数据可能都是空
count选项是每次分片的数据长度, 默认是10,
请注意, 即便设置了count, 也【不能确保每次取出来的数据长度】, 真实的长度可能会【大于或等于】设置的值, 甚至会一次全部取出
参数:
name:string -redis的键名
cursor:int -迭代器的游标
match:string -pattern匹配条件, 有如下可选项
h?llo -matches hello, hallo and hxllo
h*llo -matches hllo and heeeello
h[ae]llo -matches hello and hallo, but not hillo
h[^e]llo -matches hallo, hbllo, ... but not hello
h[a-b]llo -matches hallo and hbllo
count:int -每次分片的数据长度, 默认是10
返回:
cursor:int -更新的游标cursor
cursor_data:list -分片数据列表组成的元组
示例:
# 添加测试数据
rdz.set_add('test:xxx', *range(10000))
cursor = 0
count = 0
while True:
cursor, data = rdz.set_scan('test:xxx', cursor=cursor, count=20) # data为包含元素的list -> ['1787', '219', '101',...]
print(cursor, data)
count += 1
if cursor == 0:
break
print(count) # 迭代了大约490次左右
"""
return self.get_redis().sscan(name, cursor=cursor, match=match, count=count)
def set_scan_iter(self, name, match=None, count=None):
"""
描述:
以迭代器的方式, 以【分片】的方式【批量】获取数据, 对于数据量较大的数据非常有用, 可以避免取出全部数据把内存撑爆
和set_scan的主要区别是: set_scan_iter【不需要记住游标】的位置, 迭代即可
参数:
name:string -redis的键名
match:string -pattern匹配条件, 有如下可选项
h?llo -matches hello, hallo and hxllo
h*llo -matches hllo and heeeello
h[ae]llo -matches hello and hallo, but not hillo
h[^e]llo -matches hallo, hbllo, ... but not hello
h[a-b]llo -matches hallo and hbllo
count:int -每次分片的数据长度, 默认是10
返回:
iter -迭代器
示例:
# 添加测试数据
rdz.set_add('test:xxx', *range(10000))
for i in rdz.set_scan_iter('test:xxx'):
print(i) # 218
"""
return self.get_redis().sscan_iter(name, match=match, count=count)
def set_move(self, src, dst, value):
"""
描述:
将指定元素从一个源集合中移动到目的集合
请注意, 只要把元素从源集合移出, 返回结果就是True, 无论是否移入目标集合
参数:
src:string -源集合
dst:string -目标集合
value:string -要移动的元素
返回:
rem_success:bool -移动成功返回True, 否则返回False, 只要把元素从源集合移出, 返回结果就是True, 无论是否移入目标集合
示例:
# test:letters1={'a', 'b', 'c'}, test:letters2={'c', 'd', 'e'}
rdz.set_move('test:letters1', 'test:letters2', 'a') # True, test:letters1={'b', 'c'}, test:letters2={'a', 'c', 'd', 'e'}
rdz.set_move('test:letters1', 'test:letters2', 'c') # True, test:letters1={'b'}, test:letters2={'a', 'c', 'd', 'e'}
rdz.set_move('test:letters1', 'test:letters2', 'f') # False, test:letters1={'b'}, test:letters2={'a', 'c', 'd', 'e'}
"""
return self.get_redis().smove(src, dst, value)
def set_diff(self, names, *args, dst=None):
"""
描述:
差集, 返回只存在于【第一个】集合, 但不在其余集合中的元素集合, 即存在第一个集合中, 但不存在其他集合中的元素的集合
可以将差集结果存储到一个新的dst集合中, 请注意, 如果dst对应的键值在redis已经存在(不论类型), 都会被【替换】
参数:
names:list -多个比较的集合列表
args: -以位置参数方式传递的多个集合列表
返回:
result:set -差值集合, 如果设置dst, 返回差集中的元素【数量】
示例:
# test:letters1={'a', 'b', 'c'}, test:letters2={'b', 'm', 'n'}, test:letters3={'c', 'x', 'y'}
rdz.set_diff('test:letters1', 'test:letters2') # {'c', 'a'}
rdz.set_diff(['test:letters2', 'test:letters3']) # {'b', 'm', 'n'}
rdz.set_diff(['test:letters1', 'test:letters2', 'test:letters3']) # {'a'}
rdz.set_diff('test:letters1', 'test:not-exist') # {'a', 'b', 'c'}, 和不存在的set差集, 返回原集合中所有元素组成的集合
rdz.set_diff('test:not-exist', 'test:letters1') # set(), 不存在的集合和其他集合差集, 返回一空集合对象
#test:diff=['a', 'x']
rdz.set_diff(['test:letters1', 'test:letters2'], dst='test:diff') # 返回2, test:diff = {'a', 'c'}, 将diff结果存储到dst集合中, 无论原dst是什么类型
"""
names = get_list_args(names, args)
r = self.get_redis()
if dst is not None:
return r.sdiffstore(dst, names)
return r.sdiff(names)
def set_inter(self, names, *args, dst=None):
"""
描述:
交集, 返回多个集合中元素的交集, 即同时存在于多个指定集合中的元素集合
可以将交集结果存储到一个新的dst集合中, 请注意, 如果dst对应的键值在redis已经存在(不论类型), 都会被替换
默认返回交集集合, 如果设置dst, 返回交集中的元素数量
参数:
names:list -进行交集运算的集合列表
arge: -以位置参数方式传递的多个集合列表
返回:
result:set -交集集合, 如果设置dst, 返回交集中的元素【数量】
示例:
# test:letters1={'a', 'b', 'c'}, test:letters2={'b', 'c', 'd'}, test:letters3={'c', 'd', 'e'}
rdz.set_inter(['test:letters1', 'test:letters2']) # {'b', 'c'}
rdz.set_inter(['test:letters2', 'test:letters3']) # {'c', 'd'}
rdz.set_inter(['test:letters1', 'test:letters2', 'test:letters3']) # {'c'}
rdz.set_inter('test:letters1', 'test:not-exist') # set(), 和不存在的集合交集, 返回一空集合对象
#test:inter=['a', 'x']
rdz.set_inter(['test:letters1', 'test:letters2'], dst='test:inter') # 2, test:inter = {'b', 'c'}
"""
r = self.get_redis()
if dst is not None:
return r.sinterstore(dst, names, *args)
return r.sinter(names, *args)
def set_union(self, names, *args, dst=None):
"""
描述:
并集, 获取多个集合中元素的并集
可以将并结果存储到一个新的dst集合中, 请注意, 如果dst对应的键值在redis已经存在(不论类型), 都会被替换
默认返回并集结合, 如果设置dst, 返回并集中的元素数量
参数:
names:list -进行并集运算的集合列表
arge: -以位置参数方式传递的多个集合列表
返回:
result:set -并集集合, 如果设置dst, 返回并集中的元素数量
示例:
#test:letters1={'a', 'b', 'c'}, test:letters2={'b', 'c', 'd'}, test:letters3={'c', 'd', 'e'}
rdz.set_union('test:letters1', 'test:letters2') # {'a', 'b', 'c', 'd'}
rdz.set_union(['test:letters2', 'test:letters3']) # {'b', 'c', 'd', 'e'}
rdz.set_union(['test:letters1', 'test:letters2', 'test:letters3']) # {'a', 'b', 'c', 'd', 'e'}
#test:union=['a', 'x']
rdz.set_union(['test:letters1', 'test:letters2'],dst='test:union') # 4, test:union = {'a', 'b', 'c', 'd'}
"""
r = self.get_redis()
if dst is not None:
return r.sunionstore(dst, names, *args)
return r.sunion(names, *args)
# ------------------------------------------ zset ------------------------------------------
def zset_add(self, name, mapping, **kwargs):
"""
Description:
Adds elements to the specified sorted set.
By default, if the zset does not exist it is created and populated.
If an element given in mapping already exists, its score is replaced.
Parameters:
name:string -redis key name
mapping:dict -dict of elements and scores to add; scores must be [numbers]
optional kwargs:
nx:bool - if True, only add elements that do [not] already exist
xx:bool - if True, only update elements that [already] exist
ch:bool - if True, return the number of [changed] elements
Returns:
count:int - number of elements added; by default updates are not counted
Examples:
rdz.zset_add('test:zset', {'a': 10, 'b': 20}) # 2, test:zset = {'a': 10, 'b': 20}
rdz.zset_add('test:zset', {'b': 30, 'c': 40}) # 1, replaces the score of b, test:zset = {'a': 10, 'b': 30, 'c': 40}
rdz.zset_add('test:zset', {'c': 50, 'd': 60}, nx=True) # 1, nx=True only adds the missing element d; c already exists and is left alone, test:zset = {'a': 10, 'b': 30, 'c': 40, 'd': 60}
rdz.zset_add('test:zset', {'d': 70, 'e': 80}, xx=True) # 0, xx=True only updates the existing element d; e does not exist and is left alone, test:zset = {'a': 10, 'b': 30, 'c': 40, 'd': 70}
rdz.zset_add('test:zset', {'x': 100, 'y': 200, 'z': 300})
rdz.zset_add('test:zset', {'x': 110, 'y': 220, 'z': 300}, ch=True) # 2, only x and y were changed
"""
return self.get_redis().zadd(name, mapping, **kwargs)
def zset_rem(self, name, members, *args):
"""
Description:
Removes one or more elements from the specified sorted set.
Parameters:
name:string -redis key name
members:list -list of elements to remove
args -one or more elements to remove, passed as positional arguments
Returns:
count:int -number of elements removed; returns 0 if the sorted set does not exist
Examples:
# test:zset={'a': 10, 'b': 20, 'c': 30, 'x': 100}
rdz.zset_rem('test:zset', 'a') # 1, test:zset -> {'b': 20, 'c': 30, 'x': 100}
rdz.zset_rem('test:zset', ['b', 'c', 'e']) # 2, only b and c were removed, test:zset -> {'x': 100}
"""
members = get_list_args(members, args)
return self.get_redis().zrem(name, *members)
def zset_card(self, name):
"""
Description:
Gets the number of elements in the specified sorted set.
Parameters:
name:string -redis key name
Returns:
count:int -number of elements in the sorted set; returns 0 if the zset does not exist
Examples:
# test:zset={'a': 10, 'b': 20, 'c': 30}
rdz.zset_card('test:zset') # 3
rdz.zset_card('test:not-exist') # 0
"""
return self.get_redis().zcard(name)
def zset_count(self, name, min_score, max_score):
"""
Description:
Gets the number of elements in the specified sorted set whose [score] lies in [min, max].
Returns 0 if the sorted set does not exist.
Parameters:
name:string -redis key name
min_score:int/float -minimum score, >=min
max_score:int/float -maximum score, <=max
Returns:
count:int -number of elements whose [score] lies in [min, max]
Examples:
# test:zset={'a': 10, 'b': 20, 'c': 30}
rdz.zset_count('test:zset', 20, 30) # 2, 20<=score<=30
rdz.zset_count('test:zset', 21, 30) # 1, 21<=score<=30
rdz.zset_count('test:not-exist', 1, 10) # 0
"""
return self.get_redis().zcount(name, min_score, max_score)
def zset_score(self, name, member):
"""
Description:
Gets the score of the given element in the specified sorted set.
Parameters:
name:string -redis key name
member:string -element in the sorted set
Returns:
score:float -the element's score; returns None if the sorted set or the element does not exist
Examples:
# test:zset={'a': 10, 'b': 20, 'c': 30}
rdz.zset_score('test:zset', 'a') # 10.0
rdz.zset_score('test:zset', 'x') # None
rdz.zset_score('test:not-exist', 'x') # None
"""
return self.get_redis().zscore(name, member)
def zset_rank(self, name, member):
"""
Description:
Gets the index (starting from 0) of the given element in the sorted set.
Parameters:
name:string -redis key name
member:str -the element to look up
Returns:
index:int -the element's index in the sorted set; returns None if the sorted set or the element does not exist
Examples:
# test:zset={'a': 10, 'b': 20, 'c': 30}
rdz.zset_rank('test:zset', 'a') # 0
rdz.zset_rank('test:zset', 'b') # 1
rdz.zset_rank('test:zset', 'x') # None
rdz.zset_rank('test:not-exist', 'x') # None
"""
return self.get_redis().zrank(name, member)
def zset_incr(self, name, amount, member):
"""
Description:
Increments the [score] of the given element in the sorted set; if the element does not exist, it is created with that score.
Parameters:
name:string -redis key name
amount:int|float -the [score] to add; a value < 0 subtracts
member:str -the element whose score to increment
Returns:
score:float -the score after the increment
Examples:
# test:zset={'a': 10, 'b': 20, 'c': 30}
rdz.zset_incr('test:zset', 1, 'a') # 11.0
rdz.zset_incr('test:zset', 2.2, 'b') # 22.2
rdz.zset_incr('test:zset', -2, 'c') # 28.0
rdz.zset_incr('test:zset', 3, 'e') # 3.0, test:zset -> [('e', 3), ('a', 11), ('b', 22.2), ('c', 28)]
"""
return self.get_redis().zincrby(name, amount, member)
def zset_decr(self, name, amount, member):
"""
Description:
Decrements the [score] of the given element in the sorted set; if the element does not exist, it is created with the negated amount as its score.
Parameters:
name:string -redis key name
amount:int|float -the [score] to subtract; a value < 0 increments instead
member:str -the element whose score to decrement
Returns:
score:float -the score after the decrement
Examples:
# test:zset={'a': 10, 'b': 20, 'c': 30}
rdz.zset_decr('test:zset', 1, 'a') # 9.0
rdz.zset_decr('test:zset', 2.2, 'b') # 17.8
rdz.zset_decr('test:zset', -2, 'c') # 32.0
rdz.zset_decr('test:zset', 3, 'e') # -3.0, test:zset -> [('e', -3), ('a', 9), ('b', 17.8), ('c', 32)]
"""
return self.get_redis().zincrby(name, -amount, member)
def zset_range(self, name, start, end, desc=False, withscores=False, score_cast_func=float, byscore=False):
"""
Description:
Gets elements of the specified sorted set by index range.
By default results are ordered by score [ascending]; set desc=True for descending order.
By default start and end are an [index] range; negative indexes are supported and -1 is the last element. With byscore=True, start and end are a score range instead.
Whether start and end are indexes or scores, both bounds are [inclusive] (>= and <=).
If both desc=True and byscore=True are given, start must be greater than end, otherwise an empty result is returned.
Parameters:
name -redis key name
start:int -start [index] of the range; with byscore=True, the start score, start<=
end:int -end [index] of the range; with byscore=True, the end score, <=end
desc:bool -default is ascending by score; set desc=True to sort by score [descending]
withscores:bool -by default only elements are returned; set withscores=True to return scores as well
score_cast_func:func -function used to convert scores, float by default; it only affects the returned values, [not the ordering]
byscore:bool -default queries by index; set byscore=True to query by score, using start and end as the score range
Returns:
values:list -elements in the start-end range; with withscores=True, a list of (element, score) tuples, see the examples
Examples:
# test:zset={'a': 10, 'b': 20, 'c': 30}
rdz.zset_range('test:zset', 0, -1) # all elements, ascending: ['a', 'b', 'c']
rdz.zset_range('test:zset', 0, 1) # ascending, elements with 0<=index<=1: ['a', 'b']
rdz.zset_range('test:zset', 0, -1, desc=True) # all elements, descending: ['c', 'b', 'a']
rdz.zset_range('test:zset', 0, 1, desc=True) # descending, elements with 0<=index<=1: ['c', 'b']
rdz.zset_range('test:zset', 0, -1, withscores=True) # elements with scores: [('a', 10.0), ('b', 20.0), ('c', 30.0)]
rdz.zset_range('test:zset', 0, 20, withscores=True, byscore=True) # start and end are a score range: [('a', 10.0), ('b', 20.0)]
rdz.zset_range('test:zset', 20, 0, desc=True, withscores=True, byscore=True) # descending, scores with 20>=score>=0: [('b', 20.0), ('a', 10.0)]
rdz.zset_range('test:zset', 0, 20, desc=True, withscores=True, byscore=True) # returns [], no scores satisfy 0>=score>=20
rdz.zset_range('test:zset', 0, -1, withscores=True, score_cast_func=int) # [('a', 10), ('b', 20), ('c', 30)]
rdz.zset_range('test:zset', 0, -1, withscores=True, score_cast_func=lambda x: str(x) + '%') # [('a', '10%'), ('b', '20%'), ('c', '30%')]
"""
return self.get_redis().zrange(name, start, end, desc=desc, withscores=withscores, score_cast_func=score_cast_func, byscore=byscore)
def zset_revrange(self, name, start, end, withscores=False, score_cast_func=float):
"""
Description:
Gets elements from the specified sorted set by index range, ordered by score [from high to low].
start and end are an [index] range, not a score range.
A simplified variant of zset_range, equivalent to zset_range(name, start, end, desc=True, withscores=withscores, score_cast_func=score_cast_func)
Parameters:
name:string -redis key name
start:int -start [index], may be negative, start<=
end:int -end [index], may be negative, <=end
withscores:bool -by default only elements are returned; set withscores=True to return scores as well
score_cast_func:func -function used to convert scores
Returns:
values:list -elements in the start-end range; with withscores=True, a list of (element, score) tuples, see the examples
Examples:
# test:zset={'a': 10, 'b': 20, 'c': 30}
rdz.zset_revrange('test:zset', 0, -1) # ['c', 'b', 'a']
rdz.zset_revrange('test:zset', 0, 1) # ['c', 'b']
rdz.zset_revrange('test:zset', 0, -1, withscores=True) # [('c', 30.0), ('b', 20.0), ('a', 10.0)]
rdz.zset_revrange('test:zset', 0, -1, withscores=True, score_cast_func=lambda x: str(x) + '%') # [('c', '30%'), ('b', '20%'), ('a', '10%')]
"""
return self.get_redis().zrevrange(name, start, end, withscores=withscores, score_cast_func=score_cast_func)
def zset_rangebyscore(self, name, min_score, max_score, start=None, num=None, withscores=False, score_cast_func=float):
"""
Description:
Gets elements from the specified sorted set by score range, ordered by score [from low to high].
min_score and max_score are [scores], not indexes.
start and num select the offset and count of elements to return; they must be given together, otherwise an exception is raised.
Equivalent to zset_range(name, start, end, desc=False, withscores=..., score_cast_func=..., byscore=True) with the start/num parameters added.
Parameters:
name:string -redis key name
min_score:int -minimum [score], min<=
max_score:int -maximum [score], <=max
start:int -start [offset] into the matching elements
num:int -[number] of elements to return
withscores:bool -by default only elements are returned; set withscores=True to return scores as well
score_cast_func:func -function used to convert scores
Returns:
values:list -elements within the min-max range, ordered from low to high score
returns [] if the sorted set does not exist or no element matches
with withscores=True, a list of (element, score) tuples, see the examples
Examples:
# test:zset={'a': 10, 'b': 20, 'c': 30, 'd': 40, 'e': 50, 'f': 60}
rdz.zset_rangebyscore('test:zset', 20, 50) # ['b', 'c', 'd', 'e']
rdz.zset_rangebyscore('test:zset', 20, 50, withscores=True) # [('b', 20.0), ('c', 30.0), ('d', 40.0), ('e', 50.0)]
rdz.zset_rangebyscore('test:zset', 20, 50, 0, 1, withscores=True) # [('b', 20.0)]
rdz.zset_rangebyscore('test:zset', 20, 50, 1, 2, withscores=True) # [('c', 30.0), ('d', 40.0)]
rdz.zset_rangebyscore('test:zset', 20, 50, 1, 10, withscores=True) # [('c', 30.0), ('d', 40.0), ('e', 50.0)]
"""
return self.get_redis().zrangebyscore(name, min_score, max_score, start=start, num=num, withscores=withscores, score_cast_func=score_cast_func)
def zset_revrangebyscore(self, name, max_score, min_score, start=None, num=None, withscores=False, score_cast_func=float):
"""
Description:
Gets elements from the specified sorted set by score range, ordered by score [from high to low].
max_score and min_score are [scores], not indexes.
start and num select the offset and count of elements to return; they must be given together, otherwise an exception is raised.
Equivalent to zset_range(name, start, end, desc=True, withscores=..., score_cast_func=..., byscore=True) with the start/num parameters added.
Difference from zset_rangebyscore: zset_rangebyscore is ascending, zset_revrangebyscore is descending.
Parameters:
name:string -redis key name
max_score:int -maximum [score], <=max
min_score:int -minimum [score], min<=
start:int -start [offset] into the matching elements
num:int -[number] of elements to return
withscores:bool -by default only elements are returned; set withscores=True to return scores as well
score_cast_func:func -function used to convert scores
Returns:
values:list -elements within the min-max range, ordered from high to low score
returns [] if the sorted set does not exist or no element matches
with withscores=True, a list of (element, score) tuples, see the examples
Examples:
# test:zset={'a': 10, 'b': 20, 'c': 30, 'd': 40, 'e': 50, 'f': 60}
rdz.zset_revrangebyscore('test:zset', 50, 20) # ['e', 'd', 'c', 'b']
rdz.zset_revrangebyscore('test:zset', 50, 20, withscores=True) # [('e', 50.0), ('d', 40.0), ('c', 30.0), ('b', 20.0)]
rdz.zset_revrangebyscore('test:zset', 50, 20, 0, 1, withscores=True) # [('e', 50.0)]
rdz.zset_revrangebyscore('test:zset', 50, 20, 1, 2, withscores=True) # [('d', 40.0), ('c', 30.0)]
rdz.zset_revrangebyscore('test:zset', 50, 20, 1, 10, withscores=True) # [('d', 40.0), ('c', 30.0), ('b', 20.0)]
"""
return self.get_redis().zrevrangebyscore(name, max_score, min_score, start=start, num=num, withscores=withscores, score_cast_func=score_cast_func)
def zset_remrangebyrank(self, name, start, end):
"""
Description:
Removes elements from the sorted set by [index] range.
The elements at the start and end indexes are removed as well.
Parameters:
name:string -redis key name
start:int -minimum index, min<=, may be negative, -1 is the last element
end:int -maximum index, <=max, may be negative, -1 is the last element
Returns:
rem_count:int -number of elements removed; returns 0 if the sorted set does not exist or the indexes are out of range
Examples:
# test:zset={'a': 10, 'b': 20, 'c': 30, 'd': 40, 'e': 50, 'f': 60}
rdz.zset_remrangebyrank('test:zset', 0, 3) # 4, test:zset -> [('e', 50.0), ('f', 60.0)]
rdz.zset_remrangebyrank('test:zset', 10, 20) # 0
# test:zset={'a': 10, 'b': 20, 'c': 30, 'd': 40, 'e': 50, 'f': 60}
rdz.zset_remrangebyrank('test:zset', -3, -1) # 3, test:zset -> [('a', 10.0), ('b', 20.0), ('c', 30.0)]
rdz.zset_remrangebyrank('test:not-exist', 0, 2) # 0
"""
return self.get_redis().zremrangebyrank(name, start, end)
def zset_remrangebyscore(self, name, min_score, max_score):
"""
Description:
Removes elements from the sorted set by [score] range.
The elements with scores equal to min_score and max_score are removed as well.
Parameters:
name:string -redis key name
min_score:int -minimum score, min<=
max_score:int -maximum score, <=max
Returns:
rem_count:int -number of elements removed; returns 0 if the sorted set does not exist or no score falls in the range
Examples:
# test:zset={'a': 10, 'b': 20, 'c': 30, 'd': 40, 'e': 50, 'f': 60}
rdz.zset_remrangebyscore('test:zset', 0, 40) # 4, test:zset -> [('e', 50.0), ('f', 60.0)]
rdz.zset_remrangebyscore('test:zset', 100, 200) # 0
# test:zset={'a': 10, 'b': 20, 'c': 30, 'd': 40, 'e': 50, 'f': 60}
rdz.zset_remrangebyscore('test:zset', 30, 50) # 3, test:zset -> [('a', 10.0), ('b', 20.0), ('f', 60.0)]
rdz.zset_remrangebyscore('test:not-exist', 0, 100) # 0
"""
return self.get_redis().zremrangebyscore(name, min_score, max_score)
def zset_scan(self, name, cursor=0, match=None, count=None, score_cast_func=float):
"""
Description:
Cursor-based iterator that fetches data in [slices], in [batches]; very useful for large data sets, because it avoids pulling everything into memory at once.
Each call returns a tuple of an updated cursor and a [list] with the slice of data; note that the items in the list are [unordered].
match is a filter pattern that can be used to filter the member names.
Note that [match is applied after retrieval]: if few items in each retrieved slice satisfy the pattern, most iterations may return empty results.
count is the slice-size hint per call, 10 by default.
Note that even with count set, the [length of each returned slice is not guaranteed]: the real length may be [greater than or equal to] the hint, and everything may even come back in a single call.
Parameters:
name:string -redis key name
cursor:int -the iteration cursor
match:string -filter pattern; the options are:
h?llo -matches hello, hallo and hxllo
h*llo -matches hllo and heeeello
h[ae]llo -matches hello and hallo, but not hillo
h[^e]llo -matches hallo, hbllo, ... but not hello
h[a-b]llo -matches hallo and hbllo
count:int -slice-size hint per call, 10 by default
Returns:
cursor:int -the updated cursor
data:tuple -the slice of data as a list
Examples:
# add test data
maps = {}
for i in range(10000):
maps['k' + str(i)] = i
rdz.zset_add('test:xxx', maps)
cursor = 0
count = 0
while True:
cursor, data = rdz.zset_scan('test:xxx', cursor=cursor, count=20) # data is the list with the current slice -> [('k3299', 3299.0), ('k6223', 6223.0), ...]
print(cursor, data)
count += 1
if cursor == 0:
break
print(count) # roughly 490 iterations
"""
return self.get_redis().zscan(name, cursor=cursor, match=match, count=count, score_cast_func=score_cast_func)
def zset_scan_iter(self, name, match=None, count=None, score_cast_func=float):
"""
Description:
Iterator-based variant that fetches data in [slices], in [batches]; very useful for large data sets, because it avoids pulling everything into memory at once.
Main difference from zset_scan: zset_scan_iter [does not require tracking the cursor]; just iterate.
Parameters:
name:string -redis key name
match:string -filter pattern; the options are:
h?llo -matches hello, hallo and hxllo
h*llo -matches hllo and heeeello
h[ae]llo -matches hello and hallo, but not hillo
h[^e]llo -matches hallo, hbllo, ... but not hello
h[a-b]llo -matches hallo and hbllo
count:int -slice-size hint per call, 10 by default
Returns:
iter -the iterator
Examples:
# add test data
maps = {}
for i in range(10000):
maps['k' + str(i)] = i
rdz.zset_add('test:xxx', maps)
for i in rdz.zset_scan_iter('test:xxx'):
print(i) # ('k368', 368.0)
"""
return self.get_redis().zscan_iter(name, match=match, count=count, score_cast_func=score_cast_func)
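# Usage sketch (illustrative; the key 'metrics:latency' and pattern 'api:*'
# are hypothetical): iterate a large zset in slices without loading it all at
# once, filtering member names with match. Remember that match is applied
# after each slice is fetched, so sparse matches can yield many empty slices.
#
# for member, score in rdz.zset_scan_iter('metrics:latency', match='api:*', count=100):
#     print(member, score)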
# ------------------------------------------ lock ------------------------------------------
def acquire_lock(self, lock_name, lock_seconds=10, acquire_seconds=10):
"""
Description:
Acquires a lock.
Redis transactions are implemented with the MULTI and EXEC commands, ensuring that one client's commands run without being interrupted by other clients; other clients' commands are processed only after the transaction finishes.
The Python client implements transactions via pipelines, sending all commands to Redis at once, combined with the WATCH command to ensure the watched keys have not been modified before EXEC runs.
This is an "optimistic lock": it does not stop other clients from modifying the data; instead, when WATCH detects a change, EXEC is cancelled and the operation retries, re-checking its preconditions before deciding how to proceed.
Note that WATCH is only meaningful together with EXEC; WATCH alone has no effect.
With many clients operating on the same data, this WATCH approach can cause a lot of retries and is awkward to code.
The acquire_lock/release_lock methods therefore provide a distributed lock instead.
1. Acquiring the lock:
-on success, returns the lock's identifier and sets an expiry time, so the lock is not held forever if a client exits abnormally
-if the lock already exists, waits up to acquire_seconds; if it is not acquired within that time, returns False
-if the lock already exists but has no expiry time, sets the expiry to lock_seconds, so the lock cannot stay unavailable forever
2. Releasing the lock:
-uses the identifier to check whether the lock has changed; if not, the lock is deleted, otherwise False is returned
Parameters:
lock_name:string -name of the lock; multiple locks may exist
lock_seconds:int -expiry time of the lock; the lock is removed automatically on timeout
acquire_seconds:int -how long to wait for the lock, 10 seconds by default; returns False if the lock is not acquired within acquire_seconds
Returns:
identifier:string|bool -the lock's identifier on success, False on failure
Examples:
def lock_test():
locked = rdz.acquire_lock('a-lock')
if locked is False:
return False
redis_conn = rdz.get_redis()
pipe = redis_conn.pipeline(True)
try:
pipe.set('a', 1)
pipe.set('b', 2)
pipe.execute()
finally:
rdz.release_lock('a-lock', locked)
"""
r = self.get_redis()
identifier = str(uuid.uuid4()) # checked when releasing the lock
lock_name = gen_lock_name(lock_name)
lock_seconds = int(math.ceil(lock_seconds)) # ensure an integer
end = time.time() + acquire_seconds
while time.time() < end:
if r.setnx(lock_name, identifier): # if lock_name does not exist, set it with an expiry time and return the identifier
r.expire(lock_name, lock_seconds)
return identifier
elif r.ttl(lock_name) == -1: # if lock_name has no expiry time, set one to avoid it staying locked forever
r.expire(lock_name, lock_seconds)
time.sleep(0.01)
return False
def release_lock(self, lockname, identifier):
"""
Description:
Releases a lock.
Parameters:
lockname:string -name of the lock to release
identifier:string -identifier of the lock to release
Returns:
result:bool -True if the release succeeded, False otherwise
Examples:
# see acquire_lock
"""
pipe = self.get_redis().pipeline(True)
lockname = gen_lock_name(lockname)
while True:
try:
pipe.watch(lockname) # use watch to ensure lockname has not been changed
if pipe.get(lockname) == identifier: # check whether the lock identifier has changed
pipe.multi()
pipe.delete(lockname)
pipe.execute() # execute calls unwatch internally
return True # released successfully
pipe.unwatch()
break
except redis.exceptions.WatchError:
pass
return False # the lock was lost
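# Usage sketch (illustrative only; 'inventory:lock' and the inventory:* keys
# are hypothetical): acquire the lock, run the pipelined work, and always
# release it in a finally block so a failure cannot leave the lock held.
#
# def sell_one(rdz):
#     locked = rdz.acquire_lock('inventory:lock', lock_seconds=10, acquire_seconds=5)
#     if locked is False:
#         return False # somebody else holds the lock and we timed out waiting
#     try:
#         pipe = rdz.get_redis().pipeline(True)
#         pipe.decr('inventory:count') # the pipelined commands run under the lock
#         pipe.rpush('inventory:log', 'sold one')
#         pipe.execute()
#         return True
#     finally:
#         rdz.release_lock('inventory:lock', locked)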
# ------------------------------------------ ext ------------------------------------------
def get_names(self, pattern='*', **kwargs):
return self.keys(pattern=pattern, **kwargs)
def set_value(self, name, value, **kwargs):
"""
Description:
Sets the value for name. If a value already exists for name, the operation is applied on top of it (list/hash/set/zset); to replace it, delete the old value with the delete method first.
The value's Python type decides the redis type, as follows:
-if value is str/int/float, it is stored as a string
-if value is a list, it is stored as a list (if the list already exists, the values are appended to it)
-if value is a dict, it is stored as a hash by default; pass type='zset' to store it as a zset
-if value is a set, it is stored as a set
-any other type raises an exception
This is a convenience method; for finer control over the write, call the functions of the specific type.
Parameters:
name:string -redis key name
value:str|int|float|list|dict|set -the value to set
Returns:
result:bool|integer
-when setting a str, returns True/False
-when setting a list, returns the length of the list
-when setting a hash/set/zset, returns the number of elements added/changed
Examples:
rdz.set_value('test:str', 'a') # str
rdz.set_value('test:str-number', 1.0) # number
rdz.set_value('test:list', [1, 2, 3]) # list
rdz.set_value('test:hash', {'a': 1, 'b': 2, 'c': 3}) # hash
rdz.set_value('test:set', {'x', 'y', 'z'}) # set
rdz.set_value('test:zset', {'x': 1, 'y': 2, 'z': 3}, type='zset') # zset
"""
if value is None:
return False
type_ = type(value)
if type_ is str or type_ is int or type_ is float:
return self.str_set(name, value)
if type_ is list:
return self.list_push(name, value)
if type_ is dict:
if kwargs.get('type') == 'zset':
return self.zset_add(name, value)
else:
return self.hash_set(name, mapping=value)
if type_ is set:
return self.set_add(name, value)
raise TypeError('only list/dict/set/str/int/float are supported.')
def get_value(self, name):
"""
Description:
Returns the value stored at name, dispatching on the key's type.
Parameters:
name:string -redis key name to fetch
Returns:
result -the fetched value; the result type depends on the key's type
Examples:
rdz.get_value('test:str') # a
rdz.get_value('test:str-number') # 1.0
rdz.get_value('test:list') # ['1', '2', '3']
rdz.get_value('test:hash') # {'a': '1', 'b': '2', 'c': '3'}
rdz.get_value('test:set') # {'x', 'y', 'z'}
rdz.get_value('test:zset') # [('x', 1.0), ('y', 2.0), ('z', 3.0)]
"""
type_ = self.get_type(name)
if type_ == 'string':
return self.str_get(name)
if type_ == 'list':
return self.list_getall(name)
if type_ == 'hash':
return self.hash_getall(name)
if type_ == 'set':
return self.set_members(name)
if type_ == 'zset':
return self.zset_range(name, 0, -1, withscores=True)
return None
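# Round-trip sketch (illustrative; the 'demo:*' keys are hypothetical):
# set_value picks the redis type from the Python type, and get_value
# dispatches on the stored type when reading back.
#
# rdz.set_value('demo:tags', {'redis', 'python'}) # stored as a set
# rdz.set_value('demo:scores', {'alice': 3, 'bob': 5}, type='zset') # stored as a zset
# rdz.get_value('demo:tags') # {'redis', 'python'}
# rdz.get_value('demo:scores') # [('alice', 3.0), ('bob', 5.0)]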
# ----------------- list -----------------
def list_getall(self, name, nx_none=False):
"""
Description:
Returns all elements of the specified list.
If the list does not exist, returns [].
If the list does not exist and nx_none=True, returns None.
Parameters:
name:string -redis key name
nx_none:bool -with nx_none=True, returns None when the list does not exist
Returns:
result:list -list of the elements
Examples:
# test:numbers = ['1', '2', '3', '4', '5', '6']
rdz.list_getall('test:numbers') # ['1', '2', '3', '4', '5', '6']
rdz.list_getall('test:not-exist') # list does not exist, returns []
rdz.list_getall('test:not-exist', nx_none=True) # list does not exist, returns None
"""
r = self.get_redis()
if nx_none is True and r.exists(name) == 0:
return None
return r.lrange(name, 0, -1)
def list_exists(self, name, value):
"""
Description:
Checks whether the given value exists in the specified list.
Parameters:
name:string -redis key name
value:string|int|float -the element to look for
Returns:
is_exists:bool -True if the element exists, False otherwise
Examples:
# test:numbers=[1, 2, 3]
rdz.list_exists('test:numbers', 1) # True
rdz.list_exists('test:numbers', 10) # False
rdz.list_exists('test:not-exist', 1) # False
"""
value = str(value)
for item in self.list_iter(name): # iterate over the list
if value == item:
return True
return False
# ----------------- set -----------------
def set_len(self, name):
"""
Description:
Gets the number of elements in the specified set.
Parameters:
name:string -redis key name
Returns:
count:int -number of elements; returns 0 if the set does not exist
Examples:
# test:letters = {'a', 'b', 'c', 'd', 'e'}
rdz.set_len('test:letters') # 5
rdz.set_len('test:not-exist') # 0
"""
return self.set_card(name)
def set_exists(self, name, value):
"""
Description:
Checks whether value is a member of the specified set.
Parameters:
name:string -redis key name
value:string|int|float -the element to check
Returns:
is:bool -True if value is a member of the set, False otherwise; also False if the set does not exist
Examples:
# test:letters = {'a', 'b', 'c', 'd', 'e'}
rdz.set_exists('test:letters', 'a') # True
rdz.set_exists('test:letters', 'x') # False
rdz.set_exists('test:not-exist', 'a') # False
"""
return self.set_ismember(name, value)
def set_getall(self, name):
"""
Description:
Gets a set of all elements in the specified set; returns an empty set object if the set does not exist.
Because sets are unordered, the order of the returned elements may differ on every call.
Parameters:
name:string -redis key name
Returns:
members:set -set of all the set's elements
Examples:
# test:letters = {'a', 'b', 'c', 'd', 'e'}
rdz.set_getall('test:letters') # {'a', 'b', 'c', 'd', 'e'}
rdz.set_getall('test:not-exist') # set()
"""
return self.set_members(name)
# ----------------- zset -----------------
def zset_index(self, name, member):
"""
Description:
Gets the index (starting from 0) of the given element in the sorted set.
Parameters:
name:string -redis key name
member:str -the element to look up
Returns:
index:int -the element's index in the sorted set; returns None if the sorted set or the element does not exist
Examples:
# test:zset={'a': 10, 'b': 20, 'c': 30}
rdz.zset_index('test:zset', 'a') # 0
rdz.zset_index('test:zset', 'b') # 1
rdz.zset_index('test:zset', 'x') # None
rdz.zset_index('test:not-exist', 'x') # None
"""
return self.zset_rank(name, member)
def zset_len(self, name):
"""
Description:
Gets the number of elements in the specified sorted set.
Parameters:
name:string -redis key name
Returns:
count:int -number of elements in the sorted set; returns 0 if the zset does not exist
Examples:
# test:zset={'a': 10, 'b': 20, 'c': 30}
rdz.zset_len('test:zset') # 3
rdz.zset_len('test:not-exist') # 0
"""
return self.zset_card(name)
def zset_getall(self, name, withscores=True):
"""
Description:
Returns all elements of the specified sorted set.
Parameters:
name -redis key name
withscores -scores are returned together with the elements by default; set withscores=False to get only the elements
Returns:
values:list -list of all elements; with withscores=True, a list of (element, score) tuples
Examples:
# test:zset={'a': 10, 'b': 20, 'c': 30}
rdz.zset_getall('test:zset') # elements with scores: [('a', 10.0), ('b', 20.0), ('c', 30.0)]
rdz.zset_getall('test:zset', withscores=False) # ['a', 'b', 'c']
"""
return self.zset_range(name, 0, -1, withscores=withscores)
def zset_exists(self, name, member):
"""
Description:
Checks whether the given element exists in the specified sorted set.
Parameters:
name:string -redis key name
member:string -the element to check
Returns:
is_exists:bool -True if the element exists, False otherwise
Examples:
# test:zset={'a': 10, 'b': 20, 'c': 30}
rdz.zset_exists('test:zset', 'a') # True
rdz.zset_exists('test:zset', 'b') # True
rdz.zset_exists('test:zset', 'x') # False
rdz.zset_exists('test:not-exist', 'x') # False
"""
return self.zset_rank(name, member) is not None
# ------------------------------------------ pubsub ------------------------------------------
def get_pubsub(self):
"""
Description:
Returns a publish/subscribe object with which you can subscribe to channels and listen for published messages.
Returns:
pubsub:PubSub -the PubSub publish/subscribe object
Examples:
pubsub = rdz.get_pubsub()
pubsub.subscribe('ws:channel')
"""
return self.redis_ins.pubsub()
def publish(self, channel, message, **kwargs):
"""
Description:
Sends a message to the specified channel(s).
Parameters:
channel:string|list -the channel, or a list of channels
message:string -the message to send
Examples:
rdz.publish('channel1', 'This is a message') # send a message to channel1
"""
channel_type = type(channel)
if channel_type is list or channel_type is tuple:
for chl in channel:
self.get_redis().publish(chl, message, **kwargs)
else:
return self.get_redis().publish(channel, message, **kwargs)
def subscribe(self, channels, callback, thread=False):
"""
Description:
Subscribes to one or more channels; when a message is published to one of them, callback is invoked to handle it.
Subscribing directly blocks the code from running any further, so the thread parameter is provided to handle the subscription in a thread.
Parameters:
channels:string|list -one or more channels to subscribe to
callback:func -the callback function; if callback returns False, the subscription is cancelled
thread:bool -with thread=True, a thread is started to handle the subscription
Examples:
def consumer(msg): # message callback; msg is the received message object
data = msg.get('data')
if type(data) == bytes:
data = data.decode('utf-8')
if data == 'exit':
return False # return False to unsubscribe
print(msg)
rdz.subscribe(['channel1', 'channel2'], consumer, thread=True) # subscribe to 'channel1' and 'channel2' in a thread
print('with thread != True this line would never run')
"""
if thread is True:
threading.Thread(target=subscribe, args=(self, channels, callback)).start()
else:
subscribe(self, channels, callback)
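# Round-trip sketch (illustrative; 'demo:events' is a hypothetical channel):
# start a threaded subscriber, publish a few messages, then send 'exit' so
# the consumer returns False and unsubscribes.
#
# def consumer(msg):
#     data = msg.get('data')
#     if isinstance(data, bytes):
#         data = data.decode('utf-8')
#     if data == 'exit':
#         return False
#     print('got:', data)
#
# rdz.subscribe('demo:events', consumer, thread=True)
# rdz.publish('demo:events', 'hello')
# rdz.publish('demo:events', 'exit')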
__version__ = '0.3.0'
|
test_larcv_client.py
|
import os,sys,time
from ublarcvserver import ublarcvserver
from multiprocessing import Process
from larcv import larcv
from ROOT import std
"""
This script is used to test the Majordomo classes.
We implement a dummy setup where the client and worker just say hello to each other.
Also serves as an example.
We also set up the basic larcv client and worker, which pass larcv images back and forth.
"""
verbose = False
def start_worker(endpoint):
global verbose
print("start worker on ", endpoint)
worker = ublarcvserver.MirrorWorker(endpoint, verbose)
print("worker started: ", worker.get_id_name())
worker.run()
#while 1:
# time.sleep(1)
# endpoint:
endpoint = "tcp://localhost:6005"
bindpoint = "tcp://*:6005"
# SETUP THE LARCV INPUT
input_rootfile = sys.argv[1]
io = larcv.IOManager(larcv.IOManager.kREAD)
io.add_in_file( input_rootfile )
io.initialize()
# setup the worker
pworker = Process(target=start_worker,args=(endpoint,))
pworker.daemon = True
pworker.start()
#worker = ublarcvserver.DummyWorker(endpoint, True)
print "worker process created"
# setup the broker
broker = ublarcvserver.MDBroker(bindpoint, verbose)
broker.start()
print "broker started"
# setup the client
client = ublarcvserver.LArCVClient(endpoint, "mirror", verbose, ublarcvserver.LArCVClient.kSPARSE)
print "client created"
for x in range(5):
io.read_entry(x)
print("REQUEST %d" % (x + 1))
# get images from tree
event_images = io.get_data(larcv.kProductImage2D,"wire")
# load images into client
for iimg in range(event_images.Image2DArray().size()):
client.addImageAsPixelList( event_images.Image2DArray().at(iimg), 10.0 )
# send images and get reply
client.request()
time.sleep(1)
# get the images back
reply_img_v = std.vector("larcv::Image2D")()
client.takeImages( reply_img_v )
print "returned %d images"%(reply_img_v.size())
for iimg in xrange(reply_img_v.size()):
print " img[{}] {}".format(iimg,reply_img_v.at(iimg).meta().dump())
print "[ENTER] to end"
raw_input()
|
run_unittests.py
|
#!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import stat
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
import zipfile, tarfile
import hashlib
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import typing as T
import mesonbuild.mlog
import mesonbuild.depfile
import mesonbuild.dependencies.base
import mesonbuild.compilers
import mesonbuild.envconfig
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.interpreterbase import typed_pos_args, InvalidArguments
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version, is_windows,
is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku, is_sunos,
windows_proof_rmtree, windows_proof_rm, python_command,
version_compare, split_args, quote_arg, relpath, is_linux, git
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException, OptionKey
from mesonbuild.dependencies import PkgConfigDependency
from mesonbuild.programs import ExternalProgram
from mesonbuild.build import Target, ConfigurationData
import mesonbuild.modules.pkgconfig
from mesonbuild.scripts import destdir_join
from mesonbuild.mtest import TAPParser, TestResult
from mesonbuild.wrap.wrap import PackageDefinition, WrapException
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
if T.TYPE_CHECKING:
from mesonbuild.compilers import Compiler
URLOPEN_TIMEOUT = 5
@contextmanager
def chdir(path: str):
curdir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(curdir)
def get_dynamic_section_entry(fname: str, entry: str) -> T.Optional[str]:
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF platforms')
try:
raw_out = subprocess.check_output(['readelf', '-d', fname],
universal_newlines=True)
except FileNotFoundError:
# FIXME: Try using depfixer.py:Elf() as a fallback
raise unittest.SkipTest('readelf not found')
pattern = re.compile(entry + r': \[(.*?)\]')
for line in raw_out.split('\n'):
m = pattern.search(line)
if m is not None:
return str(m.group(1))
return None # The file did not contain the specified entry.
def get_soname(fname: str) -> T.Optional[str]:
return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname: str) -> T.Optional[str]:
raw = get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
# Get both '' and None here
if not raw:
return None
# nix/nixos adds a bunch of stuff to the rpath out of necessity that we
# don't check for, so clear those
final = ':'.join([e for e in raw.split(':') if not e.startswith('/nix')])
return final
def is_tarball():
if not os.path.isdir('docs'):
return True
return False
def is_ci():
if 'CI' in os.environ:
return True
return False
def _git_init(project_dir):
# If a user has git configuration init.defaultBranch set we want to override that
with tempfile.TemporaryDirectory() as d:
out = git(['--version'], str(d))[1]
if version_compare(mesonbuild.environment.search_version(out), '>= 2.28'):
extra_cmd = ['--initial-branch', 'master']
else:
extra_cmd = []
subprocess.check_call(['git', 'init'] + extra_cmd, cwd=project_dir, stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'config',
'user.name', 'Author Person'], cwd=project_dir)
subprocess.check_call(['git', 'config',
'user.email', 'teh_coderz@example.com'], cwd=project_dir)
_git_add_all(project_dir)
def _git_add_all(project_dir):
subprocess.check_call('git add *', cwd=project_dir, shell=True,
stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
stdout=subprocess.DEVNULL)
@functools.lru_cache()
def is_real_gnu_compiler(path):
'''
Check if the gcc we have is a real gcc and not a macOS wrapper around clang
'''
if not path:
return False
out = subprocess.check_output([path, '--version'], universal_newlines=True, stderr=subprocess.STDOUT)
return 'Free Software Foundation' in out
def skipIfNoExecutable(exename):
'''
Skip this test if the given executable is not found.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if shutil.which(exename) is None:
raise unittest.SkipTest(exename + ' not found')
return func(*args, **kwargs)
return wrapped
return wrapper
def skipIfNoPkgconfig(f):
'''
Skip this test if no pkg-config is found, unless we're on CI.
This allows users to run our test suite without having
pkg-config installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
Note: Yes, we provide pkg-config even while running Windows CI
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
return f(*args, **kwargs)
return wrapped
def skipIfNoPkgconfigDep(depname):
'''
Skip this test if the given pkg-config dep is not found, unless we're on CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
raise unittest.SkipTest(f'pkg-config dependency {depname} not found.')
return func(*args, **kwargs)
return wrapped
return wrapper
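# Usage sketch (hypothetical test class): the decorator factories above are
# applied per test method and can be stacked, e.g.:
#
# class ExampleTests(unittest.TestCase):
#     @skipIfNoExecutable('valgrind')
#     @skipIfNoPkgconfigDep('glib-2.0')
#     def test_something(self):
#         pass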
def skip_if_no_cmake(f):
'''
Skip this test if no cmake is found, unless we're on CI.
This allows users to run our test suite without having
cmake installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('cmake') is None:
raise unittest.SkipTest('cmake not found')
return f(*args, **kwargs)
return wrapped
def skip_if_not_language(lang):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
env = get_fake_env()
f = getattr(env, f'detect_{lang}_compiler')
f(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest(f'No {lang} compiler found.')
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_env_set(key):
'''
Skip a test if a particular env is set, except when running under CI
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
old = None
if key in os.environ:
if not is_ci():
raise unittest.SkipTest(f'Env var {key!r} set, skipping')
old = os.environ.pop(key)
try:
return func(*args, **kwargs)
finally:
if old is not None:
os.environ[key] = old
return wrapped
return wrapper
def skip_if_not_base_option(feature):
"""Skip tests if The compiler does not support a given base option.
for example, ICC doesn't currently support b_sanitize.
"""
def actual(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
key = OptionKey(feature)
if key not in cc.base_options:
raise unittest.SkipTest(
f'{feature} not available with {cc.id}')
return f(*args, **kwargs)
return wrapped
return actual
@contextmanager
def temp_filename():
'''A context manager which provides a filename to an empty temporary file.
On exit the file will be deleted.
'''
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
yield filename
finally:
try:
os.remove(filename)
except OSError:
pass
@contextmanager
def no_pkgconfig():
'''
A context manager that overrides shutil.which and ExternalProgram to force
them to return None for pkg-config to simulate it not existing.
'''
old_which = shutil.which
old_search = ExternalProgram._search
def new_search(self, name, search_dir):
if name == 'pkg-config':
return [None]
return old_search(self, name, search_dir)
def new_which(cmd, *kwargs):
if cmd == 'pkg-config':
return None
return old_which(cmd, *kwargs)
shutil.which = new_which
ExternalProgram._search = new_search
try:
yield
finally:
shutil.which = old_which
ExternalProgram._search = old_search
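# Usage sketch (hypothetical test body): inside the context manager both
# shutil.which('pkg-config') and ExternalProgram lookups report pkg-config
# as missing, so a test can assert the fallback path is taken.
#
# with no_pkgconfig():
#     self.assertIsNone(shutil.which('pkg-config'))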
class InternalTests(unittest.TestCase):
def test_version_number(self):
searchfunc = mesonbuild.environment.search_version
self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.128'), '2016.10.128')
self.assertEqual(searchfunc('2016.10.128'), '2016.10.128')
self.assertEqual(searchfunc('2016.10'), '2016.10')
self.assertEqual(searchfunc('2016.10 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('oops v1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.oops 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.x'), 'unknown version')
def test_mode_symbolic_to_bits(self):
modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
self.assertEqual(modefunc('---------'), 0)
self.assertEqual(modefunc('r--------'), stat.S_IRUSR)
self.assertEqual(modefunc('---r-----'), stat.S_IRGRP)
self.assertEqual(modefunc('------r--'), stat.S_IROTH)
self.assertEqual(modefunc('-w-------'), stat.S_IWUSR)
self.assertEqual(modefunc('----w----'), stat.S_IWGRP)
self.assertEqual(modefunc('-------w-'), stat.S_IWOTH)
self.assertEqual(modefunc('--x------'), stat.S_IXUSR)
self.assertEqual(modefunc('-----x---'), stat.S_IXGRP)
self.assertEqual(modefunc('--------x'), stat.S_IXOTH)
self.assertEqual(modefunc('--S------'), stat.S_ISUID)
self.assertEqual(modefunc('-----S---'), stat.S_ISGID)
self.assertEqual(modefunc('--------T'), stat.S_ISVTX)
self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR)
self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP)
self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH)
self.assertEqual(modefunc('rwx------'), stat.S_IRWXU)
self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG)
self.assertEqual(modefunc('------rwx'), stat.S_IRWXO)
# We could keep listing combinations exhaustively but that seems
# tedious and pointless. Just test a few more.
self.assertEqual(modefunc('rwxr-xr-x'),
stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
self.assertEqual(modefunc('rw-r--r--'),
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP |
stat.S_IROTH)
self.assertEqual(modefunc('rwsr-x---'),
stat.S_IRWXU | stat.S_ISUID |
stat.S_IRGRP | stat.S_IXGRP)
def test_compiler_args_class_none_flush(self):
cc = mesonbuild.compilers.ClangCCompiler([], 'fake', MachineChoice.HOST, False, mock.Mock())
a = cc.compiler_args(['-I.'])
# First, check that the tree construction deduplicates the correct -I argument
a += ['-I..']
a += ['-I./tests/']
a += ['-I./tests2/']
# Think of this as an assertion; we cannot actually apply it here, otherwise CompilerArgs would already flush the pending changes:
# assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..', '-I.'])
a += ['-I.']
a += ['-I.', '-I./tests/']
self.assertEqual(a, ['-I.', '-I./tests/', '-I./tests2/', '-I..'])
# Then check that, once CompilerArgs has already built the container list, deduplication keeps the correct copy
a += ['-I.', '-I./tests2/']
self.assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..'])
def test_compiler_args_class_d(self):
d = mesonbuild.compilers.DmdDCompiler([], 'fake', MachineChoice.HOST, 'info', 'arch')
# check include order is kept when deduplicating
a = d.compiler_args(['-Ifirst', '-Isecond', '-Ithird'])
a += ['-Ifirst']
self.assertEqual(a, ['-Ifirst', '-Isecond', '-Ithird'])
def test_compiler_args_class_clike(self):
cc = mesonbuild.compilers.ClangCCompiler([], 'fake', MachineChoice.HOST, False, mock.Mock())
# Test that empty initialization works
a = cc.compiler_args()
self.assertEqual(a, [])
# Test that list initialization works
a = cc.compiler_args(['-I.', '-I..'])
self.assertEqual(a, ['-I.', '-I..'])
# Test that there is no de-dup on initialization
self.assertEqual(cc.compiler_args(['-I.', '-I.']), ['-I.', '-I.'])
## Test that appending works
a.append('-I..')
self.assertEqual(a, ['-I..', '-I.'])
a.append('-O3')
self.assertEqual(a, ['-I..', '-I.', '-O3'])
## Test that in-place addition works
a += ['-O2', '-O2']
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
# Test that removal works
a.remove('-O2')
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
# Test that de-dup happens on addition
a += ['-Ifoo', '-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# .extend() is just +=, so we don't test it
## Test that addition works
# Test that adding a list with just one old arg works and yields the same array
a = a + ['-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# Test that adding a list with one arg new and one old works
a = a + ['-Ifoo', '-Ibaz']
self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
# Test that adding args that must be prepended and appended works
a = a + ['-Ibar', '-Wall']
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
## Test that reflected addition works
# Test that adding to a list with just one old arg works and yields the same array
a = ['-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
# Test that adding to a list with just one new arg that is not pre-pended works
a = ['-Werror'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with two new args preserves the order
a = ['-Ldir', '-Lbah'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with old args does nothing
a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
## Test that adding libraries works
l = cc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Adding a library and a libpath appends both correctly
l += ['-Lbardir', '-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
# Adding the same library again does nothing
l += ['-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
## Test that 'direct' append and extend works
l = cc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
## Test --start/end-group
linker = mesonbuild.linkers.GnuBFDDynamicLinker([], MachineChoice.HOST, '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding a non-library argument doesn't include it in the group
l += ['-Lfoo', '-Wl,--export-dynamic']
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
# -Wl,-ldl is detected as a library and gets added to the group
l.append('-Wl,-ldl')
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_compiler_args_remove_system(self):
## Test --start/end-group
linker = mesonbuild.linkers.GnuBFDDynamicLinker([], MachineChoice.HOST, '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
## Test that to_native removes all system includes
l += ['-isystem/usr/include', '-isystem=/usr/share/include', '-DSOMETHING_IMPORTANT=1', '-isystem', '/usr/local/include']
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group', '-DSOMETHING_IMPORTANT=1'])
def test_string_templates_substitution(self):
dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
substfunc = mesonbuild.mesonlib.substitute_values
ME = mesonbuild.mesonlib.MesonException
# Identity
self.assertEqual(dictfunc([], []), {})
# One input, no outputs
inputs = ['bar/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
# One input, one output
inputs = ['bar/foo.c.in']
outputs = ['out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + outputs + cmd[2:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
# One input, one output with a subdir
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Two inputs, no outputs
inputs = ['bar/foo.c.in', 'baz/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
cmd = ['@INPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
cmd = ['@INPUT0@', '@INPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Too many inputs
cmd = ['@PLAINNAME@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@BASENAME@']
self.assertRaises(ME, substfunc, cmd, d)
# No outputs
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTPUT0@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTDIR@']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, one output
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, two outputs
outputs = ['dir/out.c', 'dir/out2.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
'@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Many outputs, can't use @OUTPUT@ like this
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
config = ConfigParser()
config['binaries'] = {
'c': '\'/usr/bin/gcc\'',
}
config['host_machine'] = {
'system': '\'linux\'',
'cpu_family': '\'arm\'',
'cpu': '\'armv7\'',
'endian': '\'little\'',
}
# Can not be used as context manager because we need to
# open it a second time and this is not possible on
# Windows.
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.flush()
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
detected_value = env.need_exe_wrapper()
os.unlink(configfilename)
desired_value = not detected_value
config['properties'] = {
'needs_exe_wrapper': 'true' if desired_value else 'false'
}
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
forced_value = env.need_exe_wrapper()
os.unlink(configfilename)
self.assertEqual(forced_value, desired_value)
def test_listify(self):
listify = mesonbuild.mesonlib.listify
# Test sanity
self.assertEqual([1], listify(1))
self.assertEqual([], listify([]))
self.assertEqual([1], listify([1]))
# Test flattening
self.assertEqual([1, 2, 3], listify([1, [2, 3]]))
self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))
self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
# Test flattening and unholdering
holder1 = ObjectHolder(1)
self.assertEqual([holder1], listify(holder1))
self.assertEqual([holder1], listify([holder1]))
self.assertEqual([holder1, 2], listify([holder1, 2]))
self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))
def test_unholder(self):
unholder = mesonbuild.mesonlib.unholder
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
holders = [holder1, holder3]
self.assertEqual(1, unholder(holder1))
self.assertEqual([1], unholder([holder1]))
self.assertEqual([1, 3], unholder(holders))
def test_extract_as_list(self):
extract = mesonbuild.mesonlib.extract_as_list
# Test sanity
kwargs = {'sources': [1, 2, 3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
self.assertEqual(kwargs, {'sources': [1, 2, 3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
self.assertEqual(kwargs, {})
# Test unholding
holder3 = ObjectHolder(3)
kwargs = {'sources': [1, 2, holder3]}
self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
# flatten nested lists
kwargs = {'sources': [1, [2, [3]]]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
def test_pkgconfig_module(self):
dummystate = mock.Mock()
dummystate.subproject = 'dummy'
_mock = mock.Mock(spec=mesonbuild.dependencies.ExternalDependency)
_mock.pcdep = mock.Mock()
_mock.pcdep.name = "some_name"
_mock.version_reqs = []
_mock = mock.Mock(held_object=_mock)
# pkgconfig dependency as lib
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_libs([_mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
# pkgconfig dependency as requires
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_reqs([_mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
shr = patterns[platform]['shared']
stc = patterns[platform]['static']
shrstc = shr + tuple([x for x in stc if x not in shr])
stcshr = stc + tuple([x for x in shr if x not in stc])
p = cc.get_library_naming(env, LibType.SHARED)
self.assertEqual(p, shr)
p = cc.get_library_naming(env, LibType.STATIC)
self.assertEqual(p, stc)
p = cc.get_library_naming(env, LibType.PREFER_STATIC)
self.assertEqual(p, stcshr)
p = cc.get_library_naming(env, LibType.PREFER_SHARED)
self.assertEqual(p, shrstc)
# Test find library by mocking up openbsd
if platform != 'openbsd':
return
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
f.write('')
found = cc._find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
def test_find_library_patterns(self):
'''
Unit test for the library search patterns used by find_library()
'''
unix_static = ('lib{}.a', '{}.a')
msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
# This is the priority list of pattern matching for library searching
patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
'static': unix_static},
'linux': {'shared': ('lib{}.so', '{}.so'),
'static': unix_static},
'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
'static': unix_static},
'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
'lib{}.dll.a', '{}.dll', '{}.dll.a'),
'static': ('cyg{}.a',) + unix_static},
'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
'static': msvc_static},
'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
'{}.dll.a', '{}.lib', '{}.dll'),
'static': msvc_static}}
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if is_osx():
self._test_all_naming(cc, env, patterns, 'darwin')
elif is_cygwin():
self._test_all_naming(cc, env, patterns, 'cygwin')
elif is_windows():
if cc.get_argument_syntax() == 'msvc':
self._test_all_naming(cc, env, patterns, 'windows-msvc')
else:
self._test_all_naming(cc, env, patterns, 'windows-mingw')
elif is_openbsd():
self._test_all_naming(cc, env, patterns, 'openbsd')
else:
self._test_all_naming(cc, env, patterns, 'linux')
env.machines.host.system = 'openbsd'
self._test_all_naming(cc, env, patterns, 'openbsd')
env.machines.host.system = 'darwin'
self._test_all_naming(cc, env, patterns, 'darwin')
env.machines.host.system = 'cygwin'
self._test_all_naming(cc, env, patterns, 'cygwin')
env.machines.host.system = 'windows'
self._test_all_naming(cc, env, patterns, 'windows-mingw')
@skipIfNoPkgconfig
def test_pkgconfig_parse_libs(self):
'''
Unit test for parsing of pkg-config output to search for libraries
https://github.com/mesonbuild/meson/issues/3951
'''
def create_static_lib(name):
if not is_osx():
name.open('w').close()
return
src = name.with_suffix('.c')
out = name.with_suffix('.o')
with src.open('w') as f:
f.write('int meson_foobar (void) { return 0; }')
subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
subprocess.check_call(['ar', 'csr', str(name), str(out)])
with tempfile.TemporaryDirectory() as tmpdir:
pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
env = get_fake_env()
compiler = env.detect_c_compiler(MachineChoice.HOST)
env.coredata.compilers.host = {'c': compiler}
env.coredata.options[OptionKey('link_args', lang='c')] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
p2.mkdir()
# libfoo.a is in one prefix
create_static_lib(p1 / 'libfoo.a')
# libbar.a is in both prefixes
create_static_lib(p1 / 'libbar.a')
create_static_lib(p2 / 'libbar.a')
# Ensure that we never statically link to these
create_static_lib(p1 / 'libpthread.a')
create_static_lib(p1 / 'libm.a')
create_static_lib(p1 / 'libc.a')
create_static_lib(p1 / 'libdl.a')
create_static_lib(p1 / 'librt.a')
def fake_call_pkgbin(self, args, env=None):
if '--libs' not in args:
return 0, '', ''
if args[-1] == 'foo':
return 0, f'-L{p2.as_posix()} -lfoo -L{p1.as_posix()} -lbar', ''
if args[-1] == 'bar':
return 0, f'-L{p2.as_posix()} -lbar', ''
if args[-1] == 'internal':
return 0, f'-L{p1.as_posix()} -lpthread -lm -lc -lrt -ldl', ''
old_call = PkgConfigDependency._call_pkgbin
old_check = PkgConfigDependency.check_pkgconfig
PkgConfigDependency._call_pkgbin = fake_call_pkgbin
PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
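# Monkeypatch technique: the class attributes are swapped for fakes here
# and restored in the finally block below, so later tests and in-process
# configures see the real pkg-config again.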
# Test begins
try:
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('foo', env, kwargs)
self.assertEqual(foo_dep.get_link_args(),
[(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
bar_dep = PkgConfigDependency('bar', env, kwargs)
self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
internal_dep = PkgConfigDependency('internal', env, kwargs)
if compiler.get_argument_syntax() == 'msvc':
self.assertEqual(internal_dep.get_link_args(), [])
else:
link_args = internal_dep.get_link_args()
for link_arg in link_args:
for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
self.assertNotIn(f'lib{lib}.a', link_arg, msg=link_args)
finally:
# Test ends
PkgConfigDependency._call_pkgbin = old_call
PkgConfigDependency.check_pkgconfig = old_check
# Reset dependency class to ensure that in-process configure doesn't mess up
PkgConfigDependency.pkgbin_cache = {}
PkgConfigDependency.class_pkgbin = PerMachine(None, None)
def test_version_compare(self):
comparefunc = mesonbuild.mesonlib.version_compare_many
for (a, b, result) in [
('0.99.beta19', '>= 0.99.beta14', True),
]:
self.assertEqual(comparefunc(a, b)[0], result)
for (a, b, op) in [
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
("1.0010", "1.9", operator.gt),
("1.05", "1.5", operator.eq),
("1.0", "1", operator.gt),
("2.50", "2.5", operator.gt),
("fc4", "fc.4", operator.eq),
("FC5", "fc4", operator.lt),
("2a", "2.0", operator.lt),
("1.0", "1.fc4", operator.gt),
("3.0.0_fc", "3.0.0.fc", operator.eq),
# from RPM tests
("1.0", "1.0", operator.eq),
("1.0", "2.0", operator.lt),
("2.0", "1.0", operator.gt),
("2.0.1", "2.0.1", operator.eq),
("2.0", "2.0.1", operator.lt),
("2.0.1", "2.0", operator.gt),
("2.0.1a", "2.0.1a", operator.eq),
("2.0.1a", "2.0.1", operator.gt),
("2.0.1", "2.0.1a", operator.lt),
("5.5p1", "5.5p1", operator.eq),
("5.5p1", "5.5p2", operator.lt),
("5.5p2", "5.5p1", operator.gt),
("5.5p10", "5.5p10", operator.eq),
("5.5p1", "5.5p10", operator.lt),
("5.5p10", "5.5p1", operator.gt),
("10xyz", "10.1xyz", operator.lt),
("10.1xyz", "10xyz", operator.gt),
("xyz10", "xyz10", operator.eq),
("xyz10", "xyz10.1", operator.lt),
("xyz10.1", "xyz10", operator.gt),
("xyz.4", "xyz.4", operator.eq),
("xyz.4", "8", operator.lt),
("8", "xyz.4", operator.gt),
("xyz.4", "2", operator.lt),
("2", "xyz.4", operator.gt),
("5.5p2", "5.6p1", operator.lt),
("5.6p1", "5.5p2", operator.gt),
("5.6p1", "6.5p1", operator.lt),
("6.5p1", "5.6p1", operator.gt),
("6.0.rc1", "6.0", operator.gt),
("6.0", "6.0.rc1", operator.lt),
("10b2", "10a1", operator.gt),
("10a2", "10b2", operator.lt),
("1.0aa", "1.0aa", operator.eq),
("1.0a", "1.0aa", operator.lt),
("1.0aa", "1.0a", operator.gt),
("10.0001", "10.0001", operator.eq),
("10.0001", "10.1", operator.eq),
("10.1", "10.0001", operator.eq),
("10.0001", "10.0039", operator.lt),
("10.0039", "10.0001", operator.gt),
("4.999.9", "5.0", operator.lt),
("5.0", "4.999.9", operator.gt),
("20101121", "20101121", operator.eq),
("20101121", "20101122", operator.lt),
("20101122", "20101121", operator.gt),
("2_0", "2_0", operator.eq),
("2.0", "2_0", operator.eq),
("2_0", "2.0", operator.eq),
("a", "a", operator.eq),
("a+", "a+", operator.eq),
("a+", "a_", operator.eq),
("a_", "a+", operator.eq),
("+a", "+a", operator.eq),
("+a", "_a", operator.eq),
("_a", "+a", operator.eq),
("+_", "+_", operator.eq),
("_+", "+_", operator.eq),
("_+", "_+", operator.eq),
("+", "_", operator.eq),
("_", "+", operator.eq),
# other tests
('0.99.beta19', '0.99.beta14', operator.gt),
("1.0.0", "2.0.0", operator.lt),
(".0.0", "2.0.0", operator.lt),
("alpha", "beta", operator.lt),
("1.0", "1.0.0", operator.lt),
("2.456", "2.1000", operator.lt),
("2.1000", "3.111", operator.lt),
("2.001", "2.1", operator.eq),
("2.34", "2.34", operator.eq),
("6.1.2", "6.3.8", operator.lt),
("1.7.3.0", "2.0.0", operator.lt),
("2.24.51", "2.25", operator.lt),
("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
("3.4.1", "3.4b1", operator.gt),
("041206", "200090325", operator.lt),
("0.6.2+git20130413", "0.6.2", operator.gt),
("2.6.0+bzr6602", "2.6.0", operator.gt),
("2.6.0", "2.6b2", operator.gt),
("2.6.0+bzr6602", "2.6b2x", operator.gt),
("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
("15.8b", "15.8.0.1", operator.lt),
("1.2rc1", "1.2.0", operator.lt),
]:
ver_a = Version(a)
ver_b = Version(b)
if op is operator.eq:
for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
self.assertTrue(o(ver_a, ver_b), f'{ver_a} {name} {ver_b}')
if op is operator.lt:
for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), f'{ver_a} {name} {ver_b}')
for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), f'{ver_a} {name} {ver_b}')
if op is operator.gt:
for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), f'{ver_a} {name} {ver_b}')
for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), f'{ver_a} {name} {ver_b}')
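# Version implements rich comparisons, so the same semantics can be
# exercised directly, e.g. (a sketch using values from the table above):
#   assert Version('1.2rc1') < Version('1.2.0')
#   assert Version('2.0') == Version('2_0')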
def test_msvc_toolset_version(self):
'''
Ensure that the toolset version returns the correct value for this MSVC
'''
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
toolset_ver = cc.get_toolset_version()
self.assertIsNotNone(toolset_ver)
# Visual Studio 2015 and older versions do not define VCToolsVersion
# TODO: ICL doesn't set this in the VS2015 profile either
if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
return
if 'VCToolsVersion' in os.environ:
vctools_ver = os.environ['VCToolsVersion']
else:
self.assertIn('VCINSTALLDIR', os.environ)
# See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
self.assertTrue(vctools_ver.startswith(toolset_ver),
msg=f'{vctools_ver!r} does not start with {toolset_ver!r}')
def test_split_args(self):
split_args = mesonbuild.mesonlib.split_args
join_args = mesonbuild.mesonlib.join_args
if is_windows():
test_data = [
# examples from https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments
(r'"a b c" d e', ['a b c', 'd', 'e'], True),
(r'"ab\"c" "\\" d', ['ab"c', '\\', 'd'], False),
(r'a\\\b d"e f"g h', [r'a\\\b', 'de fg', 'h'], False),
(r'a\\\"b c d', [r'a\"b', 'c', 'd'], False),
(r'a\\\\"b c" d e', [r'a\\b c', 'd', 'e'], False),
# other basics
(r'""', [''], True),
(r'a b c d "" e', ['a', 'b', 'c', 'd', '', 'e'], True),
(r"'a b c' d e", ["'a", 'b', "c'", 'd', 'e'], True),
(r"'a&b&c' d e", ["'a&b&c'", 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], True),
(r"'a & b & c d e'", ["'a", '&', 'b', '&', 'c', 'd', "e'"], True),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
# more illustrative tests
(r'cl test.cpp /O1 /Fe:test.exe', ['cl', 'test.cpp', '/O1', '/Fe:test.exe'], True),
(r'cl "test.cpp /O1 /Fe:test.exe"', ['cl', 'test.cpp /O1 /Fe:test.exe'], True),
(r'cl /DNAME=\"Bob\" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob\"" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], True),
(r'cl /DNAME=\"Bob, Alice\" test.cpp', ['cl', '/DNAME="Bob,', 'Alice"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob, Alice\"" test.cpp', ['cl', '/DNAME="Bob, Alice"', 'test.cpp'], True),
(r'cl C:\path\with\backslashes.cpp', ['cl', r'C:\path\with\backslashes.cpp'], True),
(r'cl C:\\path\\with\\double\\backslashes.cpp', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], True),
(r'cl "C:\\path\\with\\double\\backslashes.cpp"', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], False),
(r'cl C:\path with spaces\test.cpp', ['cl', r'C:\path', 'with', r'spaces\test.cpp'], False),
(r'cl "C:\path with spaces\test.cpp"', ['cl', r'C:\path with spaces\test.cpp'], True),
(r'cl /DPATH="C:\path\with\backslashes test.cpp', ['cl', r'/DPATH=C:\path\with\backslashes test.cpp'], False),
(r'cl /DPATH=\"C:\\ends\\with\\backslashes\\\" test.cpp', ['cl', r'/DPATH="C:\\ends\\with\\backslashes\"', 'test.cpp'], False),
(r'cl /DPATH="C:\\ends\\with\\backslashes\\" test.cpp', ['cl', '/DPATH=C:\\\\ends\\\\with\\\\backslashes\\', 'test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\"', 'test.cpp'], True),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\ test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\"', 'test.cpp'], True),
]
else:
test_data = [
(r"'a b c' d e", ['a b c', 'd', 'e'], True),
(r"a/b/c d e", ['a/b/c', 'd', 'e'], True),
(r"a\b\c d e", [r'abc', 'd', 'e'], False),
(r"a\\b\\c d e", [r'a\b\c', 'd', 'e'], False),
(r'"a b c" d e', ['a b c', 'd', 'e'], False),
(r'"a\\b\\c\\" d e', ['a\\b\\c\\', 'd', 'e'], False),
(r"'a\b\c\' d e", ['a\\b\\c\\', 'd', 'e'], True),
(r"'a&b&c' d e", ['a&b&c', 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], False),
(r"'a & b & c d e'", ['a & b & c d e'], True),
(r"abd'e f'g h", [r'abde fg', 'h'], False),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
('g++ -DNAME="Bob" test.cpp', ['g++', '-DNAME=Bob', 'test.cpp'], False),
("g++ '-DNAME=\"Bob\"' test.cpp", ['g++', '-DNAME="Bob"', 'test.cpp'], True),
('g++ -DNAME="Bob, Alice" test.cpp', ['g++', '-DNAME=Bob, Alice', 'test.cpp'], False),
("g++ '-DNAME=\"Bob, Alice\"' test.cpp", ['g++', '-DNAME="Bob, Alice"', 'test.cpp'], True),
]
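# The roundtrip flag marks inputs whose quoting is already canonical,
# i.e. where join_args(split_args(cmd)) == cmd holds; for the others,
# splitting still works but re-joining produces an equivalent,
# differently quoted command line.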
for (cmd, expected, roundtrip) in test_data:
self.assertEqual(split_args(cmd), expected)
if roundtrip:
self.assertEqual(join_args(expected), cmd)
def test_quote_arg(self):
split_args = mesonbuild.mesonlib.split_args
quote_arg = mesonbuild.mesonlib.quote_arg
if is_windows():
test_data = [
('', '""'),
('arg1', 'arg1'),
('/option1', '/option1'),
('/Ovalue', '/Ovalue'),
('/OBob&Alice', '/OBob&Alice'),
('/Ovalue with spaces', r'"/Ovalue with spaces"'),
(r'/O"value with spaces"', r'"/O\"value with spaces\""'),
(r'/OC:\path with spaces\test.exe', r'"/OC:\path with spaces\test.exe"'),
('/LIBPATH:C:\\path with spaces\\ends\\with\\backslashes\\', r'"/LIBPATH:C:\path with spaces\ends\with\backslashes\\"'),
('/LIBPATH:"C:\\path with spaces\\ends\\with\\backslashes\\\\"', r'"/LIBPATH:\"C:\path with spaces\ends\with\backslashes\\\\\""'),
(r'/DMSG="Alice said: \"Let\'s go\""', r'"/DMSG=\"Alice said: \\\"Let\'s go\\\"\""'),
]
else:
test_data = [
('arg1', 'arg1'),
('--option1', '--option1'),
('-O=value', '-O=value'),
('-O=Bob&Alice', "'-O=Bob&Alice'"),
('-O=value with spaces', "'-O=value with spaces'"),
('-O="value with spaces"', '\'-O=\"value with spaces\"\''),
('-O=/path with spaces/test', '\'-O=/path with spaces/test\''),
('-DMSG="Alice said: \\"Let\'s go\\""', "'-DMSG=\"Alice said: \\\"Let'\"'\"'s go\\\"\"'"),
]
for (arg, expected) in test_data:
self.assertEqual(quote_arg(arg), expected)
self.assertEqual(split_args(expected)[0], arg)
def test_depfile(self):
for (f, target, expdeps) in [
# empty, unknown target
([''], 'unknown', set()),
# simple target & deps
(['meson/foo.o : foo.c foo.h'], 'meson/foo.o', set({'foo.c', 'foo.h'})),
(['meson/foo.o: foo.c foo.h'], 'foo.c', set()),
# get all deps
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'meson/foo.o', set({'foo.c', 'foo.h', 'gen.py'})),
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'foo.c', set({'gen.py'})),
# line continuation, multiple targets
(['foo.o \\', 'foo.h: bar'], 'foo.h', set({'bar'})),
(['foo.o \\', 'foo.h: bar'], 'foo.o', set({'bar'})),
# \\ handling
(['foo: Program\\ F\\iles\\\\X'], 'foo', set({'Program Files\\X'})),
# $ handling
(['f$o.o: c/b'], 'f$o.o', set({'c/b'})),
(['f$$o.o: c/b'], 'f$o.o', set({'c/b'})),
# cycles
(['a: b', 'b: a'], 'a', set({'a', 'b'})),
(['a: b', 'b: a'], 'b', set({'a', 'b'})),
]:
d = mesonbuild.depfile.DepFile(f)
deps = d.get_all_dependencies(target)
self.assertEqual(sorted(deps), sorted(expdeps))
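# The expectations above pin down the depfile dialect being parsed: deps
# resolve transitively ('foo.c: gen.py' feeds into meson/foo.o), a
# backslash escapes spaces and itself in names, '$$' unescapes to '$'
# (so 'f$o.o' and 'f$$o.o' both name target 'f$o.o'), and cyclic rules
# terminate by yielding every target in the cycle.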
def test_log_once(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once('foo')
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual, 'foo', actual)
def test_log_once_ansi(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
f.truncate()
mesonbuild.mlog.warning('bar', once=True)
mesonbuild.mlog.warning('bar', once=True)
actual = f.getvalue().strip()
self.assertEqual(actual.count('bar'), 1, actual)
def test_sort_libpaths(self):
sort_libpaths = mesonbuild.dependencies.base.sort_libpaths
self.assertEqual(sort_libpaths(
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/local/lib', '/home/mesonuser/.local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/libdata/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
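# sort_libpaths evidently orders library dirs by how early a matching
# pkgconfig dir appears in the second list, leaving unmatched paths
# (here /usr/lib) at the end; the input order itself must not matter.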
def test_dependency_factory_order(self):
b = mesonbuild.dependencies.base
with tempfile.TemporaryDirectory() as tmpdir:
with chdir(tmpdir):
env = get_fake_env()
env.scratch_dir = tmpdir
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.PKGCONFIG, b.DependencyMethods.CMAKE]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['pkgconfig', 'cmake'])
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.CMAKE, b.DependencyMethods.PKGCONFIG]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['cmake', 'pkgconfig'])
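# The point exercised here: DependencyFactory yields candidate
# constructors in exactly the order given by its methods kwarg, so
# callers control which lookup (pkg-config vs CMake) is attempted first.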
def test_validate_json(self) -> None:
"""Validate the json schema for the test cases."""
try:
from jsonschema import validate, ValidationError
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('Python jsonschema module not found.')
with Path('data/test.schema.json').open() as f:
schema = json.load(f)
errors = [] # type: T.List[T.Tuple[Path, Exception]]
for p in Path('test cases').glob('**/test.json'):
with p.open() as f:
try:
validate(json.load(f), schema=schema)
except ValidationError as e:
errors.append((p.resolve(), e))
for f, e in errors:
print(f'Failed to validate: "{f}"')
print(str(e))
self.assertFalse(errors)
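# The tests below exercise the typed_pos_args decorator contract: it
# validates the count and types of positional arguments (including
# varargs and optional trailing args), hands the wrapped function a
# tuple, and raises InvalidArguments with a descriptive message on
# any mismatch.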
def test_typed_pos_args_types(self) -> None:
@typed_pos_args('foo', str, int, bool)
def _(obj, node, args: T.Tuple[str, int, bool], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], int)
self.assertIsInstance(args[2], bool)
_(None, mock.Mock(), ['string', 1, False], None)
def test_typed_pos_args_types_invalid(self) -> None:
@typed_pos_args('foo', str, int, bool)
def _(obj, node, args: T.Tuple[str, int, bool], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 1.0, False], None)
self.assertEqual(str(cm.exception), 'foo argument 2 was of type "float" but should have been "int"')
def test_typed_pos_args_types_wrong_number(self) -> None:
@typed_pos_args('foo', str, int, bool)
def _(obj, node, args: T.Tuple[str, int, bool], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 1], None)
self.assertEqual(str(cm.exception), 'foo takes exactly 3 arguments, but got 2.')
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 1, True, True], None)
self.assertEqual(str(cm.exception), 'foo takes exactly 3 arguments, but got 4.')
def test_typed_pos_args_varargs(self) -> None:
@typed_pos_args('foo', str, varargs=str)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], list)
self.assertIsInstance(args[1][0], str)
self.assertIsInstance(args[1][1], str)
_(None, mock.Mock(), ['string', 'var', 'args'], None)
def test_typed_pos_args_varargs_not_given(self) -> None:
@typed_pos_args('foo', str, varargs=str)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], list)
self.assertEqual(args[1], [])
_(None, mock.Mock(), ['string'], None)
def test_typed_pos_args_varargs_invalid(self) -> None:
@typed_pos_args('foo', str, varargs=str)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args', 0], None)
self.assertEqual(str(cm.exception), 'foo argument 4 was of type "int" but should have been "str"')
def test_typed_pos_args_varargs_invalid_multiple_types(self) -> None:
@typed_pos_args('foo', str, varargs=(str, list))
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args', 0], None)
self.assertEqual(str(cm.exception), 'foo argument 4 was of type "int" but should have been one of: "str", "list"')
def test_typed_pos_args_max_varargs(self) -> None:
@typed_pos_args('foo', str, varargs=str, max_varargs=5)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], list)
self.assertIsInstance(args[1][0], str)
self.assertIsInstance(args[1][1], str)
_(None, mock.Mock(), ['string', 'var', 'args'], None)
def test_typed_pos_args_max_varargs_exceeded(self) -> None:
@typed_pos_args('foo', str, varargs=str, max_varargs=1)
def _(obj, node, args: T.Tuple[str, T.Tuple[str, ...]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args'], None)
self.assertEqual(str(cm.exception), 'foo takes between 1 and 2 arguments, but got 3.')
def test_typed_pos_args_min_varargs(self) -> None:
@typed_pos_args('foo', varargs=str, max_varargs=2, min_varargs=1)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], list)
self.assertIsInstance(args[0][0], str)
self.assertIsInstance(args[0][1], str)
_(None, mock.Mock(), ['string', 'var'], None)
def test_typed_pos_args_min_varargs_not_met(self) -> None:
@typed_pos_args('foo', str, varargs=str, min_varargs=1)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(str(cm.exception), 'foo takes at least 2 arguments, but got 1.')
def test_typed_pos_args_min_and_max_varargs_exceeded(self) -> None:
@typed_pos_args('foo', str, varargs=str, min_varargs=1, max_varargs=2)
def _(obj, node, args: T.Tuple[str, T.Tuple[str, ...]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args', 'bar'], None)
self.assertEqual(str(cm.exception), 'foo takes between 2 and 3 arguments, but got 4.')
def test_typed_pos_args_min_and_max_varargs_not_met(self) -> None:
@typed_pos_args('foo', str, varargs=str, min_varargs=1, max_varargs=2)
def _(obj, node, args: T.Tuple[str, T.Tuple[str, ...]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(str(cm.exception), 'foo takes between 2 and 3 arguments, but got 1.')
def test_typed_pos_args_variadic_and_optional(self) -> None:
@typed_pos_args('foo', str, optargs=[str], varargs=str, min_varargs=0)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(AssertionError) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(
str(cm.exception),
'varargs and optargs not supported together as this would be ambiguous')
def test_typed_pos_args_min_optargs_not_met(self) -> None:
@typed_pos_args('foo', str, str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(str(cm.exception), 'foo takes at least 2 arguments, but got 1.')
def test_typed_pos_args_min_optargs_max_exceeded(self) -> None:
@typed_pos_args('foo', str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', '1', '2'], None)
self.assertEqual(str(cm.exception), 'foo takes at most 2 arguments, but got 3.')
def test_typed_pos_args_optargs_not_given(self) -> None:
@typed_pos_args('foo', str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertEqual(len(args), 2)
self.assertIsInstance(args[0], str)
self.assertEqual(args[0], 'string')
self.assertIsNone(args[1])
_(None, mock.Mock(), ['string'], None)
def test_typed_pos_args_optargs_some_given(self) -> None:
@typed_pos_args('foo', str, optargs=[str, int])
def _(obj, node, args: T.Tuple[str, T.Optional[str], T.Optional[int]], kwargs) -> None:
self.assertEqual(len(args), 3)
self.assertIsInstance(args[0], str)
self.assertEqual(args[0], 'string')
self.assertIsInstance(args[1], str)
self.assertEqual(args[1], '1')
self.assertIsNone(args[2])
_(None, mock.Mock(), ['string', '1'], None)
def test_typed_pos_args_optargs_all_given(self) -> None:
@typed_pos_args('foo', str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertEqual(len(args), 2)
self.assertIsInstance(args[0], str)
self.assertEqual(args[0], 'string')
self.assertIsInstance(args[1], str)
_(None, mock.Mock(), ['string', '1'], None)
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
hashcounter = re.compile('^ *(#)+')
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open() as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
Builtin-Options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env()
# FIXME: Support other compilers
cc = env.detect_c_compiler(MachineChoice.HOST)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
for comp in (cc, cpp):
for opt in comp.get_options():
self.assertIn(str(opt), md)
for opt in comp.base_options:
self.assertIn(str(opt), md)
self.assertNotIn('b_unknown', md)
@staticmethod
def _get_section_content(name, sections, md):
for section in sections:
if section and section.group(1) == name:
try:
next_section = next(sections)
end = next_section.start()
except StopIteration:
end = len(md)
# Extract the content for this section
return md[section.end():end]
raise RuntimeError(f'Could not find "{name}" heading')
def test_builtin_options_documented(self):
'''
Test that universal options and base options are documented in
Builtin-Options.md.
'''
from itertools import tee
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
found_entries = set()
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
# Extract the content for this section
content = self._get_section_content("Universal options", sections, md)
subsections = tee(re.finditer(r"^### (.+)$", content, re.MULTILINE))
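# itertools.tee is needed because _get_section_content() consumes the
# match iterator it is given; tee hands us two independent copies so both
# the "Directories" and "Core options" subsections can be extracted.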
subcontent1 = self._get_section_content("Directories", subsections[0], content)
subcontent2 = self._get_section_content("Core options", subsections[1], content)
for subcontent in (subcontent1, subcontent2):
# Find the option names
options = set()
# Match either a table row or a table heading separator: | ------ |
rows = re.finditer(r"^\|(?: (\w+) .* | *-+ *)\|", subcontent, re.MULTILINE)
# Skip the header of the first table
next(rows)
# Skip the heading separator of the first table
next(rows)
for m in rows:
value = m.group(1)
# End when the `buildtype` table starts
if value is None:
break
options.add(value)
self.assertEqual(len(found_entries & options), 0)
found_entries |= options
self.assertEqual(found_entries, {
*[str(k) for k in mesonbuild.coredata.BUILTIN_OPTIONS],
*[str(k) for k in mesonbuild.coredata.BUILTIN_OPTIONS_PER_MACHINE],
})
# Check that the `buildtype` table inside `Core options` matches how
# setting builtin options behaves
#
# Find all tables inside this subsection
tables = re.finditer(r"^\| (\w+) .* \|\n\| *[-|\s]+ *\|$", subcontent2, re.MULTILINE)
# Get the table we want using the header of the first column
table = self._get_section_content('buildtype', tables, subcontent2)
# Get table row data
rows = re.finditer(r"^\|(?: (\w+)\s+\| (\w+)\s+\| (\w+) .* | *-+ *)\|", table, re.MULTILINE)
env = get_fake_env()
for m in rows:
buildtype, debug, opt = m.groups()
if debug == 'true':
debug = True
elif debug == 'false':
debug = False
else:
raise RuntimeError(f'Invalid debug value {debug!r} in row:\n{m.group()}')
env.coredata.set_option(OptionKey('buildtype'), buildtype)
self.assertEqual(env.coredata.options[OptionKey('buildtype')].value, buildtype)
self.assertEqual(env.coredata.options[OptionKey('optimization')].value, opt)
self.assertEqual(env.coredata.options[OptionKey('debug')].value, debug)
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
content = self._get_section_content("CPU families", sections, md)
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
Test that each markdown file in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions:
self.assertIn(f, toc)
def test_vim_syntax_highlighting(self):
'''
Ensure that vim syntax highlighting files were updated for new
functions in the global namespace in build files.
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
def test_all_functions_defined_in_ast_interpreter(self):
'''
Ensure that all functions defined in the Interpreter are also defined
in the AstInterpreter (and vice versa).
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
astint = AstInterpreter('.', '', '')
self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
def test_mesondata_is_up_to_date(self):
from mesonbuild.mesondata import mesondata
err_msg = textwrap.dedent('''
###########################################################
### mesonbuild.mesondata is not up-to-date ###
### Please regenerate it by running tools/gen_data.py ###
###########################################################
''')
root_dir = Path(__file__).resolve().parent
mesonbuild_dir = root_dir / 'mesonbuild'
data_dirs = mesonbuild_dir.glob('**/data')
data_files = [] # type: T.List[T.Tuple[str, str]]
for i in data_dirs:
for p in i.iterdir():
data_files += [(p.relative_to(mesonbuild_dir).as_posix(), hashlib.sha256(p.read_bytes()).hexdigest())]
current_files = set(mesondata.keys())
scanned_files = {x[0] for x in data_files}
self.assertSetEqual(current_files, scanned_files, err_msg + 'Data files were added or removed\n')
errors = []
for i in data_files:
if mesondata[i[0]].sha256sum != i[1]:
errors += [i[0]]
self.assertListEqual(errors, [], err_msg + 'Files were changed')
class BasePlatformTests(unittest.TestCase):
prefix = '/usr'
libdir = 'lib'
def setUp(self):
super().setUp()
self.maxDiff = None
src_root = os.path.dirname(__file__)
src_root = os.path.join(os.getcwd(), src_root)
self.src_root = src_root
# Get the backend
# FIXME: Extract this from argv?
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
self.meson_args = ['--backend=' + self.backend.name]
self.meson_native_file = None
self.meson_cross_file = None
self.meson_command = python_command + [get_meson_script()]
self.setup_command = self.meson_command + self.meson_args
self.mconf_command = self.meson_command + ['configure']
self.mintro_command = self.meson_command + ['introspect']
self.wrap_command = self.meson_command + ['wrap']
self.rewrite_command = self.meson_command + ['rewrite']
# Backend-specific build commands
self.build_command, self.clean_command, self.test_command, self.install_command, \
self.uninstall_command = get_backend_commands(self.backend)
# Test directories
self.common_test_dir = os.path.join(src_root, 'test cases/common')
self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
self.linuxlike_test_dir = os.path.join(src_root, 'test cases/linuxlike')
# Misc stuff
self.orig_env = os.environ.copy()
if self.backend is Backend.ninja:
self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
else:
# VS doesn't have a stable output when no changes are done
# The Xcode backend is untested with unit tests, help welcome!
self.no_rebuild_stdout = [f'UNKNOWN BACKEND {self.backend.name!r}']
self.builddirs = []
self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
if not is_cygwin():
# Keep builddirs inside the source tree so that virus scanners
# don't complain
newdir = tempfile.mkdtemp(dir=os.getcwd())
else:
# But not on Cygwin because that breaks the umask tests. See:
# https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523
newdir = tempfile.mkdtemp()
# In case the directory is inside a symlinked directory, find the real
# path; otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(newdir)
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print(f"{log!r} doesn't exist")
return
with open(log, encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, *, workdir=None, override_envvars=None):
'''
Run a command while printing the stdout and stderr to stdout,
and also return a copy of it
'''
# If this call hangs, CI will just abort. It is very hard to distinguish
# between a CI issue and a test bug in that case. Set a timeout and fail
# loudly instead.
if override_envvars is None:
env = None
else:
env = os.environ.copy()
env.update(override_envvars)
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env,
encoding='utf-8',
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
def init(self, srcdir, *,
extra_args=None,
default_args=True,
inprocess=False,
override_envvars=None,
workdir=None):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix]
if self.libdir:
args += ['--libdir', self.libdir]
if self.meson_native_file:
args += ['--native-file', self.meson_native_file]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args, override_envvars)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except Exception:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars, workdir=workdir)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except Exception:
self._print_meson_log()
raise
return out
def build(self, target=None, *, extra_args=None, override_envvars=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars)
def clean(self, *, override_envvars=None):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars)
def run_tests(self, *, inprocess=False, override_envvars=None):
if not inprocess:
self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars)
else:
with mock.patch.dict(os.environ, override_envvars):
run_mtest_inprocess(['-C', self.builddir])
def install(self, *, use_destdir=True, override_envvars=None):
if self.backend is not Backend.ninja:
raise unittest.SkipTest(f'{self.backend.name!r} backend can\'t install files')
if use_destdir:
destdir = {'DESTDIR': self.installdir}
if override_envvars is None:
override_envvars = destdir
else:
override_envvars.update(destdir)
self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars)
def uninstall(self, *, override_envvars=None):
self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars)
def run_target(self, target, *, override_envvars=None):
'''
Run a Ninja target while printing the stdout and stderr to stdout,
and also return a copy of it
'''
return self.build(target=target, override_envvars=override_envvars)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
def wipe(self):
windows_proof_rmtree(self.builddir)
def utime(self, f):
ensure_backend_detects_changes(self.backend)
os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest(f'Compiler db not available with {self.backend.name} backend')
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
# If Ninja is using .rsp files, generate them, read their contents, and
# replace it as the command for all compile commands in the parsed json.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
Fetch a list of command-lines run by Meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def get_meson_log_sanitychecks(self):
'''
Same as above, but for the sanity checks that were run
'''
log = self.get_meson_log()
prefix = 'Sanity check compiler command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception:
print(out)
raise
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = f'{path!r} does not end with {basename!r}'
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
def assertReconfiguredBuildIsNoop(self):
'Assert that we reconfigured and then there was nothing to do'
ret = self.build()
self.assertIn('The Meson build system', ret)
if self.backend is Backend.ninja:
for line in ret.split('\n'):
if line in self.no_rebuild_stdout:
break
else:
raise AssertionError('build was reconfigured, but was not no-op')
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
# XXX: Note CustomBuild did indeed rebuild, because of the regen checker!
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError(f'Invalid backend: {self.backend.name!r}')
def assertBuildIsNoop(self):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target of each type said that no rebuild was done
# We always have at least one CustomBuild target for the regen checker
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('CustomBuild:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError(f'Invalid backend: {self.backend.name!r}')
def assertRebuiltTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn(f'Linking target {target}', ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError(f'Invalid backend: {self.backend.name!r}')
@staticmethod
def get_target_from_filename(filename):
base = os.path.splitext(filename)[0]
if base.startswith(('lib', 'cyg')):
return base[3:]
return base
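# For illustration: 'libfoo.a' and 'cygfoo.dll' both map to target 'foo',
# while 'foo.exe' stays 'foo' via the splitext() above.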
def assertBuildRelinkedOnlyTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
linked_targets = []
for line in ret.split('\n'):
if 'Linking target' in line:
fname = line.rsplit('target ')[-1]
linked_targets.append(self.get_target_from_filename(fname))
self.assertEqual(linked_targets, [target])
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile(r'Link:\n [^\n]*link.exe[^\n]*/OUT:".\\([^"]*)"', flags=re.IGNORECASE)
matches = linkre.findall(ret)
self.assertEqual(len(matches), 1, msg=matches)
self.assertEqual(self.get_target_from_filename(matches[0]), target)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError(f'Invalid backend: {self.backend.name!r}')
def assertPathExists(self, path):
m = f'Path {path!r} should exist'
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = f'Path {path!r} should not exist'
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
'''
Tests that should run on all platforms
'''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '88 default options')
self.init(testdir, default_args=False, inprocess=True)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
break
else:
self.fail('Did not find option "prefix"')
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
def conf_file(in_data, confdata):
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
def test_do_conf_file_by_format(self):
def conf_str(in_data, confdata, vformat):
(result, missing_variables, confdata_useless) = mesonbuild.mesonlib.do_conf_str(in_data, confdata, variable_format = vformat)
return '\n'.join(result)
def check_formats(confdata, result):
self.assertEqual(conf_str(['#mesondefine VAR'], confdata, 'meson'), result)
self.assertEqual(conf_str(['#cmakedefine VAR ${VAR}'], confdata, 'cmake'), result)
self.assertEqual(conf_str(['#cmakedefine VAR @VAR@'], confdata, 'cmake@'), result)
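# The three variable_format dialects checked above differ only in their
# substitution tokens: 'meson' uses #mesondefine plus @VAR@, 'cmake' uses
# #cmakedefine plus ${VAR}, and 'cmake@' uses #cmakedefine plus @VAR@.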
confdata = ConfigurationData()
# A missing key must render as a commented-out #undef
check_formats(confdata, '/* #undef VAR */\n')
# Check boolean
confdata.values = {'VAR': (False, 'description')}
check_formats(confdata, '#undef VAR\n')
confdata.values = {'VAR': (True, 'description')}
check_formats(confdata, '#define VAR\n')
# Check string
confdata.values = {'VAR': ('value', 'description')}
check_formats(confdata, '#define VAR value\n')
# Check integer
confdata.values = {'VAR': (10, 'description')}
check_formats(confdata, '#define VAR 10\n')
# Check multiple string with cmake formats
confdata.values = {'VAR': ('value', 'description')}
self.assertEqual(conf_str(['#cmakedefine VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value')
self.assertEqual(conf_str(['#cmakedefine VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value')
# Handle meson format exceptions
# Unknown format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'unknown_format')
# More than 2 params in mesondefine
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'meson')
# Mismatched line with format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#cmakedefine VAR'], confdata, 'meson')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake@')
# Non-scalar (list) value in confdata is invalid
confdata.values = {'VAR': (['value'], 'description')}
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'meson')
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir work. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '88 default options')
# on Windows, /someabs is *not* an absolute path
prefix = 'x:/someabs' if is_windows() else '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args=extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
if is_windows():
args = ['--prefix', 'x:/opt', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, extra_args=args)
self.wipe()
# libdir not being inside prefix is not ok
if is_windows():
args = ['--prefix', 'x:/usr', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
if is_windows():
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=x:/opt', False)
else:
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
if mesonbuild.mesonlib.default_prefix() == '/usr/local':
expected[None] = expected['/usr/local']
for prefix in expected:
args = []
if prefix:
args += ['--prefix', prefix]
self.init(testdir, extra_args=args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '164 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, extra_args=args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '52 run target')
self.init(testdir)
self.run_target('check_exists')
self.run_target('check-env')
self.run_target('check-env-ct')
def test_run_target_subdir(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '52 run target')
self.init(testdir)
self.run_target('textprinter')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest(f'{self.backend.name!r} backend can\'t install files')
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_subdir_introspection(self):
'''
Test that the Meson introspection API also contains subdir install information
https://github.com/mesonbuild/meson/issues/5556
'''
testdir = os.path.join(self.common_test_dir, '60 install subdir')
self.init(testdir)
intro = self.introspect('--installed')
expected = {
'sub2': 'share/sub2',
'subdir/sub1': 'share/sub1',
'subdir/sub_elided': 'share',
'sub1': 'share/sub1',
'sub/sub1': 'share/sub1',
'sub_elided': 'share',
'nested_elided/sub': 'share',
'new_directory': 'share/new_directory',
}
self.assertEqual(len(intro), len(expected))
# Convert expected to PurePath
expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
for src, dst in expected_converted.items():
self.assertIn(src, intro_converted)
self.assertEqual(dst, intro_converted[src])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
TODO Change the format to a list officially in a followup PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest(f'{self.backend.name!r} backend can\'t install files')
testdir = os.path.join(self.common_test_dir, '141 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_install_log_content(self):
'''
Tests that the install-log.txt is consistent with the installed files and directories.
Specifically checks that the log file only contains one entry per file/directory.
https://github.com/mesonbuild/meson/issues/4499
'''
testdir = os.path.join(self.common_test_dir, '60 install subdir')
self.init(testdir)
self.install()
installpath = Path(self.installdir)
# Find installed files and directories
expected = {installpath: 0}
for name in installpath.rglob('*'):
expected[name] = 0
def read_logs():
# Find logged files and directories
            with Path(self.builddir, 'meson-logs', 'install-log.txt').open(encoding='utf-8') as f:
                return [Path(line.strip()) for line in f if not line.startswith('#')]
logged = read_logs()
for name in logged:
self.assertTrue(name in expected, f'Log contains extra entry {name}')
expected[name] += 1
for name, count in expected.items():
self.assertGreater(count, 0, f'Log is missing entry for {name}')
self.assertLess(count, 2, f'Log has multiple entries for {name}')
# Verify that with --dry-run we obtain the same logs but with nothing
# actually installed
windows_proof_rmtree(self.installdir)
self._run(self.meson_command + ['install', '--dry-run', '--destdir', self.installdir], workdir=self.builddir)
self.assertEqual(logged, read_logs())
self.assertFalse(os.path.exists(self.installdir))
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
dirname = os.path.join(self.installdir, 'usr/share/dir')
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
self.assertPathDoesNotExist(dirname)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_nopromote(self):
testdir = os.path.join(self.common_test_dir, '99 subproject subdir')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['--wrap-mode=nopromote'])
self.assertIn('Dependency "subsub" not found', cm.exception.stdout)
def test_force_fallback_for(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--force-fallback-for=zlib,foo'])
self.build()
self.run_tests()
def test_testrepeat(self):
testdir = os.path.join(self.common_test_dir, '207 tap tests')
self.init(testdir)
self.build()
self._run(self.mtest_command + ['--repeat=2'])
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt'), encoding='utf-8') as f:
basic_log = f.read()
# Run buggy test with setup that has env that will make it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt'), encoding='utf-8') as f:
vg_log = f.read()
self.assertFalse('TEST_ENV is set' in basic_log)
self.assertFalse('Memcheck' in basic_log)
self.assertTrue('TEST_ENV is set' in vg_log)
self.assertTrue('Memcheck' in vg_log)
# Run buggy test with setup without env that will pass
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
# Setup that does not define a wrapper works with --wrapper
self._run(self.mtest_command + ['--setup=timeout', '--wrapper', shutil.which('valgrind')])
# Setup that skips test works
self._run(self.mtest_command + ['--setup=good'])
with open(os.path.join(self.logdir, 'testlog-good.txt'), encoding='utf-8') as f:
exclude_suites_log = f.read()
self.assertFalse('buggy' in exclude_suites_log)
        # --suite overrides add_test_setup(exclude_suites)
self._run(self.mtest_command + ['--setup=good', '--suite', 'buggy'])
with open(os.path.join(self.logdir, 'testlog-good.txt'), encoding='utf-8') as f:
include_suites_log = f.read()
self.assertTrue('buggy' in include_suites_log)
def test_testsetup_selection(self):
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
self.init(testdir)
self.build()
# Run tests without --setup will cause the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt'), encoding='utf-8') as f:
default_log = f.read()
# Run tests with explicitly using the same setup that is set as default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt'), encoding='utf-8') as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt'), encoding='utf-8') as f:
other_log = f.read()
self.assertTrue('ENV_A is 1' in default_log)
self.assertTrue('ENV_B is 2' in default_log)
self.assertTrue('ENV_C is 2' in default_log)
self.assertTrue('ENV_A is 1' in mydefault_log)
self.assertTrue('ENV_B is 2' in mydefault_log)
self.assertTrue('ENV_C is 2' in mydefault_log)
self.assertTrue('ENV_A is 1' in other_log)
self.assertTrue('ENV_B is 3' in other_log)
self.assertTrue('ENV_C is 2' in other_log)
def assertFailedTestCount(self, failure_count, command):
try:
self._run(command)
            self.assertEqual(0, failure_count, f'Expected {failure_count} tests to fail.')
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
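    # Note: this helper relies on the observed contract that the `meson test`
    # command exits with a status equal to the number of failed tests,
    # conceptually (a sketch of the assumed behaviour, not mtest's actual code):
    #
    #     failures = sum(1 for result in results if result.failed)
    #     sys.exit(failures)
    #
    # If mtest ever changed its exit-code semantics, assertFailedTestCount
    # and every caller below would need updating.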
def test_suite_selection(self):
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
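        # Suite selectors below use 'project:suite' syntax: a leading ':'
        # (e.g. ':fail') matches that suite in every (sub)project, a bare
        # name (e.g. 'subprjfail') matches all suites of that project, and
        # 'proj:suite' pins down a single suite in a single project.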
self.assertFailedTestCount(4, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '130 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
if mesonbuild.environment.detect_msys2_arch() and ('MESON_RSP_THRESHOLD' in os.environ):
raise unittest.SkipTest('Test does not yet support gcc rsp files on msys2')
testdir = os.path.join(self.common_test_dir, '131 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
            raise Exception('Could not find someexe and somefxe commands')
# Check include order for 'someexe'
incs = [a for a in split_args(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# Need to run the build so the private dir is created.
self.build()
pdirs = glob(os.path.join(self.builddir, 'sub4/someexe*.p'))
self.assertEqual(len(pdirs), 1)
privdir = pdirs[0][len(self.builddir)+1:]
self.assertPathEqual(incs[0], "-I" + privdir)
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in split_args(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
pdirs = glob(os.path.join(self.builddir, 'somefxe*.p'))
self.assertEqual(len(pdirs), 1)
privdir = pdirs[0][len(self.builddir)+1:]
self.assertPathEqual(incs[0], '-I' + privdir)
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
'''
Test that automatic compiler detection and setting from the environment
both work just fine. This is needed because while running project tests
and other unit tests, we always read CC/CXX/etc from the environment.
'''
gnu = mesonbuild.compilers.GnuCompiler
clang = mesonbuild.compilers.ClangCompiler
intel = mesonbuild.compilers.IntelGnuLikeCompiler
msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
ar = mesonbuild.linkers.ArLinker
lib = mesonbuild.linkers.VisualStudioLinker
langs = [('c', 'CC'), ('cpp', 'CXX')]
if not is_windows() and platform.machine().lower() != 'e2k':
langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
env = get_fake_env(testdir, self.builddir, self.prefix)
for lang, evar in langs:
# Detect with evar and do sanity checks on that
if evar in os.environ:
ecc = getattr(env, f'detect_{lang}_compiler')(MachineChoice.HOST)
self.assertTrue(ecc.version)
elinker = env.detect_static_linker(ecc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop(evar)
# Very rough/strict heuristics. Would never work for actual
# compiler detection, but should be ok for the tests.
ebase = os.path.basename(evalue)
if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
self.assertIsInstance(ecc, gnu)
self.assertIsInstance(elinker, ar)
elif 'clang-cl' in ebase:
self.assertIsInstance(ecc, clangcl)
self.assertIsInstance(elinker, lib)
elif 'clang' in ebase:
self.assertIsInstance(ecc, clang)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('ic'):
self.assertIsInstance(ecc, intel)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('cl'):
self.assertIsInstance(ecc, msvc)
self.assertIsInstance(elinker, lib)
else:
raise AssertionError(f'Unknown compiler {evalue!r}')
# Check that we actually used the evalue correctly as the compiler
self.assertEqual(ecc.get_exelist(), split_args(evalue))
# Do auto-detection of compiler based on platform, PATH, etc.
cc = getattr(env, f'detect_{lang}_compiler')(MachineChoice.HOST)
self.assertTrue(cc.version)
linker = env.detect_static_linker(cc)
# Check compiler type
if isinstance(cc, gnu):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_sunos():
self.assertIsInstance(cc.linker, (mesonbuild.linkers.SolarisDynamicLinker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin))
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, clangcl):
self.assertIsInstance(linker, lib)
self.assertIsInstance(cc.linker, mesonbuild.linkers.ClangClDynamicLinker)
if isinstance(cc, clang):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
# This is clang, not clang-cl. This can be either an
                    # ld-like linker or a link.exe-like linker (usually the
# former for msys2, the latter otherwise)
self.assertIsInstance(cc.linker, (mesonbuild.linkers.MSVCDynamicLinker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin))
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, intel):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
self.assertIsInstance(cc.linker, mesonbuild.linkers.XilinkDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuDynamicLinker)
if isinstance(cc, msvc):
self.assertTrue(is_windows())
self.assertIsInstance(linker, lib)
self.assertEqual(cc.id, 'msvc')
self.assertTrue(hasattr(cc, 'is_64'))
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
# If we're on Windows CI, we know what the compiler will be
if 'arch' in os.environ:
if os.environ['arch'] == 'x64':
self.assertTrue(cc.is_64)
else:
self.assertFalse(cc.is_64)
# Set evar ourselves to a wrapper script that just calls the same
# exelist + some argument. This is meant to test that setting
# something like `ccache gcc -pipe` or `distcc ccache gcc` works.
wrapper = os.path.join(testdir, 'compiler wrapper.py')
wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
os.environ[evar] = ' '.join(quote_arg(w) for w in wrappercc)
# Check static linker too
wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
os.environ['AR'] = ' '.join(quote_arg(w) for w in wrapperlinker)
# Need a new env to re-run environment loading
env = get_fake_env(testdir, self.builddir, self.prefix)
wcc = getattr(env, f'detect_{lang}_compiler')(MachineChoice.HOST)
wlinker = env.detect_static_linker(wcc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop('AR')
# Must be the same type since it's a wrapper around the same exelist
self.assertIs(type(cc), type(wcc))
self.assertIs(type(linker), type(wlinker))
# Ensure that the exelist is correct
self.assertEqual(wcc.get_exelist(), wrappercc)
self.assertEqual(wlinker.get_exelist(), wrapperlinker)
# Ensure that the version detection worked correctly
self.assertEqual(cc.version, wcc.version)
if hasattr(cc, 'is_64'):
self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '134 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
for cmd in self.get_compdb():
# Get compiler
split = split_args(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
                    raise AssertionError('{!r} found in c-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, encoding='utf-8') as f:
contents = f.read()
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
not LDFLAGS.
'''
testdir = os.path.join(self.common_test_dir, '133 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: this list can't have \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse split_args() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
for env_var in ['CPPFLAGS', 'CFLAGS']:
env = {}
env[env_var] = f'-D{define}="{value}"'
env['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
self.init(testdir, extra_args=[f'-D{define}={value}'], override_envvars=env)
def test_custom_target_exe_data_deterministic(self):
testdir = os.path.join(self.common_test_dir, '110 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_noop_changes_cause_no_rebuilds(self):
'''
Test that no-op changes to the build files such as mtime do not cause
a rebuild of anything.
'''
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of meson.build should not rebuild anything
self.utime(os.path.join(testdir, 'meson.build'))
self.assertReconfiguredBuildIsNoop()
        # Changing mtime of libfile.c should rebuild the library, but not relink the executable
self.utime(os.path.join(testdir, 'libfile.c'))
self.assertBuildRelinkedOnlyTarget('mylib')
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertBuildRelinkedOnlyTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '58 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertBuildRelinkedOnlyTarget('prog')
def test_source_generator_program_cause_rebuild(self):
'''
Test that changes to generator programs in the source tree cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '91 gen extra')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of generator should rebuild the executable
self.utime(os.path.join(testdir, 'srcgen.py'))
self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
@skip_if_not_base_option('b_lto_threads')
def test_lto_threads(self):
if is_cygwin():
raise unittest.SkipTest('LTO is broken on Cygwin.')
testdir = os.path.join(self.common_test_dir, '6 linkshared')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
extra_args: T.List[str] = []
if cc.get_id() == 'clang':
if is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
else:
extra_args.append('-D_cargs=-Werror=unused-command-line-argument')
self.init(testdir, extra_args=['-Db_lto=true', '-Db_lto_threads=8'] + extra_args)
self.build()
self.run_tests()
expected = set(cc.get_lto_compile_args(threads=8))
targets = self.introspect('--targets')
# This assumes all of the targets support lto
for t in targets:
for s in t['target_sources']:
for e in expected:
self.assertIn(e, s['parameters'])
@skip_if_not_base_option('b_lto_mode')
@skip_if_not_base_option('b_lto_threads')
def test_lto_mode(self):
testdir = os.path.join(self.common_test_dir, '6 linkshared')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() != 'clang':
raise unittest.SkipTest('Only clang currently supports thinLTO')
if cc.linker.id not in {'ld.lld', 'ld.gold', 'ld64', 'lld-link'}:
raise unittest.SkipTest('thinLTO requires ld.lld, ld.gold, ld64, or lld-link')
elif is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args=['-Db_lto=true', '-Db_lto_mode=thin', '-Db_lto_threads=8', '-Dc_args=-Werror=unused-command-line-argument'])
self.build()
self.run_tests()
expected = set(cc.get_lto_compile_args(threads=8, mode='thin'))
targets = self.introspect('--targets')
# This assumes all of the targets support lto
for t in targets:
for s in t['target_sources']:
self.assertTrue(expected.issubset(set(s['parameters'])), f'Incorrect values for {t["name"]}')
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
self.dist_impl(_git_init, _git_add_all)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def has_working_hg(self):
if not shutil.which('hg'):
return False
try:
# This check should not be necessary, but
# CI under macOS passes the above test even
# though Mercurial is not installed.
if subprocess.call(['hg', '--version'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
return False
return True
except FileNotFoundError:
return False
def test_dist_hg(self):
if not self.has_working_hg():
raise unittest.SkipTest('Mercurial not found or broken.')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <teh_coderz@example.com>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init, include_subprojects=False)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
self.new_builddir()
self.init(project_dir, extra_args=['-Dsub:broken_dist_script=false'])
self._run(self.meson_command + ['dist', '--include-subprojects'], workdir=self.builddir)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def create_dummy_subproject(self, project_dir, name):
path = os.path.join(project_dir, 'subprojects', name)
os.makedirs(path)
with open(os.path.join(path, 'meson.build'), 'w') as ofile:
ofile.write(f"project('{name}', version: '1.0')")
return path
def dist_impl(self, vcs_init, vcs_add_all=None, include_subprojects=True):
# Create this on the fly because having rogue .git directories inside
# the source tree leads to all kinds of trouble.
with tempfile.TemporaryDirectory() as project_dir:
with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
ofile.write(textwrap.dedent('''\
project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
subproject('vcssub', required : false)
subproject('tarballsub', required : false)
subproject('samerepo', required : false)
'''))
with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
ofile.write(textwrap.dedent('''\
#include<stdio.h>
int main(int argc, char **argv) {
printf("I am a distribution test.\\n");
return 0;
}
'''))
xz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
zip_distfile = os.path.join(self.distdir, 'disttest-1.4.3.zip')
zip_checksumfile = zip_distfile + '.sha256sum'
vcs_init(project_dir)
if include_subprojects:
vcs_init(self.create_dummy_subproject(project_dir, 'vcssub'))
self.create_dummy_subproject(project_dir, 'tarballsub')
self.create_dummy_subproject(project_dir, 'unusedsub')
if vcs_add_all:
vcs_add_all(self.create_dummy_subproject(project_dir, 'samerepo'))
self.init(project_dir)
self.build('dist')
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
self.assertPathDoesNotExist(zip_distfile)
self.assertPathDoesNotExist(zip_checksumfile)
self._run(self.meson_command + ['dist', '--formats', 'zip'],
workdir=self.builddir)
self.assertPathExists(zip_distfile)
self.assertPathExists(zip_checksumfile)
if include_subprojects:
# Verify that without --include-subprojects we have files from
# the main project and also files from subprojects part of the
# main vcs repository.
z = zipfile.ZipFile(zip_distfile)
expected = ['disttest-1.4.3/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c']
if vcs_add_all:
expected += ['disttest-1.4.3/subprojects/',
'disttest-1.4.3/subprojects/samerepo/',
'disttest-1.4.3/subprojects/samerepo/meson.build']
self.assertEqual(sorted(expected),
sorted(z.namelist()))
# Verify that with --include-subprojects we now also have files
# from tarball and separate vcs subprojects. But not files from
# unused subprojects.
self._run(self.meson_command + ['dist', '--formats', 'zip', '--include-subprojects'],
workdir=self.builddir)
z = zipfile.ZipFile(zip_distfile)
expected += ['disttest-1.4.3/subprojects/tarballsub/',
'disttest-1.4.3/subprojects/tarballsub/meson.build',
'disttest-1.4.3/subprojects/vcssub/',
'disttest-1.4.3/subprojects/vcssub/meson.build']
self.assertEqual(sorted(expected),
sorted(z.namelist()))
if vcs_add_all:
# Verify we can distribute separately subprojects in the same vcs
# repository as the main project.
subproject_dir = os.path.join(project_dir, 'subprojects', 'samerepo')
self.new_builddir()
self.init(subproject_dir)
self.build('dist')
xz_distfile = os.path.join(self.distdir, 'samerepo-1.0.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
tar = tarfile.open(xz_distfile, "r:xz")
self.assertEqual(sorted(['samerepo-1.0',
'samerepo-1.0/meson.build']),
sorted([i.name for i in tar]))
def test_rpath_uses_ORIGIN(self):
'''
Test that built targets use $ORIGIN in rpath, which ensures that they
are relocatable and ensures that builds are reproducible since the
build directory won't get embedded into the built binaries.
'''
if is_windows() or is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.common_test_dir, '40 library chain')
self.init(testdir)
self.build()
for each in ('prog', 'subdir/liblib1.so', ):
rpath = get_rpath(os.path.join(self.builddir, each))
self.assertTrue(rpath, f'Rpath could not be determined for {each}.')
if is_dragonflybsd():
# DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
# so ignore that.
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
rpaths = rpath.split(':')[1:]
else:
rpaths = rpath.split(':')
for path in rpaths:
self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
# These two don't link to anything else, so they do not need an rpath entry.
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
rpath = get_rpath(os.path.join(self.builddir, each))
if is_dragonflybsd():
# The rpath should be equal to /usr/lib/gccVERSION
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
self.assertEqual(len(rpath.split(':')), 1)
else:
self.assertTrue(rpath is None)
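    # For reference, a relocatable rpath entry as asserted above looks like
    # '$ORIGIN/subdir' (resolved by the dynamic linker relative to the
    # binary's own location) rather than an absolute build-directory path
    # such as '/path/to/builddir/subdir' (hypothetical example).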
def test_dash_d_dedup(self):
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '151 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '151 reserved targets')
        # Copy so that popping entries below doesn't mutate the shared global
        targets = dict(mesonbuild.coredata.FORBIDDEN_TARGET_NAMES)
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
def detect_prebuild_env(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
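    # Summary of the (object_suffix, shared_suffix) pairs returned above:
    # Windows ('obj', 'dll'), Cygwin ('o', 'dll'), macOS ('o', 'dylib'),
    # everywhere else ('o', 'so').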
def pbcompile(self, compiler, source, objectfile, extra_args=None):
cmd = compiler.get_exelist()
extra_args = extra_args or []
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
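    # Illustrative compile commands assembled above (file names hypothetical):
    #   msvc syntax: cl /nologo /Foprebuilt.obj /c source.c
    #   gcc syntax:  cc -c source.c -o prebuilt.o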
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
if extra_args is None:
extra_args = []
        # The static linker object abstracts over both 'lib' (MSVC) and 'ar',
        # so the command can be assembled generically from its methods.
        link_cmd = linker.get_exelist()
link_cmd += linker.get_always_args()
link_cmd += linker.get_std_link_args()
link_cmd += linker.get_output_args(outfile)
link_cmd += [objectfile]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
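    # With a GNU-ar-style static linker the command assembled above comes out
    # roughly as 'ar csr libbest.a best.o' (illustrative; the exact flags are
    # whatever the detected linker's get_always_args()/get_std_link_args()
    # return).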
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
if extra_args is None:
extra_args = []
if compiler.get_argument_syntax() == 'msvc':
link_cmd = compiler.get_linker_exelist() + [
'/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
'/OUT:' + outfile, objectfile]
else:
if not (compiler.info.is_windows() or compiler.info.is_cygwin() or compiler.info.is_darwin()):
extra_args += ['-fPIC']
link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
if not mesonbuild.mesonlib.is_osx():
link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
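    # Illustrative link commands assembled above (file names hypothetical):
    #   msvc:  link /NOLOGO /DLL /DEBUG /IMPLIB:foo.lib /OUT:foo.dll foo.obj
    #   other: cc -shared -o libfoo.so foo.o -Wl,-soname=libfoo.so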
def test_prebuilt_shared_lib(self):
(cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
source = os.path.join(tdir, 'alexandria.c')
objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
impfile = os.path.join(tdir, 'alexandria.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
elif is_cygwin():
shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
else:
shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(tdir, 'alexandria.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_static(self):
'''
Test that the we prefer static libraries when `static: true` is
passed to dependency() with pkg-config. Can't be an ordinary test
because we need to build libs and try to find them from meson.build
Also test that it's not a hard error to have unsatisfiable library deps
since system libraries -lm will never be found statically.
https://github.com/mesonbuild/meson/issues/2785
'''
(cc, stlinker, objext, shext) = self.detect_prebuild_env()
testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
source = os.path.join(testdir, 'foo.c')
objectfile = os.path.join(testdir, 'foo.' + objext)
stlibfile = os.path.join(testdir, 'libfoo.a')
impfile = os.path.join(testdir, 'foo.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(testdir, 'foo.' + shext)
elif is_cygwin():
shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
else:
shlibfile = os.path.join(testdir, 'libfoo.' + shext)
# Build libs
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run test
try:
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir})
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(testdir, 'foo.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
os.unlink(fname)
@skipIfNoPkgconfig
@mock.patch.dict(os.environ)
def test_pkgconfig_gen_escaping(self):
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix(), '-DLIBFOO']
# pkg-config and pkgconf does not respect the same order
self.assertEqual(sorted(foo_dep.get_compile_args()), sorted(cargs))
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
def opt_has(self, name, value):
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
self.assertTrue(found, "Array option not found in introspect data.")
def test_free_stringarray_setting(self):
testdir = os.path.join(self.common_test_dir, '41 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
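    # As exercised above, array options accept both the comma-separated form
    # ('-Dopt=foo,bar') and the list-literal form ("-Dopt=['a,b', 'c,d']");
    # the literal form is required when an element itself contains a comma.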
# When running under Travis Mac CI, the file updates seem to happen
# too fast so the timestamps do not get properly updated.
# Call this method before file operations in appropriate places
# to make things work.
def mac_ci_delay(self):
if is_osx() and is_ci():
import time
time.sleep(1)
def test_options_with_choices_changing(self) -> None:
"""Detect when options like arrays or combos have their choices change."""
testdir = Path(os.path.join(self.unit_test_dir, '85 change option choices'))
options1 = str(testdir / 'meson_options.1.txt')
options2 = str(testdir / 'meson_options.2.txt')
# Test that old options are changed to the new defaults if they are not valid
real_options = str(testdir / 'meson_options.txt')
self.addCleanup(os.unlink, real_options)
shutil.copy(options1, real_options)
self.init(str(testdir))
self.mac_ci_delay()
shutil.copy(options2, real_options)
self.build()
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == 'combo':
self.assertEqual(item['value'], 'b')
self.assertEqual(item['choices'], ['b', 'c', 'd'])
elif item['name'] == 'arr':
self.assertEqual(item['value'], ['b'])
self.assertEqual(item['choices'], ['b', 'c', 'd'])
self.wipe()
self.mac_ci_delay()
# When the old options are valid they should remain
shutil.copy(options1, real_options)
self.init(str(testdir), extra_args=['-Dcombo=c', '-Darray=b,c'])
self.mac_ci_delay()
shutil.copy(options2, real_options)
self.build()
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == 'combo':
self.assertEqual(item['value'], 'c')
self.assertEqual(item['choices'], ['b', 'c', 'd'])
elif item['name'] == 'arr':
self.assertEqual(item['value'], ['b', 'c'])
self.assertEqual(item['choices'], ['b', 'c', 'd'])
def test_subproject_promotion(self):
testdir = os.path.join(self.unit_test_dir, '12 promote')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
s3dir = os.path.join(spdir, 's3')
scommondir = os.path.join(spdir, 'scommon')
self.assertFalse(os.path.isdir(s3dir))
subprocess.check_call(self.wrap_command + ['promote', 's3'],
cwd=workdir,
stdout=subprocess.DEVNULL)
self.assertTrue(os.path.isdir(s3dir))
self.assertFalse(os.path.isdir(scommondir))
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isdir(scommondir))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
self.assertTrue(os.path.isdir(scommondir))
promoted_wrap = os.path.join(spdir, 'athing.wrap')
self.assertFalse(os.path.isfile(promoted_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
self.assertTrue(os.path.isfile(promoted_wrap))
self.init(workdir)
self.build()
def test_subproject_promotion_wrap(self):
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
self.new_builddir()
out = self.init(tdir, workdir=wd)
expected = os.path.join(relpath(tdir, self.src_root), 'meson.build')
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, out)
def test_error_location_path(self):
'''Test locations in meson errors contain correct paths'''
# this list contains errors from all the different steps in the
# lexer/parser/interpreter we have tests for.
for (t, f) in [
('10 out of bounds', 'meson.build'),
('18 wrong plusassign', 'meson.build'),
('61 bad option argument', 'meson_options.txt'),
('102 subdir parse error', os.path.join('subdir', 'meson.build')),
('103 invalid option file', 'meson_options.txt'),
]:
tdir = os.path.join(self.src_root, 'test cases', 'failing', t)
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
try:
self.init(tdir, workdir=wd)
except subprocess.CalledProcessError as e:
expected = os.path.join('test cases', 'failing', t, f)
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, e.output)
else:
self.fail('configure unexpectedly succeeded')
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
def test_templates(self):
ninja = detect_ninja()
if ninja is None:
raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
langs = ['c']
env = get_fake_env()
for l in ['cpp', 'cs', 'd', 'java', 'cuda', 'fortran', 'objc', 'objcpp', 'rust']:
try:
comp = env.detect_compiler_for(l, MachineChoice.HOST)
with tempfile.TemporaryDirectory() as d:
comp.sanity_check(d, env)
langs.append(l)
except EnvironmentException:
pass
for lang in langs:
for target_type in ('executable', 'library'):
# test empty directory
with tempfile.TemporaryDirectory() as tmpdir:
self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
workdir=tmpdir)
self._run(self.setup_command + ['--backend=ninja', 'builddir'],
workdir=tmpdir)
self._run(ninja,
workdir=os.path.join(tmpdir, 'builddir'))
# test directory with existing code file
if lang in {'c', 'cpp', 'd'}:
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
f.write('int main(void) {}')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
elif lang in {'java'}:
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'Foo.' + lang), 'w') as f:
f.write('public class Foo { public static void main() {} }')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '173 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '182 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
Test that link_with: a shared module issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
msg = ('WARNING: target links against shared modules. This is not '
'recommended as it is not supported on some platforms')
self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
libdir_flags = ['-L']
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() in {'msvc', 'clang-cl', 'intel-cl'}:
# msvc-like compiler, also test it with msvc-specific flags
libdir_flags += ['/LIBPATH:', '-LIBPATH:']
else:
# static libraries are not linkable with -l with msvc because meson installs them
# as .a files which unix_args_to_native will not know as it expects libraries to use
# .lib as extension. For a DLL the import library is installed as .lib. Thus for msvc
# this tests needs to use shared libraries to test the path resolving logic in the
# dependency generation code path.
extra_args = ['--default-library', 'static']
initial_builddir = self.builddir
initial_installdir = self.installdir
for libdir_flag in libdir_flags:
# build library
self.new_builddir()
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
# replace is needed because meson mangles platform paths passed via LDFLAGS
self.init(os.path.join(testdirbase, 'exe'),
override_envvars={"LDFLAGS": '{}{}'.format(libdir_flag, libdir.replace('\\', '/'))})
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
# assert that the user of the library will be rebuilt
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
# restore dirs for the next test case
self.installdir = initial_installdir
self.builddir = initial_builddir
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as e:
self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
# Just to ensure that we caught the correct error
self.assertIn('as both', str(e.exception))
def _test_same_option_twice(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception(f'Missing {arg} value?')
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception(f'Missing {arg} value?')
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
def test_command_line(self):
testdir = os.path.join(self.unit_test_dir, '34 command line')
# Verify default values when passing no args that affect the
# configuration, and as a bonus, test that --profile-self works.
out = self.init(testdir, extra_args=['--profile-self', '--fatal-meson-warnings'])
self.assertNotIn('[default: true]', out)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('default_library')].value, 'static')
self.assertEqual(obj.options[OptionKey('warning_level')].value, '1')
self.assertEqual(obj.options[OptionKey('set_sub_opt')].value, True)
self.assertEqual(obj.options[OptionKey('subp_opt', 'subp')].value, 'default3')
self.wipe()
# warning_level is special, it's --warnlevel instead of --warning-level
# for historical reasons
self.init(testdir, extra_args=['--warnlevel=2', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '2')
self.setconf('--warnlevel=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '3')
self.wipe()
# But when using -D syntax, it should be 'warning_level'
self.init(testdir, extra_args=['-Dwarning_level=2', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '2')
self.setconf('-Dwarning_level=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '3')
self.wipe()
# Mixing --option and -Doption is forbidden
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
if isinstance(cm.exception, subprocess.CalledProcessError):
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
else:
self.assertIn('as both', str(cm.exception))
self.init(testdir)
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
if isinstance(cm.exception, subprocess.CalledProcessError):
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
else:
self.assertIn('as both', str(cm.exception))
self.wipe()
# --default-library should override default value from project()
self.init(testdir, extra_args=['--default-library=both', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('default_library')].value, 'both')
self.setconf('--default-library=shared')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('default_library')].value, 'shared')
if self.backend is Backend.ninja:
# reconfigure target works only with ninja backend
self.build('reconfigure')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('default_library')].value, 'shared')
self.wipe()
# Should warn on unknown options
out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
self.wipe()
# Should fail on malformed option
msg = "Option 'foo' must have a value separated by equals sign."
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
self.init(testdir, extra_args=['-Dfoo'])
if isinstance(cm.exception, subprocess.CalledProcessError):
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn(msg, cm.exception.output)
else:
self.assertIn(msg, str(cm.exception))
self.init(testdir)
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
self.setconf('-Dfoo')
if isinstance(cm.exception, subprocess.CalledProcessError):
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn(msg, cm.exception.output)
else:
self.assertIn(msg, str(cm.exception))
self.wipe()
# It is not an error to set a wrong option for an unknown subproject or
# language, because we have no control over which one will be selected.
self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
self.wipe()
# Test we can set subproject option
self.init(testdir, extra_args=['-Dsubp:subp_opt=foo', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('subp_opt', 'subp')].value, 'foo')
self.wipe()
# c_args value should be parsed with split_args
self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['foo bar', 'one', 'two'])
self.wipe()
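# To summarize the two assertions above: split_args applies shell-style
# quoting rules, so
#   '-Dfoo -Dbar "-Dthird=one two"'  ->  ['-Dfoo', '-Dbar', '-Dthird=one two']
#   '"foo bar" one two'              ->  ['foo bar', 'one', 'two']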
self.init(testdir, extra_args=['-Dset_percent_opt=myoption%', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('set_percent_opt')].value, 'myoption%')
self.wipe()
# Setting the same option a second time should override the first value
try:
self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
'-Dbuildtype=plain', '-Dbuildtype=release',
'-Db_sanitize=address', '-Db_sanitize=thread',
'-Dc_args=-Dfoo', '-Dc_args=-Dbar',
'-Db_lundef=false', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('bindir')].value, 'bar')
self.assertEqual(obj.options[OptionKey('buildtype')].value, 'release')
self.assertEqual(obj.options[OptionKey('b_sanitize')].value, 'thread')
self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['-Dbar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
'-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('bindir')].value, 'foo')
self.assertEqual(obj.options[OptionKey('buildtype')].value, 'plain')
self.assertEqual(obj.options[OptionKey('b_sanitize')].value, 'address')
self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['-Dfoo'])
self.wipe()
except KeyError:
# Ignore KeyError; it happens on CI for compilers that do not support
# b_sanitize. We have to test with a base option because such compilers
# used to fail this test with Meson 0.46 and earlier versions.
pass
def test_warning_level_0(self):
testdir = os.path.join(self.common_test_dir, '208 warning level 0')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '0')
self.wipe()
# verify we can override w/ --warnlevel
self.init(testdir, extra_args=['--warnlevel=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '1')
self.setconf('--warnlevel=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '0')
self.wipe()
# verify we can override w/ -Dwarning_level
self.init(testdir, extra_args=['-Dwarning_level=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '1')
self.setconf('-Dwarning_level=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '0')
self.wipe()
def test_feature_check_usage_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
out = self.init(testdir)
# Parent project warns correctly
self.assertRegex(out, "WARNING: Project targeting '>=0.45'.*'0.47.0': dict")
# Subprojects warn correctly
self.assertRegex(out, r"\| WARNING: Project targeting '>=0.40'.*'0.44.0': disabler")
self.assertRegex(out, r"\| WARNING: Project targeting '!=0.40'.*'0.44.0': disabler")
# Subproject has a new-enough meson_version, no warning
self.assertNotRegex(out, "WARNING: Project targeting.*Python")
# Ensure a summary is printed in the subproject and the outer project
self.assertRegex(out, r"\| WARNING: Project specifies a minimum meson_version '>=0.40'")
self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
self.assertRegex(out, " * 0.47.0: {'dict'}")
def test_configure_file_warnings(self):
testdir = os.path.join(self.common_test_dir, "14 configure file")
out = self.init(testdir)
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
# Warnings for configuration files that are overwritten.
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
# No warnings about empty configuration data objects passed to files with substitutions
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'')
self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
mfile = os.path.join(srcdir, 'meson.build')
with open(mfile, 'w') as of:
    of.write("project('foobar', 'c')\n")
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
def test_buildtype_setting(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.setconf('-Ddebug=false')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['optimization'], '0')
self.setconf('-Doptimization=g')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['optimization'], 'g')
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = '{}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_pkg_config_libdir(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = 'pkg-config'
[properties]
pkg_config_libdir = ['{}']
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
def __reconfigure(self, change_minor=False):
# Set an older version to force a reconfigure from scratch
filename = os.path.join(self.privatedir, 'coredata.dat')
with open(filename, 'rb') as f:
obj = pickle.load(f)
if change_minor:
v = mesonbuild.coredata.version.split('.')
obj.version = '.'.join(v[0:2] + [str(int(v[2]) + 1)])
else:
obj.version = '0.47.0'
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure()
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertRegex(out, 'Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
# Create a file in builddir and verify wipe command removes it
filename = os.path.join(self.builddir, 'something')
open(filename, 'w').close()
self.assertTrue(os.path.exists(filename))
out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
self.assertFalse(os.path.exists(filename))
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 val4')
self.build()
self.run_tests()
def test_wipe_from_builddir(self):
testdir = os.path.join(self.common_test_dir, '158 custom target subdir depend files')
self.init(testdir)
self.__reconfigure()
# Run the wipe from inside the build directory, as the test name implies;
# pathlib.Path is not a context manager that changes directory, so chdir
# explicitly and restore afterwards.
prev_cwd = os.getcwd()
os.chdir(self.builddir)
try:
    self.init(testdir, extra_args=['--wipe'])
finally:
    os.chdir(prev_cwd)
def test_minor_version_does_not_reconfigure_wipe(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure(change_minor=True)
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertNotRegex(out, 'Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
def test_target_construct_id_from_path(self):
# This ID is stable but not guessable. The test is supposed to prevent
# unintentional changes to target ID generation.
target_id = Target.construct_id_from_path('some/obscure/subdir',
'target-id', '@suffix')
self.assertEqual('5e002d3@@target-id@suffix', target_id)
target_id = Target.construct_id_from_path('subproject/foo/subdir/bar',
'target2-id', '@other')
self.assertEqual('81d46d1@@target2-id@other', target_id)
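# A hedged sketch of the scheme these expected values imply: a short hex
# digest of the subdir path, '@@', then the name and type suffix (the exact
# hash function is an assumption):
#
#   def sketch_id(subdir, name, suffix):
#       return some_hash(subdir)[:7] + '@@' + name + suffix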
def test_introspect_projectinfo_without_configured_build(self):
testfile = os.path.join(self.common_test_dir, '34 run program', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), {'meson.build'})
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'run command')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '41 options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), {'meson_options.txt', 'meson.build'})
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'options')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '44 subproject options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), {'meson_options.txt', 'meson.build'})
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'suboptions')
self.assertEqual(len(res['subprojects']), 1)
subproject_files = {f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files']}
self.assertEqual(subproject_files, {'subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build'})
self.assertEqual(res['subprojects'][0]['name'], 'subproject')
self.assertEqual(res['subprojects'][0]['version'], 'undefined')
self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
testdir = os.path.join(self.common_test_dir, '99 subproject subdir')
self.init(testdir)
res = self.introspect('--projectinfo')
expected = {
'descriptive_name': 'proj',
'version': 'undefined',
'subproject_dir': 'subprojects',
'subprojects': [
{
'descriptive_name': 'sub',
'name': 'sub',
'version': '1.0'
},
{
'descriptive_name': 'sub_implicit',
'name': 'sub_implicit',
'version': '1.0',
},
{
'descriptive_name': 'sub-novar',
'name': 'sub_novar',
'version': '1.0',
},
{
'descriptive_name': 'subsub',
'name': 'subsub',
'version': 'undefined'
},
{
'descriptive_name': 'subsubsub',
'name': 'subsubsub',
'version': 'undefined'
},
]
}
res['subprojects'] = sorted(res['subprojects'], key=lambda i: i['name'])
self.assertDictEqual(expected, res)
def test_introspection_target_subproject(self):
testdir = os.path.join(self.common_test_dir, '43 subproject')
self.init(testdir)
res = self.introspect('--targets')
expected = {
'sublib': 'sublib',
'simpletest': 'sublib',
'user': None
}
for entry in res:
name = entry['name']
self.assertEqual(entry['subproject'], expected[name])
def test_introspect_projectinfo_subproject_dir(self):
testdir = os.path.join(self.common_test_dir, '76 custom subproject dir')
self.init(testdir)
res = self.introspect('--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
testfile = os.path.join(self.common_test_dir, '76 custom subproject dir', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest(f'Clang-format is for now only supported on Ninja, not {self.backend.name}')
testdir = os.path.join(self.unit_test_dir, '54 clang-format')
testfile = os.path.join(testdir, 'prog.c')
badfile = os.path.join(testdir, 'prog_orig_c')
goodfile = os.path.join(testdir, 'prog_expected_c')
testheader = os.path.join(testdir, 'header.h')
badheader = os.path.join(testdir, 'header_orig_h')
goodheader = os.path.join(testdir, 'header_expected_h')
try:
shutil.copyfile(badfile, testfile)
shutil.copyfile(badheader, testheader)
self.init(testdir)
self.assertNotEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
self.assertNotEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
self.run_target('clang-format')
self.assertEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
finally:
if os.path.exists(testfile):
os.unlink(testfile)
if os.path.exists(testheader):
os.unlink(testheader)
@skipIfNoExecutable('clang-tidy')
def test_clang_tidy(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest(f'Clang-tidy is for now only supported on Ninja, not {self.backend.name}')
if shutil.which('c++') is None:
raise unittest.SkipTest('Clang-tidy breaks when ccache is used and "c++" not in path.')
if is_osx():
raise unittest.SkipTest('Apple ships a broken clang-tidy that chokes on -pipe.')
testdir = os.path.join(self.unit_test_dir, '70 clang-tidy')
dummydir = os.path.join(testdir, 'dummydir.h')
self.init(testdir, override_envvars={'CXX': 'c++'})
out = self.run_target('clang-tidy')
self.assertIn('cttest.cpp:4:20', out)
self.assertNotIn(dummydir, out)
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '71 cross')
# Do a build to generate a cross file where the host is this target
self.init(testdir, extra_args=['-Dgenerate=true'])
self.meson_cross_file = os.path.join(self.builddir, "crossfile")
self.assertTrue(os.path.exists(self.meson_cross_file))
# Now verify that this is detected as cross
self.new_builddir()
self.init(testdir)
def test_introspect_buildoptions_without_configured_build(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
self.init(testdir, default_args=False)
res_wb = self.introspect('--buildoptions')
self.maxDiff = None
# XXX: These now generate in a different order, is that okay?
self.assertListEqual(sorted(res_nb, key=lambda x: x['name']), sorted(res_wb, key=lambda x: x['name']))
def test_meson_configure_from_source_does_not_crash(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
self._run(self.mconf_command + [testdir])
def test_introspect_buildoptions_cross_only(self):
testdir = os.path.join(self.unit_test_dir, '84 cross only introspect')
testfile = os.path.join(testdir, 'meson.build')
res = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
optnames = [o['name'] for o in res]
self.assertIn('c_args', optnames)
self.assertNotIn('build.c_args', optnames)
def test_introspect_json_flat(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
out = self.init(testdir, extra_args=['-Dlayout=flat'])
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
with open(os.path.join(infodir, 'intro-targets.json')) as fp:
targets = json.load(fp)
for i in targets:
for out in i['filename']:
assert os.path.relpath(out, self.builddir).startswith('meson-out')
def test_introspect_json_dump(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
def assertKeyTypes(key_type_list, obj, strict: bool = True):
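# key_type_list pairs each expected key with its type (or a tuple of
# types); a None inside the tuple marks the key as optional, so absent
# or null values are skipped rather than failing the assertion.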
for i in key_type_list:
if isinstance(i[1], (list, tuple)) and None in i[1]:
i = (i[0], tuple([x for x in i[1] if x is not None]))
if i[0] not in obj or obj[i[0]] is None:
continue
self.assertIn(i[0], obj)
self.assertIsInstance(obj[i[0]], i[1])
if strict:
for k in obj.keys():
found = False
for i in key_type_list:
if k == i[0]:
found = True
break
self.assertTrue(found, f'Key "{k}" not in expected list')
root_keylist = [
('benchmarks', list),
('buildoptions', list),
('buildsystem_files', list),
('dependencies', list),
('installed', dict),
('projectinfo', dict),
('targets', list),
('tests', list),
]
test_keylist = [
('cmd', list),
('env', dict),
('name', str),
('timeout', int),
('suite', list),
('is_parallel', bool),
('protocol', str),
('depends', list),
('workdir', (str, None)),
('priority', int),
]
buildoptions_keylist = [
('name', str),
('section', str),
('type', str),
('description', str),
('machine', str),
('choices', (list, None)),
('value', (str, int, bool, list)),
]
buildoptions_typelist = [
('combo', str, [('choices', list)]),
('string', str, []),
('boolean', bool, []),
('integer', int, []),
('array', list, []),
]
buildoptions_sections = ['core', 'backend', 'base', 'compiler', 'directory', 'user', 'test']
buildoptions_machines = ['any', 'build', 'host']
dependencies_typelist = [
('name', str),
('version', str),
('compile_args', list),
('link_args', list),
]
targets_typelist = [
('name', str),
('id', str),
('type', str),
('defined_in', str),
('filename', list),
('build_by_default', bool),
('target_sources', list),
('extra_files', list),
('subproject', (str, None)),
('install_filename', (list, None)),
('installed', bool),
]
targets_sources_typelist = [
('language', str),
('compiler', list),
('parameters', list),
('sources', list),
('generated_sources', list),
]
# First load all files
res = {}
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
self.assertPathExists(curr)
with open(curr) as fp:
res[i[0]] = json.load(fp)
assertKeyTypes(root_keylist, res)
# Match target ids to input and output files for ease of reference
src_to_id = {}
out_to_id = {}
for i in res['targets']:
print(json.dumps(i))
out_to_id.update({os.path.relpath(out, self.builddir): i['id']
for out in i['filename']})
for group in i['target_sources']:
src_to_id.update({os.path.relpath(src, testdir): i['id']
for src in group['sources']})
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
deps_to_find = {'test case 1': [src_to_id['t1.cpp']],
'test case 2': [src_to_id['t2.cpp'], src_to_id['t3.cpp']],
'benchmark 1': [out_to_id['file2'], src_to_id['t3.cpp']]}
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertEqual(sorted(i['depends']),
sorted(deps_to_find[i['name']]))
self.assertListEqual(tests_to_find, [])
# Check buildoptions
buildopts_to_find = {'cpp_std': 'c++11'}
for i in res['buildoptions']:
assertKeyTypes(buildoptions_keylist, i)
valid_type = False
for j in buildoptions_typelist:
if i['type'] == j[0]:
self.assertIsInstance(i['value'], j[1])
assertKeyTypes(j[2], i, strict=False)
valid_type = True
break
self.assertIn(i['section'], buildoptions_sections)
self.assertIn(i['machine'], buildoptions_machines)
self.assertTrue(valid_type)
if i['name'] in buildopts_to_find:
self.assertEqual(i['value'], buildopts_to_find[i['name']])
buildopts_to_find.pop(i['name'], None)
self.assertDictEqual(buildopts_to_find, {})
# Check buildsystem_files
bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
# Check dependencies
dependencies_to_find = ['threads']
for i in res['dependencies']:
assertKeyTypes(dependencies_typelist, i)
if i['name'] in dependencies_to_find:
dependencies_to_find.remove(i['name'])
self.assertListEqual(dependencies_to_find, [])
# Check projectinfo
self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})
# Check targets
targets_to_find = {
'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
'test1': ('executable', True, True, 'meson.build'),
'test2': ('executable', True, False, 'meson.build'),
'test3': ('executable', True, False, 'meson.build'),
}
for i in res['targets']:
assertKeyTypes(targets_typelist, i)
if i['name'] in targets_to_find:
tgt = targets_to_find[i['name']]
self.assertEqual(i['type'], tgt[0])
self.assertEqual(i['build_by_default'], tgt[1])
self.assertEqual(i['installed'], tgt[2])
self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
targets_to_find.pop(i['name'], None)
for j in i['target_sources']:
assertKeyTypes(targets_sources_typelist, j)
self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
res_all = self.introspect('--all')
res_file = {}
root_keylist = [
'benchmarks',
'buildoptions',
'buildsystem_files',
'dependencies',
'installed',
'projectinfo',
'targets',
'tests',
]
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
for i in root_keylist:
curr = os.path.join(infodir, f'intro-{i}.json')
self.assertPathExists(curr)
with open(curr) as fp:
res_file[i] = json.load(fp)
self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile) as fp:
res1 = json.load(fp)
for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
self.assertIn(i, res1)
self.assertEqual(res1['error'], False)
self.assertEqual(res1['build_files_updated'], True)
def test_introspect_config_update(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile) as fp:
res1 = json.load(fp)
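# Mutate the loaded options into the values the introspection file is
# expected to contain after the setconf calls below.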
for i in res1:
if i['name'] == 'cpp_std':
i['value'] = 'c++14'
if i['name'] == 'build.cpp_std':
i['value'] = 'c++14'
if i['name'] == 'buildtype':
i['value'] = 'release'
if i['name'] == 'optimization':
i['value'] = '3'
if i['name'] == 'debug':
i['value'] = False
self.setconf('-Dcpp_std=c++14')
self.setconf('-Dbuildtype=release')
with open(introfile) as fp:
res2 = json.load(fp)
self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile) as fp:
res_wb = json.load(fp)
res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
# Account for differences in output
res_wb = [i for i in res_wb if i['type'] != 'custom']
for i in res_wb:
i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
if 'install_filename' in i:
del i['install_filename']
sources = []
for j in i['target_sources']:
sources += j['sources']
i['target_sources'] = [{
'language': 'unknown',
'compiler': [],
'parameters': [],
'sources': sources,
'generated_sources': []
}]
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_ast_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--ast'] + self.meson_args)
node_counter = {}
def accept_node(json_node):
self.assertIsInstance(json_node, dict)
for i in ['lineno', 'colno', 'end_lineno', 'end_colno']:
self.assertIn(i, json_node)
self.assertIsInstance(json_node[i], int)
self.assertIn('node', json_node)
n = json_node['node']
self.assertIsInstance(n, str)
self.assertIn(n, nodes)
if n not in node_counter:
node_counter[n] = 0
node_counter[n] = node_counter[n] + 1
for nodeDesc in nodes[n]:
key = nodeDesc[0]
func = nodeDesc[1]
self.assertIn(key, json_node)
if func is None:
tp = nodeDesc[2]
self.assertIsInstance(json_node[key], tp)
continue
func(json_node[key])
def accept_node_list(node_list):
self.assertIsInstance(node_list, list)
for i in node_list:
accept_node(i)
def accept_kwargs(kwargs):
self.assertIsInstance(kwargs, list)
for i in kwargs:
self.assertIn('key', i)
self.assertIn('val', i)
accept_node(i['key'])
accept_node(i['val'])
nodes = {
'BooleanNode': [('value', None, bool)],
'IdNode': [('value', None, str)],
'NumberNode': [('value', None, int)],
'StringNode': [('value', None, str)],
'ContinueNode': [],
'BreakNode': [],
'ArgumentNode': [('positional', accept_node_list), ('kwargs', accept_kwargs)],
'ArrayNode': [('args', accept_node)],
'DictNode': [('args', accept_node)],
'EmptyNode': [],
'OrNode': [('left', accept_node), ('right', accept_node)],
'AndNode': [('left', accept_node), ('right', accept_node)],
'ComparisonNode': [('left', accept_node), ('right', accept_node), ('ctype', None, str)],
'ArithmeticNode': [('left', accept_node), ('right', accept_node), ('op', None, str)],
'NotNode': [('right', accept_node)],
'CodeBlockNode': [('lines', accept_node_list)],
'IndexNode': [('object', accept_node), ('index', accept_node)],
'MethodNode': [('object', accept_node), ('args', accept_node), ('name', None, str)],
'FunctionNode': [('args', accept_node), ('name', None, str)],
'AssignmentNode': [('value', accept_node), ('var_name', None, str)],
'PlusAssignmentNode': [('value', accept_node), ('var_name', None, str)],
'ForeachClauseNode': [('items', accept_node), ('block', accept_node), ('varnames', None, list)],
'IfClauseNode': [('ifs', accept_node_list), ('else', accept_node)],
'IfNode': [('condition', accept_node), ('block', accept_node)],
'UMinusNode': [('right', accept_node)],
'TernaryNode': [('condition', accept_node), ('true', accept_node), ('false', accept_node)],
}
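# Under this schema, a one-line meson.build such as `x = 1` would serialize
# to roughly the following (a sketch; positions abbreviated):
#   {"node": "CodeBlockNode", "lineno": 1, "colno": 0, ..., "lines": [
#     {"node": "AssignmentNode", "var_name": "x",
#      "value": {"node": "NumberNode", "value": 1, ...}}]}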
accept_node(res_nb)
for n, c in [('ContinueNode', 2), ('BreakNode', 1), ('NotNode', 3)]:
self.assertIn(n, node_counter)
self.assertEqual(node_counter[n], c)
def test_introspect_dependencies_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)
expected = [
{
'name': 'threads',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'zlib',
'required': False,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'bugDep1',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'somethingthatdoesnotexist',
'required': True,
'version': ['>=1.2.3'],
'has_fallback': False,
'conditional': True
},
{
'name': 'look_i_have_a_fallback',
'required': True,
'version': ['>=1.0.0', '<=99.9.9'],
'has_fallback': True,
'conditional': True
}
]
self.maxDiff = None
self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
# just test that the command does not fail (e.g. because it throws an exception)
self._run([*self.meson_command, 'unstable-coredata', self.builddir])
@skip_if_no_cmake
def test_cmake_prefix_path(self):
testdir = os.path.join(self.unit_test_dir, '64 cmake_prefix_path')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
@skip_if_no_cmake
def test_cmake_parser(self):
testdir = os.path.join(self.unit_test_dir, '65 cmake parser')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
def test_alias_target(self):
if self.backend is Backend.vs:
# FIXME: This unit test is broken with vs backend, needs investigation
raise unittest.SkipTest(f'Skipping alias_target test with {self.backend.name} backend')
testdir = os.path.join(self.unit_test_dir, '66 alias target')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'hello.txt'))
self.run_target('build-all')
self.assertPathExists(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathExists(os.path.join(self.builddir, 'hello.txt'))
def test_configure(self):
testdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(testdir)
self._run(self.mconf_command + [self.builddir])
def test_summary(self):
testdir = os.path.join(self.unit_test_dir, '73 summary')
out = self.init(testdir)
expected = textwrap.dedent(r'''
Some Subproject 2.0
string : bar
integer: 1
boolean: True
My Project 1.0
Configuration
Some boolean : False
Another boolean: True
Some string : Hello World
A list : string
1
True
empty list :
enabled_opt : enabled
A number : 1
yes : YES
no : NO
coma list : a, b, c
Stuff
missing prog : NO
existing prog : ''' + sys.executable + '''
missing dep : NO
internal dep : YES
Plugins
long coma list : alpha, alphacolor, apetag, audiofx, audioparsers, auparse,
autodetect, avi
Subprojects
sub : YES
sub2 : NO Problem encountered: This subproject failed
''')
expected_lines = expected.split('\n')[1:]
out_start = out.find(expected_lines[0])
out_lines = out[out_start:].split('\n')[:len(expected_lines)]
if sys.version_info < (3, 7, 0):
# Dictionary order is not stable in Python <3.7, so sort the lines
# while comparing
self.assertEqual(sorted(expected_lines), sorted(out_lines))
else:
self.assertEqual(expected_lines, out_lines)
def test_meson_compile(self):
"""Test the meson compile command."""
def get_exe_name(basename: str) -> str:
if is_windows():
return f'{basename}.exe'
else:
return basename
def get_shared_lib_name(basename: str) -> str:
if mesonbuild.environment.detect_msys2_arch():
return f'lib{basename}.dll'
elif is_windows():
return f'{basename}.dll'
elif is_cygwin():
return f'cyg{basename}.dll'
elif is_osx():
return f'lib{basename}.dylib'
else:
return f'lib{basename}.so'
def get_static_lib_name(basename: str) -> str:
return f'lib{basename}.a'
# Base case (no targets or additional arguments)
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
self._run([*self.meson_command, 'compile', '-C', self.builddir])
self.assertPathExists(os.path.join(self.builddir, get_exe_name('trivialprog')))
# `--clean`
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--clean'])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
# Target specified in a project with unique names
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir, extra_args=['--wipe'])
# Multiple targets and target type specified
self._run([*self.meson_command, 'compile', '-C', self.builddir, 'mylib', 'mycpplib:shared_library'])
# Check that we have a shared lib, but not an executable, i.e. check that target actually worked
self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mylib')))
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('prog')))
self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mycpplib')))
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('cppprog')))
# Target specified in a project with non-unique names
testdir = os.path.join(self.common_test_dir, '186 same target name')
self.init(testdir, extra_args=['--wipe'])
self._run([*self.meson_command, 'compile', '-C', self.builddir, './foo'])
self.assertPathExists(os.path.join(self.builddir, get_static_lib_name('foo')))
self._run([*self.meson_command, 'compile', '-C', self.builddir, 'sub/foo'])
self.assertPathExists(os.path.join(self.builddir, 'sub', get_static_lib_name('foo')))
# run_target
testdir = os.path.join(self.common_test_dir, '52 run target')
self.init(testdir, extra_args=['--wipe'])
out = self._run([*self.meson_command, 'compile', '-C', self.builddir, 'py3hi'])
self.assertIn('I am Python3.', out)
# `--$BACKEND-args`
testdir = os.path.join(self.common_test_dir, '1 trivial')
if self.backend is Backend.ninja:
self.init(testdir, extra_args=['--wipe'])
# Dry run - should not create a program
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--ninja-args=-n'])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
elif self.backend is Backend.vs:
self.init(testdir, extra_args=['--wipe'])
self._run([*self.meson_command, 'compile', '-C', self.builddir])
# Explicitly clean the target through msbuild interface
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--vs-args=-t:{}:Clean'.format(re.sub(r'[\%\$\@\;\.\(\)\']', '_', get_exe_name('trivialprog')))])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
def test_spurious_reconfigure_built_dep_file(self):
testdir = os.path.join(self.unit_test_dir, '75 dep files')
# Regression test: a spurious reconfigure was happening when the build
# directory is inside the source directory.
# See https://gitlab.freedesktop.org/gstreamer/gst-build/-/issues/85.
srcdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, srcdir)
builddir = os.path.join(srcdir, '_build')
self.change_builddir(builddir)
self.init(srcdir)
self.build()
# During the first configure the file did not exist, so no dependency should
# have been set. A rebuild should not trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
self.init(srcdir, extra_args=['--reconfigure'])
# During the reconfigure the file did exist, but it is inside the build
# directory, so no dependency should have been set. A rebuild should not
# trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
def _test_junit(self, case: str) -> None:
try:
import lxml.etree as et
except ImportError:
raise unittest.SkipTest('lxml required, but not found.')
schema = et.XMLSchema(et.parse(str(Path(__file__).parent / 'data' / 'schema.xsd')))
self.init(case)
self.run_tests()
junit = et.parse(str(Path(self.builddir) / 'meson-logs' / 'testlog.junit.xml'))
try:
schema.assertValid(junit)
except et.DocumentInvalid as e:
self.fail(e.error_log)
def test_junit_valid_tap(self):
self._test_junit(os.path.join(self.common_test_dir, '207 tap tests'))
def test_junit_valid_exitcode(self):
self._test_junit(os.path.join(self.common_test_dir, '42 test args'))
def test_junit_valid_gtest(self):
self._test_junit(os.path.join(self.framework_test_dir, '2 gtest'))
def test_link_language_linker(self):
# TODO: there should be some way to query how we're linking things
# without resorting to reading the build.ninja file
if self.backend is not Backend.ninja:
raise unittest.SkipTest('This test reads the ninja file')
testdir = os.path.join(self.common_test_dir, '226 link language')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, encoding='utf-8') as f:
contents = f.read()
self.assertRegex(contents, r'build main(\.exe)?.*: c_LINKER')
self.assertRegex(contents, r'build (lib|cyg)?mylib.*: c_LINKER')
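# The two regexes above match build.ninja edges of roughly this shape
# (a sketch; real lines carry platform suffixes and object file lists):
#   build main: c_LINKER main.c.o
#   build libmylib.so: c_LINKER mylib.c.o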
def test_commands_documented(self):
'''
Test that all listed meson commands are documented in Commands.md.
'''
# The docs directory is not in release tarballs.
if not os.path.isdir('docs'):
raise unittest.SkipTest('Doc directory does not exist.')
doc_path = 'docs/markdown/Commands.md'
md = None
with open(doc_path, encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
## Get command sections
section_pattern = re.compile(r'^### (.+)$', re.MULTILINE)
md_command_section_matches = [i for i in section_pattern.finditer(md)]
md_command_sections = dict()
for i, s in enumerate(md_command_section_matches):
section_end = len(md) if i == len(md_command_section_matches) - 1 else md_command_section_matches[i + 1].start()
md_command_sections[s.group(1)] = (s.start(), section_end)
## Validate commands
md_commands = set(md_command_sections.keys())
help_output = self._run(self.meson_command + ['--help'])
help_commands = {c.strip() for c in re.findall(r'usage:(?:.+)?{((?:[a-z]+,*)+?)}', help_output, re.MULTILINE|re.DOTALL)[0].split(',')}
self.assertEqual(md_commands | {'help'}, help_commands, f'Doc file: `{doc_path}`')
## Validate that each section has proper placeholders
def get_data_pattern(command):
return re.compile(
r'{{ ' + command + r'_usage.inc }}[\r\n]'
r'.*?'
r'{{ ' + command + r'_arguments.inc }}[\r\n]',
flags = re.MULTILINE|re.DOTALL)
for command in md_commands:
m = get_data_pattern(command).search(md, pos=md_command_sections[command][0], endpos=md_command_sections[command][1])
self.assertIsNotNone(m, f'Command `{command}` is missing placeholders for dynamic data. Doc file: `{doc_path}`')
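# The placeholder layout this enforces inside each `### command` section of
# Commands.md looks like (a sketch):
#   ### compile
#   {{ compile_usage.inc }}
#   ...free-form description...
#   {{ compile_arguments.inc }}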
def _check_coverage_files(self, types=('text', 'xml', 'html')):
covdir = Path(self.builddir) / 'meson-logs'
files = []
if 'text' in types:
files.append('coverage.txt')
if 'xml' in types:
files.append('coverage.xml')
if 'html' in types:
files.append('coveragereport/index.html')
for f in files:
self.assertTrue((covdir / f).is_file(), msg=f'{f} is not a file')
def test_coverage(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage')
self._check_coverage_files()
def test_coverage_complex(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '106 generatorcustom')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage')
self._check_coverage_files()
def test_coverage_html(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
self._check_coverage_files(['html'])
def test_coverage_text(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-text')
self._check_coverage_files(['text'])
def test_coverage_xml(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-xml')
self._check_coverage_files(['xml'])
def test_cross_file_constants(self):
with temp_filename() as crossfile1, temp_filename() as crossfile2:
with open(crossfile1, 'w') as f:
f.write(textwrap.dedent(
'''
[constants]
compiler = 'gcc'
'''))
with open(crossfile2, 'w') as f:
f.write(textwrap.dedent(
'''
[constants]
toolchain = '/toolchain/'
common_flags = ['--sysroot=' + toolchain / 'sysroot']
[properties]
c_args = common_flags + ['-DSOMETHING']
cpp_args = c_args + ['-DSOMETHING_ELSE']
[binaries]
c = toolchain / compiler
'''))
values = mesonbuild.coredata.parse_machine_files([crossfile1, crossfile2])
self.assertEqual(values['binaries']['c'], '/toolchain/gcc')
self.assertEqual(values['properties']['c_args'],
['--sysroot=/toolchain/sysroot', '-DSOMETHING'])
self.assertEqual(values['properties']['cpp_args'],
['--sysroot=/toolchain/sysroot', '-DSOMETHING', '-DSOMETHING_ELSE'])
@unittest.skipIf(is_windows(), 'Directory cleanup fails for some reason')
def test_wrap_git(self):
with tempfile.TemporaryDirectory() as tmpdir:
srcdir = os.path.join(tmpdir, 'src')
shutil.copytree(os.path.join(self.unit_test_dir, '82 wrap-git'), srcdir)
upstream = os.path.join(srcdir, 'subprojects', 'wrap_git_upstream')
upstream_uri = Path(upstream).as_uri()
_git_init(upstream)
with open(os.path.join(srcdir, 'subprojects', 'wrap_git.wrap'), 'w') as f:
f.write(textwrap.dedent('''
[wrap-git]
url = {}
patch_directory = wrap_git_builddef
revision = master
'''.format(upstream_uri)))
self.init(srcdir)
self.build()
self.run_tests()
def test_multi_output_custom_target_no_warning(self):
testdir = os.path.join(self.common_test_dir, '229 custom_target source')
out = self.init(testdir)
self.assertNotRegex(out, 'WARNING:.*Using the first one.')
self.build()
self.run_tests()
@unittest.skipUnless(is_linux() and (re.search('^i.86$|^x86$|^x64$|^x86_64$|^amd64$', platform.processor()) is not None),
'Requires ASM compiler for x86 or x86_64 platform currently only available on Linux CI runners')
def test_nostdlib(self):
testdir = os.path.join(self.unit_test_dir, '79 nostdlib')
machinefile = os.path.join(self.builddir, 'machine.txt')
with open(machinefile, 'w') as f:
f.write(textwrap.dedent('''
[properties]
c_stdlib = 'mylibc'
'''))
# Test native C stdlib
self.meson_native_file = machinefile
self.init(testdir)
self.build()
# Test cross C stdlib
self.new_builddir()
self.meson_native_file = None
self.meson_cross_file = machinefile
self.init(testdir)
self.build()
def test_meson_version_compare(self):
testdir = os.path.join(self.unit_test_dir, '83 meson version compare')
out = self.init(testdir)
self.assertNotRegex(out, r'WARNING')
def test_wrap_redirect(self):
redirect_wrap = os.path.join(self.builddir, 'redirect.wrap')
real_wrap = os.path.join(self.builddir, 'foo/subprojects/real.wrap')
os.makedirs(os.path.dirname(real_wrap))
# Invalid redirect, filename must have .wrap extension
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = foo/subprojects/real.wrapper
'''))
with self.assertRaisesRegex(WrapException, 'wrap-redirect filename must be a .wrap file'):
PackageDefinition(redirect_wrap)
# Invalid redirect, filename cannot be in parent directory
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = ../real.wrap
'''))
with self.assertRaisesRegex(WrapException, 'wrap-redirect filename cannot contain ".."'):
PackageDefinition(redirect_wrap)
# Invalid redirect, filename must be in foo/subprojects/real.wrap
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = foo/real.wrap
'''))
with self.assertRaisesRegex(WrapException, 'wrap-redirect filename must be in the form foo/subprojects/bar.wrap'):
wrap = PackageDefinition(redirect_wrap)
# Correct redirect
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = foo/subprojects/real.wrap
'''))
with open(real_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-git]
url = http://invalid
'''))
wrap = PackageDefinition(redirect_wrap)
self.assertEqual(wrap.get('url'), 'http://invalid')
@skip_if_no_cmake
def test_nested_cmake_rebuild(self) -> None:
# This checks a bug where a non-meson project used as a third-level (or
# deeper) subproject did not trigger a rebuild when its build files changed.
testdir = os.path.join(self.unit_test_dir, '86 nested subproject regenerate depends')
cmakefile = Path(testdir) / 'subprojects' / 'sub2' / 'CMakeLists.txt'
self.init(testdir)
self.build()
# Touch the CMakeLists.txt so its mtime changes and a regeneration is expected.
with cmakefile.open('a'):
    os.utime(str(cmakefile))
self.assertReconfiguredBuildIsNoop()
def test_version_file(self):
srcdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(srcdir)
projinfo = self.introspect('--projectinfo')
self.assertEqual(projinfo['version'], '1.0.0')
def test_cflags_cppflags(self):
envs = {'CPPFLAGS': '-DCPPFLAG',
'CFLAGS': '-DCFLAG',
'CXXFLAGS': '-DCXXFLAG'}
srcdir = os.path.join(self.unit_test_dir, '90 multiple envvars')
self.init(srcdir, override_envvars=envs)
self.build()
def test_build_b_options(self) -> None:
# Currently (0.57) these do nothing, but they've always been allowed
srcdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(srcdir, extra_args=['-Dbuild.b_lto=true'])
def test_install_skip_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '91 install skip subprojects')
self.init(testdir)
self.build()
main_expected = [
'',
'share',
'include',
'foo',
'bin',
'share/foo',
'share/foo/foo.dat',
'include/foo.h',
'foo/foofile',
'bin/foo' + exe_suffix,
]
bar_expected = [
'bar',
'share/foo/bar.dat',
'include/bar.h',
'bin/bar' + exe_suffix,
'bar/barfile'
]
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() == 'msvc':
main_expected.append('bin/foo.pdb')
bar_expected.append('bin/bar.pdb')
prefix = destdir_join(self.installdir, self.prefix)
main_expected = [Path(prefix, p) for p in main_expected]
bar_expected = [Path(prefix, p) for p in bar_expected]
all_expected = main_expected + bar_expected
def check_installed_files(extra_args, expected):
args = ['install', '--destdir', self.installdir] + extra_args
self._run(self.meson_command + args, workdir=self.builddir)
all_files = list(Path(self.installdir).rglob('*'))
self.assertEqual(sorted(expected), sorted(all_files))
windows_proof_rmtree(self.installdir)
check_installed_files([], all_expected)
check_installed_files(['--skip-subprojects'], main_expected)
check_installed_files(['--skip-subprojects', 'bar'], main_expected)
check_installed_files(['--skip-subprojects', 'another'], all_expected)
def test_adding_subproject_to_configure_project(self) -> None:
srcdir = os.path.join(self.unit_test_dir, '92 new subproject in configured project')
self.init(srcdir)
self.build()
self.setconf('-Duse-sub=true')
self.build()
def test_devenv(self):
testdir = os.path.join(self.unit_test_dir, '91 devenv')
self.init(testdir)
self.build()
cmd = self.meson_command + ['devenv', '-C', self.builddir]
script = os.path.join(testdir, 'test-devenv.py')
app = os.path.join(self.builddir, 'app')
self._run(cmd + python_command + [script])
self.assertEqual('This is text.', self._run(cmd + [app]).strip())
def test_clang_format(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest(f'Skipping clang-format tests with {self.backend.name} backend')
if not shutil.which('clang-format'):
raise unittest.SkipTest('clang-format not found')
testdir = os.path.join(self.unit_test_dir, '93 clangformat')
newdir = os.path.join(self.builddir, 'testdir')
shutil.copytree(testdir, newdir)
self.new_builddir()
self.init(newdir)
        # Should reformat 1 file but not return an error
output = self.build('clang-format')
self.assertEqual(1, output.count('File reformatted:'))
# Reset source tree then try again with clang-format-check, it should
# return an error code this time.
windows_proof_rmtree(newdir)
shutil.copytree(testdir, newdir)
with self.assertRaises(subprocess.CalledProcessError):
output = self.build('clang-format-check')
self.assertEqual(1, output.count('File reformatted:'))
# All code has been reformatted already, so it should be no-op now.
output = self.build('clang-format')
self.assertEqual(0, output.count('File reformatted:'))
self.build('clang-format-check')
def test_custom_target_implicit_include(self):
testdir = os.path.join(self.unit_test_dir, '94 custominc')
self.init(testdir)
self.build()
compdb = self.get_compdb()
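        # 'easytogrepfor' is a grep-friendly marker (presumably an include
        # path) defined by the test case: prog.c must not pick up the custom
        # target's implicit include, while prog2.c must.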
matches = 0
for c in compdb:
if 'prog.c' in c['file']:
self.assertNotIn('easytogrepfor', c['command'])
matches += 1
self.assertEqual(matches, 1)
matches = 0
for c in compdb:
if 'prog2.c' in c['file']:
self.assertIn('easytogrepfor', c['command'])
matches += 1
self.assertEqual(matches, 1)
class FailureTests(BasePlatformTests):
'''
Tests that test failure conditions. Build files here should be dynamically
generated and static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config.*not found'
def setUp(self):
super().setUp()
self.srcdir = os.path.realpath(tempfile.mkdtemp())
self.mbuild = os.path.join(self.srcdir, 'meson.build')
self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
def tearDown(self):
super().tearDown()
windows_proof_rmtree(self.srcdir)
def assertMesonRaises(self, contents, match, *,
extra_args=None,
langs=None,
meson_version=None,
options=None,
override_envvars=None):
'''
Assert that running meson configure on the specified @contents raises
        an error message matching regex @match.
'''
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('failure test', 'c', 'cpp'")
if meson_version:
f.write(f", meson_version: '{meson_version}'")
f.write(")\n")
for lang in langs:
f.write(f"add_languages('{lang}', required : false)\n")
f.write(contents)
if options is not None:
with open(self.moptions, 'w') as f:
f.write(options)
o = {'MESON_FORCE_BACKTRACE': '1'}
if override_envvars is None:
override_envvars = o
else:
override_envvars.update(o)
# Force tracebacks so we can detect them properly
with self.assertRaisesRegex(MesonException, match, msg=contents):
# Must run in-process or we'll get a generic CalledProcessError
self.init(self.srcdir, extra_args=extra_args,
inprocess=True,
override_envvars = override_envvars)
def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('output test', 'c', 'cpp'")
if meson_version:
f.write(f", meson_version: '{meson_version}'")
f.write(")\n")
for lang in langs:
f.write(f"add_languages('{lang}', required : false)\n")
f.write(contents)
# Run in-process for speed and consistency with assertMesonRaises
return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
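        # (meson.build snippet, expected error regex) pairs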
a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "Item must be a list or one of <class 'str'>"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in a:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_extraframework_dependency_method(self):
code = "dependency('python', method : 'extraframework')"
if not is_osx():
self.assertMesonRaises(code, self.dnf)
else:
# Python2 framework is always available on macOS
self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
with no_pkgconfig():
# Look for pkg-config, cache it, then
# Use cached pkg-config without erroring out, then
# Use cached pkg-config to error out
code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
"dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
"dependency('sdl2', method : 'pkg-config')"
self.assertMesonRaises(code, self.nopkg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
f"(requires a Objc compiler|{self.dnf})",
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Run-time dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
f"(required.*fail|{self.dnf})")
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
f"(fail.*not found|{self.dnf})")
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost')",
f"(boost_root.*absolute|{self.dnf})",
override_envvars = {'BOOST_ROOT': 'relative/path'})
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, ".* is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env()
try:
env.detect_objc_compiler(MachineChoice.HOST)
env.detect_objcpp_compiler(MachineChoice.HOST)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
1. The correct message is outputted when a not-required dep is not
found and the fallback subproject is also not found.
2. A not-required fallback dependency is not found because the
subproject failed to parse.
3. A not-found not-required dep with a fallback subproject outputs the
correct message when the fallback subproject is found but the
variable inside it is not.
4. A fallback dependency is found from the subproject parsed in (3)
5. A wrap file from a subproject is used but fails because it does not
contain required keys.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
stray_file = os.path.join(tdir, 'subprojects/subsubproject.wrap')
if os.path.exists(stray_file):
windows_proof_rm(stray_file)
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Neither a subproject directory nor a .*nosubproj.wrap.* file was found")
self.assertRegex(out, r'Function does not take positional arguments.')
self.assertRegex(out, r'Dependency .*somenotfounddep.* from subproject .*subprojects/somesubproj.* found: .*NO.*')
self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
self.assertRegex(out, r'Missing key .*source_filename.* in subsubproject.wrap')
windows_proof_rm(stray_file)
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'})
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
def test_version_checked_before_parsing_options(self):
'''
https://github.com/mesonbuild/meson/issues/5281
'''
options = "option('some-option', type: 'foo', value: '')"
match = 'Meson version is.*but project requires >=2000'
self.assertMesonRaises("", match, meson_version='>=2000', options=options)
def test_assert_default_message(self):
self.assertMesonRaises("k1 = 'a'\n" +
"assert({\n" +
" k1: 1,\n" +
"}['a'] == 2)\n",
r"Assert failed: {k1 : 1}\['a'\] == 2")
def test_wrap_nofallback(self):
self.assertMesonRaises("dependency('notfound', fallback : ['foo', 'foo_dep'])",
r"Dependency \'notfound\' not found and fallback is disabled",
extra_args=['--wrap-mode=nofallback'])
def test_message(self):
self.assertMesonOutputs("message('Array:', ['a', 'b'])",
r"Message:.* Array: \['a', 'b'\]")
def test_warning(self):
self.assertMesonOutputs("warning('Array:', ['a', 'b'])",
r"WARNING:.* Array: \['a', 'b'\]")
def test_override_dependency_twice(self):
self.assertMesonRaises("meson.override_dependency('foo', declare_dependency())\n" +
"meson.override_dependency('foo', declare_dependency())",
"""Tried to override dependency 'foo' which has already been resolved or overridden""")
@unittest.skipIf(is_windows(), 'zlib is not available on Windows')
def test_override_resolved_dependency(self):
self.assertMesonRaises("dependency('zlib')\n" +
"meson.override_dependency('zlib', declare_dependency())",
"""Tried to override dependency 'zlib' which has already been resolved or overridden""")
def test_error_func(self):
self.assertMesonRaises("error('a', 'b', ['c', ['d', {'e': 'f'}]], 'g')",
r"Problem encountered: a b \['c', \['d', {'e' : 'f'}\]\] g")
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
@mock.patch.dict(os.environ)
def test_find_program(self):
'''
Test that Windows-specific edge-cases in find_program are functioning
correctly. Cannot be an ordinary test because it involves manipulating
PATH to point to a directory with Python scripts.
'''
testdir = os.path.join(self.platform_test_dir, '8 find program')
# Find `cmd` and `cmd.exe`
prog1 = ExternalProgram('cmd')
self.assertTrue(prog1.found(), msg='cmd not found')
prog2 = ExternalProgram('cmd.exe')
self.assertTrue(prog2.found(), msg='cmd.exe not found')
self.assertPathEqual(prog1.get_path(), prog2.get_path())
# Find cmd.exe with args without searching
prog = ExternalProgram('cmd', command=['cmd', '/C'])
self.assertTrue(prog.found(), msg='cmd not found with args')
self.assertPathEqual(prog.get_command()[0], 'cmd')
# Find cmd with an absolute path that's missing the extension
cmd_path = prog2.get_path()[:-4]
prog = ExternalProgram(cmd_path)
self.assertTrue(prog.found(), msg=f'{cmd_path!r} not found')
# Finding a script with no extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script'))
self.assertTrue(prog.found(), msg='test-script not found')
# Finding a script with an extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
self.assertTrue(prog.found(), msg='test-script-ext.py not found')
# Finding a script in PATH
os.environ['PATH'] += os.pathsep + testdir
# If `.PY` is in PATHEXT, scripts can be found as programs
if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
# Finding a script in PATH w/o extension works and adds the interpreter
prog = ExternalProgram('test-script-ext')
self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Finding a script in PATH with extension works and adds the interpreter
prog = ExternalProgram('test-script-ext.py')
self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Using a script with an extension directly via command= works and adds the interpreter
prog = ExternalProgram('test-script-ext.py', command=[os.path.join(testdir, 'test-script-ext.py'), '--help'])
self.assertTrue(prog.found(), msg='test-script-ext.py with full path not picked up via command=')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathEqual(prog.get_command()[2], '--help')
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Using a script without an extension directly via command= works and adds the interpreter
prog = ExternalProgram('test-script', command=[os.path.join(testdir, 'test-script'), '--help'])
self.assertTrue(prog.found(), msg='test-script with full path not picked up via command=')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathEqual(prog.get_command()[2], '--help')
self.assertPathBasenameEqual(prog.get_path(), 'test-script')
# Ensure that WindowsApps gets removed from PATH
path = os.environ['PATH']
if 'WindowsApps' not in path:
username = os.environ['USERNAME']
appstore_dir = fr'C:\Users\{username}\AppData\Local\Microsoft\WindowsApps'
path = os.pathsep + appstore_dir
path = ExternalProgram._windows_sanitize_path(path)
self.assertNotIn('WindowsApps', path)
def test_ignore_libs(self):
'''
Test that find_library on libs that are to be ignored returns an empty
array of arguments. Must be a unit test because we cannot inspect
ExternalLibraryHolder from build files.
'''
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Not using MSVC')
        # Assert the exact set so that anyone changing ignore_libs is forced
        # to update this test as well.
self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
for l in cc.ignore_libs:
self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
testdir = os.path.join(self.platform_test_dir, '5 resources')
# resource compiler depfile generation is not yet implemented for msvc
env = get_fake_env(testdir, self.builddir, self.prefix)
depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Test compile_resources(depend_file:)
# Changing mtime of sample.ico should rebuild prog
self.utime(os.path.join(testdir, 'res', 'sample.ico'))
self.assertRebuiltTarget('prog')
# Test depfile generation by compile_resources
# Changing mtime of resource.h should rebuild myres.rc and then prog
if depfile_works:
self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
self.assertRebuiltTarget('prog')
self.wipe()
if depfile_works:
testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
self.utime(os.path.join(testdir, 'res', 'resource.h'))
self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
try:
self.init(testdir)
except subprocess.CalledProcessError:
# According to Python docs, output is only stored when
# using check_output. We don't use it, so we can't check
# that the output is correct (i.e. that it failed due
# to the right reason).
return
self.build()
def test_install_pdb_introspection(self):
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
self.init(testdir)
installed = self.introspect('--installed')
files = [os.path.basename(path) for path in installed.values()]
        self.assertIn('prog.pdb', files)
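    # Helper: set the per-language linker environment variable (e.g. CC_LD
    # for c) to `name` and verify that the detected linker id is `expected`.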
def _check_ld(self, name: str, lang: str, expected: str) -> None:
if not shutil.which(name):
raise unittest.SkipTest(f'Could not find {name}.')
envvars = [mesonbuild.envconfig.ENV_VAR_PROG_MAP[f'{lang}_ld']]
# Also test a deprecated variable if there is one.
if f'{lang}_ld' in mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP:
envvars.append(
mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP[f'{lang}_ld'])
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
try:
comp = getattr(env, f'detect_{lang}_compiler')(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest(f'Could not find a compiler for {lang}')
self.assertEqual(comp.linker.id, expected)
def test_link_environment_variable_lld_link(self):
env = get_fake_env()
comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('lld-link', 'c', 'lld-link')
def test_link_environment_variable_link(self):
env = get_fake_env()
comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('link', 'c', 'link')
def test_link_environment_variable_optlink(self):
env = get_fake_env()
comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('optlink', 'c', 'optlink')
@skip_if_not_language('rust')
def test_link_environment_variable_rust(self):
self._check_ld('link', 'rust', 'link')
@skip_if_not_language('d')
def test_link_environment_variable_d(self):
env = get_fake_env()
comp = getattr(env, 'detect_d_compiler')(MachineChoice.HOST)
if comp.id == 'dmd':
raise unittest.SkipTest('meson cannot reliably make DMD use a different linker.')
self._check_ld('lld-link', 'd', 'lld-link')
def test_pefile_checksum(self):
try:
import pefile
except ImportError:
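            # On CI pefile must be installed, so re-raise; elsewhere just skip.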
if is_ci():
raise
raise unittest.SkipTest('pefile module not found')
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir, extra_args=['--buildtype=release'])
self.build()
# Test that binaries have a non-zero checksum
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
cc_id = cc.get_id()
ld_id = cc.get_linker_id()
dll = glob(os.path.join(self.builddir, '*mycpplib.dll'))[0]
exe = os.path.join(self.builddir, 'cppprog.exe')
for f in (dll, exe):
pe = pefile.PE(f)
msg = f'PE file: {f!r}, compiler: {cc_id!r}, linker: {ld_id!r}'
if cc_id == 'clang-cl':
# Latest clang-cl tested (7.0) does not write checksums out
self.assertFalse(pe.verify_checksum(), msg=msg)
else:
# Verify that a valid checksum was written by all other compilers
self.assertTrue(pe.verify_checksum(), msg=msg)
def test_qt5dependency_vscrt(self):
'''
Test that qt5 dependencies use the debug module suffix when b_vscrt is
set to 'mdd'
'''
# Verify that the `b_vscrt` option is available
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if OptionKey('b_vscrt') not in cc.base_options:
raise unittest.SkipTest('Compiler does not support setting the VS CRT')
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake') and not is_ci():
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output and not is_ci():
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Setup with /MDd
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Db_vscrt=mdd'])
# Verify that we're linking to the debug versions of Qt DLLs
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, encoding='utf-8') as f:
contents = f.read()
m = re.search('build qt5core.exe: cpp_LINKER.*Qt5Cored.lib', contents)
self.assertIsNotNone(m, msg=contents)
def test_compiler_checks_vscrt(self):
'''
Test that the correct VS CRT is used when running compiler checks
'''
# Verify that the `b_vscrt` option is available
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if OptionKey('b_vscrt') not in cc.base_options:
raise unittest.SkipTest('Compiler does not support setting the VS CRT')
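        # Check that every compiler sanity check recorded in the meson log
        # used the expected VS CRT flag.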
def sanitycheck_vscrt(vscrt):
checks = self.get_meson_log_sanitychecks()
self.assertTrue(len(checks) > 0)
for check in checks:
self.assertIn(vscrt, check)
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
sanitycheck_vscrt('/MDd')
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=debugoptimized'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=release'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=md'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mdd'])
sanitycheck_vscrt('/MDd')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mt'])
sanitycheck_vscrt('/MT')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mtd'])
sanitycheck_vscrt('/MTd')
def test_modules(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest(f'C++ modules only work with the Ninja backend (not {self.backend.name}).')
if 'VSCMD_VER' not in os.environ:
            raise unittest.SkipTest('C++ modules are only supported with Visual Studio.')
if version_compare(os.environ['VSCMD_VER'], '<16.10.0'):
raise unittest.SkipTest('C++ modules are only supported with VS 2019 Preview or newer.')
self.init(os.path.join(self.unit_test_dir, '87 cpp modules'))
self.build()
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
'''
Tests that should run on macOS
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
def test_apple_bitcode(self):
'''
Test that -fembed-bitcode is correctly added while compiling and
-bitcode_bundle is added while linking when b_bitcode is true and not
when it is false. This can't be an ordinary test case because we need
to inspect the compiler database.
'''
testdir = os.path.join(self.platform_test_dir, '7 bitcode')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.id != 'clang':
raise unittest.SkipTest('Not using Clang on OSX')
# Try with bitcode enabled
out = self.init(testdir, extra_args='-Db_bitcode=true')
# Warning was printed
self.assertRegex(out, 'WARNING:.*b_bitcode')
# Compiler options were added
for compdb in self.get_compdb():
if 'module' in compdb['file']:
self.assertNotIn('-fembed-bitcode', compdb['command'])
else:
self.assertIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
# Linker options were added
with open(build_ninja, encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNotNone(m, msg=contents)
# Try with bitcode disabled
self.setconf('-Db_bitcode=false')
# Regenerate build
self.build()
for compdb in self.get_compdb():
self.assertNotIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNone(m, msg=contents)
def test_apple_bitcode_modules(self):
'''
Same as above, just for shared_module()
'''
testdir = os.path.join(self.common_test_dir, '149 shared module resolving symbol in executable')
# Ensure that it builds even with bitcode enabled
self.init(testdir, extra_args='-Db_bitcode=true')
self.build()
self.run_tests()
def _get_darwin_versions(self, fname):
fname = os.path.join(self.builddir, fname)
out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
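        # The second line of `otool -L` output describes the library itself,
        # e.g. '... (compatibility version 7.0.0, current version 7.0.0)'.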
m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
self.assertIsNotNone(m, msg=out)
return m.groups()
@skipIfNoPkgconfig
def test_library_versioning(self):
'''
Ensure that compatibility_version and current_version are set correctly
'''
testdir = os.path.join(self.platform_test_dir, '2 library versions')
self.init(testdir)
self.build()
targets = {}
for t in self.introspect('--targets'):
targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
def test_duplicate_rpath(self):
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
# We purposely pass a duplicate rpath to Meson, in order
# to ascertain that Meson does not call install_name_tool
# with duplicate -delete_rpath arguments, which would
# lead to erroring out on installation
env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"}
self.init(testdir, override_envvars=env)
self.build()
self.install()
def test_removing_unused_linker_args(self):
testdir = os.path.join(self.common_test_dir, '105 has arg')
env = {'CFLAGS': '-L/tmp -L /var/tmp -headerpad_max_install_names -Wl,-export_dynamic -framework Foundation'}
self.init(testdir, override_envvars=env)
@unittest.skipIf(is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
def test_pic(self):
'''
Test that -fPIC is correctly added to static libraries when b_staticpic
is true and not when it is false. This can't be an ordinary test case
because we need to inspect the compiler database.
'''
if is_windows() or is_cygwin() or is_osx():
raise unittest.SkipTest('PIC not relevant')
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir)
compdb = self.get_compdb()
self.assertIn('-fPIC', compdb[0]['command'])
self.setconf('-Db_staticpic=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fPIC', compdb[0]['command'])
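    # mock.patch.dict snapshots os.environ and restores it after the test,
    # since PKG_CONFIG_LIBDIR is mutated below.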
@mock.patch.dict(os.environ)
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
libhello_nolib = PkgConfigDependency('libhello_nolib', env, kwargs)
self.assertTrue(libhello_nolib.found())
self.assertEqual(libhello_nolib.get_link_args(), [])
self.assertEqual(libhello_nolib.get_compile_args(), [])
self.assertEqual(libhello_nolib.get_pkgconfig_variable('foo', {}), 'bar')
self.assertEqual(libhello_nolib.get_pkgconfig_variable('prefix', {}), self.prefix)
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen', 'dependencies')
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1})
privatedir2 = self.privatedir
env = {
'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2]),
'PKG_CONFIG_SYSTEM_LIBRARY_PATH': '/usr/lib',
}
self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env)
        # pkg-config strips some duplicated flags so we have to parse the
        # generated file ourselves.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'pub-lib-order']
out = self._run(cmd + ['--libs'], override_envvars=env).strip().split()
self.assertEqual(out, ['-llibmain2', '-llibinternal'])
        # See common/45 pkgconfig-gen/meson.build for a description of the case this test covers
with open(os.path.join(privatedir1, 'simple2.pc')) as f:
content = f.read()
self.assertIn('Libs: -L${libdir} -lsimple2 -lsimple1', content)
self.assertIn('Libs.private: -lz', content)
with open(os.path.join(privatedir1, 'simple3.pc')) as f:
content = f.read()
self.assertEqual(1, content.count('-lsimple3'))
with open(os.path.join(privatedir1, 'simple5.pc')) as f:
content = f.read()
self.assertNotIn('-lstat2', content)
@mock.patch.dict(os.environ)
def test_pkgconfig_uninstalled(self):
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
self.init(testdir)
self.build()
os.environ['PKG_CONFIG_LIBDIR'] = os.path.join(self.builddir, 'meson-uninstalled')
if is_cygwin():
os.environ['PATH'] += os.pathsep + self.builddir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen', 'dependencies')
self.init(testdir)
self.build()
self.run_tests()
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
        self.assertNotIn('blub_blob_blib', pcfile)
def test_vala_c_warnings(self):
'''
Test that no warnings are emitted for C code generated by Vala. This
can't be an ordinary test case because we need to inspect the compiler
database.
https://github.com/mesonbuild/meson/issues/864
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '5 target glib')
self.init(testdir)
compdb = self.get_compdb()
vala_command = None
c_command = None
for each in compdb:
if each['file'].endswith('GLib.Thread.c'):
vala_command = each['command']
elif each['file'].endswith('GLib.Thread.vala'):
continue
elif each['file'].endswith('retcode.c'):
c_command = each['command']
else:
m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
raise AssertionError(m)
self.assertIsNotNone(vala_command)
self.assertIsNotNone(c_command)
# -w suppresses all warnings, should be there in Vala but not in C
self.assertIn(" -w ", vala_command)
self.assertNotIn(" -w ", c_command)
# -Wall enables all warnings, should be there in C but not in Vala
self.assertNotIn(" -Wall ", vala_command)
self.assertIn(" -Wall ", c_command)
# -Werror converts warnings to errors, should always be there since it's
# injected by an unrelated piece of code and the project has werror=true
self.assertIn(" -Werror ", vala_command)
self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
'''
Test that qt4 and qt5 detection with pkgconfig works.
'''
# Verify Qt4 or Qt5 can be found with pkg-config
qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=pkg-config'])
# Confirm that the dependency was found with pkg-config
mesonlog = self.get_meson_log()
if qt4 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)')
if qt5 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
def test_qt5dependency_qmake_detection(self):
'''
Test that qt5 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt5
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES .* \(qmake\)\n')
def test_qt6dependency_qmake_detection(self):
'''
Test that qt6 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
        # Verify that qmake is for Qt6
if not shutil.which('qmake-qt6'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 6' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 6.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt6
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt6 \(modules: Core\) found: YES .* \(qmake\)\n')
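    # Meson keeps per-target private directories with a '.p' suffix next to
    # the outputs; filter them out so only real .so files are counted.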
def glob_sofiles_without_privdir(self, g):
files = glob(g)
return [f for f in files if not f.endswith('.p')]
def _test_soname_impl(self, libpath, install):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
testdir = os.path.join(self.unit_test_dir, '1 soname')
self.init(testdir)
self.build()
if install:
self.install()
# File without aliases set.
nover = os.path.join(libpath, 'libnover.so')
self.assertPathExists(nover)
self.assertFalse(os.path.islink(nover))
self.assertEqual(get_soname(nover), 'libnover.so')
self.assertEqual(len(self.glob_sofiles_without_privdir(nover[:-3] + '*')), 1)
# File with version set
verset = os.path.join(libpath, 'libverset.so')
self.assertPathExists(verset + '.4.5.6')
self.assertEqual(os.readlink(verset), 'libverset.so.4')
self.assertEqual(get_soname(verset), 'libverset.so.4')
self.assertEqual(len(self.glob_sofiles_without_privdir(verset[:-3] + '*')), 3)
# File with soversion set
soverset = os.path.join(libpath, 'libsoverset.so')
self.assertPathExists(soverset + '.1.2.3')
self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(len(self.glob_sofiles_without_privdir(soverset[:-3] + '*')), 2)
# File with version and soversion set to same values
settosame = os.path.join(libpath, 'libsettosame.so')
self.assertPathExists(settosame + '.7.8.9')
self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(len(self.glob_sofiles_without_privdir(settosame[:-3] + '*')), 2)
# File with version and soversion set to different values
bothset = os.path.join(libpath, 'libbothset.so')
self.assertPathExists(bothset + '.1.2.3')
self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
self.assertEqual(len(self.glob_sofiles_without_privdir(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
def test_compiler_check_flags_order(self):
'''
Test that compiler check flags override all other flags. This can't be
an ordinary test case because it needs the environment to be set.
'''
testdir = os.path.join(self.common_test_dir, '37 has function')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
Oflag = '-O3'
OflagCPP = Oflag
if cpp.get_id() in ('clang', 'gcc'):
# prevent developers from adding "int main(int argc, char **argv)"
# to small Meson checks unless these parameters are actually used
OflagCPP += ' -Werror=unused-parameter'
env = {'CFLAGS': Oflag,
'CXXFLAGS': OflagCPP}
self.init(testdir, override_envvars=env)
cmds = self.get_meson_log_compiler_checks()
for cmd in cmds:
if cmd[0] == 'ccache':
cmd = cmd[1:]
# Verify that -I flags from the `args` kwarg are first
# This is set in the '37 has function' test case
self.assertEqual(cmd[1], '-I/tmp')
# Verify that -O3 set via the environment is overridden by -O0
Oargs = [arg for arg in cmd if arg.startswith('-O')]
self.assertEqual(Oargs, [Oflag, '-O0'])
def _test_stds_impl(self, testdir: str, compiler: 'Compiler') -> None:
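        # _clang_at_least() takes separate minimum versions for vanilla clang
        # and Apple's clang, which uses its own version numbering.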
        has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or
                     (compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1')) or
                     (compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0')))
        has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or
                         (compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0')) or
                         (compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0')))
        has_cpp20 = (compiler.get_id() not in {'clang', 'gcc'} or
                     (compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=10.0.0', None)) or
                     (compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=10.0.0')))
        has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or
                   (compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0')) or
                   (compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0')))
# Check that all the listed -std=xxx options for this compiler work just fine when used
# https://en.wikipedia.org/wiki/Xcode#Latest_versions
# https://www.gnu.org/software/gcc/projects/cxx-status.html
key = OptionKey('std', lang=compiler.language)
for v in compiler.get_options()[key].choices:
# we do it like this to handle gnu++17,c++17 and gnu17,c17 cleanly
# thus, C++ first
if '++17' in v and not has_cpp17:
continue
elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support
continue
elif '++20' in v and not has_cpp20:
continue
# now C
elif '17' in v and not has_cpp2a_c17:
continue
elif '18' in v and not has_c18:
continue
self.init(testdir, extra_args=[f'-D{key!s}={v}'])
cmd = self.get_compdb()[0]['command']
# c++03 and gnu++03 are not understood by ICC, don't try to look for them
skiplist = frozenset([
('intel', 'c++03'),
('intel', 'gnu++03')])
if v != 'none' and not (compiler.get_id(), v) in skiplist:
cmd_std = f" -std={v} "
self.assertIn(cmd_std, cmd)
try:
self.build()
except Exception:
print(f'{key!s} was {v!r}')
raise
self.wipe()
# Check that an invalid std option in CFLAGS/CPPFLAGS fails
# Needed because by default ICC ignores invalid options
cmd_std = '-std=FAIL'
if compiler.language == 'c':
env_flag_name = 'CFLAGS'
elif compiler.language == 'cpp':
env_flag_name = 'CXXFLAGS'
else:
raise NotImplementedError(f'Language {compiler.language} not defined.')
env = {}
env[env_flag_name] = cmd_std
with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
msg='C compiler should have failed with -std=FAIL'):
self.init(testdir, override_envvars = env)
# ICC won't fail in the above because additional flags are needed to
# make unknown -std=... options errors.
self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cc)
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cpp)
def test_unity_subproj(self):
testdir = os.path.join(self.common_test_dir, '43 subproject')
self.init(testdir, extra_args='--unity=subprojects')
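        # With --unity=subprojects, unity sources should be generated only for
        # subproject targets, inside their private (.p) directories.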
pdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/simpletest*.p'))
self.assertEqual(len(pdirs), 1)
self.assertPathExists(os.path.join(pdirs[0], 'simpletest-unity0.c'))
sdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/*sublib*.p'))
self.assertEqual(len(sdirs), 1)
self.assertPathExists(os.path.join(sdirs[0], 'sublib-unity0.c'))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
def test_installed_modes(self):
'''
Test that files installed by these tests have the correct permissions.
Can't be an ordinary test because our installed_files.txt is very basic.
'''
# Test file modes
testdir = os.path.join(self.common_test_dir, '12 data')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'rw------T'
self.assertEqual(want_mode, found_mode[1:])
f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-sr-x'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
self.assertEqual(0, statf.st_gid)
f = os.path.join(self.installdir, 'usr', 'share', 'progname',
'fileobject_datafile.dat')
orig = os.path.join(testdir, 'fileobject_datafile.dat')
statf = os.stat(f)
statorig = os.stat(orig)
found_mode = stat.filemode(statf.st_mode)
orig_mode = stat.filemode(statorig.st_mode)
self.assertEqual(orig_mode[1:], found_mode[1:])
self.assertEqual(os.getuid(), statf.st_uid)
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_gid)
self.wipe()
# Test directory modes
testdir = os.path.join(self.common_test_dir, '60 install subdir')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-x--t'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
def test_installed_modes_extended(self):
'''
Test that files are installed with correct permissions using install_mode.
'''
testdir = os.path.join(self.common_test_dir, '191 install_mode')
self.init(testdir)
self.build()
self.install()
for fsobj, want_mode in [
('bin', 'drwxr-x---'),
('bin/runscript.sh', '-rwxr-sr-x'),
('bin/trivialprog', '-rwxr-sr-x'),
('include', 'drwxr-x---'),
('include/config.h', '-rw-rwSr--'),
('include/rootdir.h', '-r--r--r-T'),
('lib', 'drwxr-x---'),
('lib/libstat.a', '-rw---Sr--'),
('share', 'drwxr-x---'),
('share/man', 'drwxr-x---'),
('share/man/man1', 'drwxr-x---'),
('share/man/man1/foo.1', '-r--r--r-T'),
('share/sub1', 'drwxr-x---'),
('share/sub1/second.dat', '-rwxr-x--t'),
('subdir', 'drwxr-x---'),
('subdir/data.dat', '-rw-rwSr--'),
]:
f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(fsobj, want_mode, found_mode)))
# Ensure that introspect --installed works on all types of files
# FIXME: also verify the files list
self.introspect('--installed')
def test_install_umask(self):
'''
Test that files are installed with correct permissions using default
        install umask of 022, regardless of the umask at the time the worktree
was checked out or the build was executed.
'''
# Copy source tree to a temporary directory and change permissions
# there to simulate a checkout with umask 002.
orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
# Create a new testdir under tmpdir.
tmpdir = os.path.realpath(tempfile.mkdtemp())
self.addCleanup(windows_proof_rmtree, tmpdir)
testdir = os.path.join(tmpdir, '26 install umask')
# Copy the tree using shutil.copyfile, which will use the current umask
# instead of preserving permissions of the old tree.
save_umask = os.umask(0o002)
self.addCleanup(os.umask, save_umask)
shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
# Preserve the executable status of subdir/sayhello though.
os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
self.init(testdir)
# Run the build under a 027 umask now.
os.umask(0o027)
self.build()
# And keep umask 027 for the install step too.
self.install()
for executable in [
'bin/prog',
'share/subdir/sayhello',
]:
f = os.path.join(self.installdir, 'usr', *executable.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(executable, want_mode, found_mode)))
for directory in [
'usr',
'usr/bin',
'usr/include',
'usr/share',
'usr/share/man',
'usr/share/man/man1',
'usr/share/subdir',
]:
f = os.path.join(self.installdir, *directory.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'drwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected directory %s to have mode %s but found %s instead.' %
(directory, want_mode, found_mode)))
for datafile in [
'include/sample.h',
'share/datafile.cat',
'share/file.dat',
'share/man/man1/prog.1',
'share/subdir/datafile.dog',
]:
f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rw-r--r--'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(datafile, want_mode, found_mode)))
def test_cpp_std_override(self):
testdir = os.path.join(self.unit_test_dir, '6 std override')
self.init(testdir)
compdb = self.get_compdb()
# Don't try to use -std=c++03 as a check for the
# presence of a compiler flag, as ICC does not
# support it.
for i in compdb:
if 'prog98' in i['file']:
c98_comp = i['command']
if 'prog11' in i['file']:
c11_comp = i['command']
if 'progp' in i['file']:
plain_comp = i['command']
self.assertNotEqual(len(plain_comp), 0)
self.assertIn('-std=c++98', c98_comp)
self.assertNotIn('-std=c++11', c98_comp)
self.assertIn('-std=c++11', c11_comp)
self.assertNotIn('-std=c++98', c11_comp)
self.assertNotIn('-std=c++98', plain_comp)
self.assertNotIn('-std=c++11', plain_comp)
# Now werror
self.assertIn('-Werror', plain_comp)
self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
if is_cygwin() or is_osx():
raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
testdir = os.path.join(self.unit_test_dir, '7 run installed')
self.init(testdir)
self.build()
self.install()
installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
installed_libdir = os.path.join(self.installdir, 'usr/foo')
installed_lib = os.path.join(installed_libdir, 'libfoo.so')
self.assertTrue(os.path.isfile(installed_exe))
self.assertTrue(os.path.isdir(installed_libdir))
self.assertTrue(os.path.isfile(installed_lib))
# Must fail when run without LD_LIBRARY_PATH to ensure that
# rpath has been properly stripped rather than pointing to the builddir.
self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
# When LD_LIBRARY_PATH is set it should start working.
# For some reason setting LD_LIBRARY_PATH in os.environ fails
# when all tests are run (but works when only this test is run),
# but doing this explicitly works.
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
self.assertEqual(subprocess.call(installed_exe, env=env), 0)
# Ensure that introspect --installed works
installed = self.introspect('--installed')
for v in installed.values():
self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
# NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
# the flags before returning them to -Lfoo -Lbar -lfoo -lbar
# but pkgconf seems to not do that. Sigh. Support both.
expected_order = [('-L/me/first', '-lfoo1'),
('-L/me/second', '-lfoo2'),
('-L/me/first', '-L/me/second'),
('-lfoo1', '-lfoo2'),
('-L/me/second', '-L/me/third'),
('-L/me/third', '-L/me/fourth',),
('-L/me/third', '-lfoo3'),
('-L/me/fourth', '-lfoo4'),
('-lfoo3', '-lfoo4'),
]
with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
for line in ifile:
if expected_order[0][0] in line:
for first, second in expected_order:
self.assertLess(line.index(first), line.index(second))
return
raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
self.assertEqual(os.path.basename(docbook_target['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(docbook_target['target_sources'][0]['sources'][0]))
def test_introspect_installed(self):
testdir = os.path.join(self.linuxlike_test_dir, '7 library versions')
self.init(testdir)
install = self.introspect('--installed')
install = {os.path.basename(k): v for k, v in install.items()}
print(install)
if is_osx():
the_truth = {
'libmodule.dylib': '/usr/lib/libmodule.dylib',
'libnoversion.dylib': '/usr/lib/libnoversion.dylib',
'libonlysoversion.5.dylib': '/usr/lib/libonlysoversion.5.dylib',
'libonlysoversion.dylib': '/usr/lib/libonlysoversion.dylib',
'libonlyversion.1.dylib': '/usr/lib/libonlyversion.1.dylib',
'libonlyversion.dylib': '/usr/lib/libonlyversion.dylib',
'libsome.0.dylib': '/usr/lib/libsome.0.dylib',
'libsome.dylib': '/usr/lib/libsome.dylib',
}
the_truth_2 = {'/usr/lib/libsome.dylib',
'/usr/lib/libsome.0.dylib',
}
else:
the_truth = {
'libmodule.so': '/usr/lib/libmodule.so',
'libnoversion.so': '/usr/lib/libnoversion.so',
'libonlysoversion.so': '/usr/lib/libonlysoversion.so',
'libonlysoversion.so.5': '/usr/lib/libonlysoversion.so.5',
'libonlyversion.so': '/usr/lib/libonlyversion.so',
'libonlyversion.so.1': '/usr/lib/libonlyversion.so.1',
'libonlyversion.so.1.4.5': '/usr/lib/libonlyversion.so.1.4.5',
'libsome.so': '/usr/lib/libsome.so',
'libsome.so.0': '/usr/lib/libsome.so.0',
'libsome.so.1.2.3': '/usr/lib/libsome.so.1.2.3',
}
the_truth_2 = {'/usr/lib/libsome.so',
'/usr/lib/libsome.so.0',
'/usr/lib/libsome.so.1.2.3'}
self.assertDictEqual(install, the_truth)
targets = self.introspect('--targets')
for t in targets:
if t['name'] != 'some':
continue
self.assertSetEqual(the_truth_2, set(t['install_filename']))
def test_build_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
self.init(testdir)
self.build()
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz')
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz')
@skipIfNoPkgconfig
def test_build_rpath_pkgconfig(self):
'''
Test that current build artefacts (libs) are found first on the rpath;
the manually specified rpath comes second and additional rpath elements
(from pkg-config files) come last.
'''
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '90 pkgconfig build rpath order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
self.build()
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar:/foo/dummy')
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar:/foo/dummy')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz:/foo/dummy')
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz:/foo/dummy')
def test_global_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
if is_osx():
raise unittest.SkipTest('Global RPATHs via LDFLAGS not yet supported on MacOS (does anybody need it?)')
testdir = os.path.join(self.unit_test_dir, '81 global-rpath')
oldinstalldir = self.installdir
# Build and install an external library without DESTDIR.
# The external library generates a .pc file without an rpath.
yonder_dir = os.path.join(testdir, 'yonder')
yonder_prefix = os.path.join(oldinstalldir, 'yonder')
yonder_libdir = os.path.join(yonder_prefix, self.libdir)
self.prefix = yonder_prefix
self.installdir = yonder_prefix
self.init(yonder_dir)
self.build()
self.install(use_destdir=False)
# Since rpath has multiple valid formats we need to
# test that they are all properly used.
rpath_formats = [
('-Wl,-rpath=', False),
('-Wl,-rpath,', False),
('-Wl,--just-symbols=', True),
('-Wl,--just-symbols,', True),
('-Wl,-R', False),
('-Wl,-R,', False)
]
for rpath_format, exception in rpath_formats:
# Build an app that uses that installed library.
# Supply the rpath to the installed library via LDFLAGS
# (as systems like buildroot and guix are wont to do)
# and verify install preserves that rpath.
self.new_builddir()
env = {'LDFLAGS': rpath_format + yonder_libdir,
'PKG_CONFIG_PATH': os.path.join(yonder_libdir, 'pkgconfig')}
if exception:
with self.assertRaises(subprocess.CalledProcessError):
self.init(testdir, override_envvars=env)
continue
self.init(testdir, override_envvars=env)
self.build()
self.install(use_destdir=False)
got_rpath = get_rpath(os.path.join(yonder_prefix, 'bin/rpathified'))
self.assertEqual(got_rpath, yonder_libdir, rpath_format)
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
print(os.path.join(testdir, 'some_cross_tool.py'))
tool_path = os.path.join(testdir, 'some_cross_tool.py')
crossfile.write(textwrap.dedent(f'''\
[binaries]
c = '{shutil.which('gcc' if is_sunos() else 'cc')}'
ar = '{shutil.which('ar')}'
strip = '{shutil.which('strip')}'
sometool.py = ['{tool_path}']
someothertool.py = '{tool_path}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
'''))
crossfile.flush()
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, extra_args=['-Db_coverage=true'], default_args=False)
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
def test_old_gnome_module_codepaths(self):
'''
A lot of code in the GNOME module is conditional on the version of the
glib tools that are installed, and breakages in the old code can slip
by once the CI has a newer glib version. So we force the GNOME module
to pretend that it's running on an ancient glib so the fallback code is
also tested.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
mesonbuild.modules.gnome.native_glib_version = '2.20'
env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"}
try:
self.init(testdir,
inprocess=True,
override_envvars=env)
self.build(override_envvars=env)
finally:
mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
raise unittest.SkipTest('Glib 2.0 dependency not available.')
with tempfile.TemporaryDirectory() as tempdirname:
self.init(testdir1, extra_args=['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
self.install(use_destdir=False)
shutil.rmtree(self.builddir)
os.mkdir(self.builddir)
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
lib_dir = os.path.join(tempdirname, 'lib')
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = pkg_dir
# Private internal libraries must not leak out.
pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv)
self.assertNotIn(b'libpkgdep-int', pkg_out, 'Internal library leaked out.')
# Dependencies must not leak to cflags when building only a shared library.
pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv)
self.assertNotIn(b'glib', pkg_out, 'Internal dependency leaked to headers.')
# Test that the result is usable.
self.init(testdir2, override_envvars=myenv)
self.build(override_envvars=myenv)
myenv = os.environ.copy()
myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
if is_cygwin():
bin_dir = os.path.join(tempdirname, 'bin')
myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
self.assertTrue(os.path.isdir(lib_dir))
test_exe = os.path.join(self.builddir, 'pkguser')
self.assertTrue(os.path.isfile(test_exe))
subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_relative_paths(self):
testdir = os.path.join(self.unit_test_dir, '62 pkgconfig relative paths')
pkg_dir = os.path.join(testdir, 'pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))
env = get_fake_env(testdir, self.builddir, self.prefix)
env.coredata.set_options({OptionKey('pkg_config_path'): pkg_dir}, subproject='')
kwargs = {'required': True, 'silent': True}
relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
self.assertTrue(relative_path_dep.found())
# Ensure link_args are properly quoted
libpath = Path(self.builddir) / '../relativepath/lib'
link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
self.assertEqual(relative_path_dep.get_link_args(), link_args)
@skipIfNoPkgconfig
def test_pkgconfig_duplicate_path_entries(self):
testdir = os.path.join(self.unit_test_dir, '111 pkgconfig duplicate path entries')
pkg_dir = os.path.join(testdir, 'pkgconfig')
env = get_fake_env(testdir, self.builddir, self.prefix)
env.coredata.set_options({OptionKey('pkg_config_path'): pkg_dir}, subproject='')
PkgConfigDependency.setup_env({}, env, MachineChoice.HOST, pkg_dir)
pkg_config_path = env.coredata.options[OptionKey('pkg_config_path')].value
self.assertEqual(len(pkg_config_path), 1)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
'''
Check that a static library installed together with its pkg-config file
can be consumed by a dependent project via PKG_CONFIG_PATH.
'''
with tempfile.TemporaryDirectory() as tempdirname:
# build library
testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
testdirlib = os.path.join(testdirbase, 'lib')
self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--default-library=static'], default_args=False)
self.build()
self.install(use_destdir=False)
# build user of library
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_static_archive_stripping(self):
'''
Check that Meson produces valid static archives with --strip enabled
'''
with tempfile.TemporaryDirectory() as tempdirname:
testdirbase = os.path.join(self.unit_test_dir, '67 static archive stripping')
# build lib
self.new_builddir()
testdirlib = os.path.join(testdirbase, 'lib')
testlibprefix = os.path.join(tempdirname, 'libprefix')
self.init(testdirlib, extra_args=['--prefix=' + testlibprefix,
'--libdir=lib',
'--default-library=static',
'--buildtype=debug',
'--strip'], default_args=False)
self.build()
self.install(use_destdir=False)
# build executable (uses lib, fails if static archive has been stripped incorrectly)
pkg_dir = os.path.join(testlibprefix, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx() or is_openbsd():
# On these platforms (Windows, Cygwin, macOS, OpenBSD), libintl is a separate library
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
testdir = os.path.join(self.unit_test_dir, '53 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
self.assertLess(deps.index(b'-lsomething'), deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
raise unittest.SkipTest('rpath are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
if is_osx():
rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.src_root, 'test cases', 'native', '9 override with exe')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
@skipIfNoPkgconfig
def test_usage_external_library(self):
'''
Test that uninstalled usage of an external library (from the system or
PkgConfigDependency) works. On macOS, this workflow works out of the
box. On Linux, BSDs, Windows, etc., you need to set extra environment
variables such as LD_LIBRARY_PATH, so those parts of this test are skipped.
The system library is found with cc.find_library() and pkg-config deps.
'''
oldprefix = self.prefix
# Install external library so we can find it
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
# install into installdir without using DESTDIR
installdir = self.installdir
self.prefix = installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
self.install(use_destdir=False)
## New builddir for the consumer
self.new_builddir()
env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir),
'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')}
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
# install into installdir without using DESTDIR
self.prefix = self.installdir
self.init(testdir, override_envvars=env)
self.prefix = oldprefix
self.build(override_envvars=env)
# test uninstalled
self.run_tests(override_envvars=env)
if not (is_osx() or is_linux()):
return
# test running after installation
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'prog')
self._run([prog])
if not is_osx():
# Rest of the workflow only works on macOS
return
out = self._run(['otool', '-L', prog])
self.assertNotIn('@rpath', out)
## New builddir for testing that DESTDIR is not added to install_name
self.new_builddir()
# install into installdir with DESTDIR
self.init(testdir, override_envvars=env)
self.build(override_envvars=env)
# test running after installation
self.install(override_envvars=env)
prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
for f in prog, lib:
out = self._run(['otool', '-L', f])
# Ensure that the otool output does not contain self.installdir
self.assertNotRegex(out, self.installdir + '.*dylib ')
@skipIfNoPkgconfig
def test_usage_pkgconfig_prefixes(self):
'''
Build and install two external libraries, to different prefixes,
then build and install a client program that finds them via pkgconfig,
and verify the installed client program runs.
'''
oldinstalldir = self.installdir
# Build and install both external libraries without DESTDIR
val1dir = os.path.join(self.unit_test_dir, '76 pkgconfig prefixes', 'val1')
val1prefix = os.path.join(oldinstalldir, 'val1')
self.prefix = val1prefix
self.installdir = val1prefix
self.init(val1dir)
self.build()
self.install(use_destdir=False)
self.new_builddir()
env1 = {}
env1['PKG_CONFIG_PATH'] = os.path.join(val1prefix, self.libdir, 'pkgconfig')
val2dir = os.path.join(self.unit_test_dir, '76 pkgconfig prefixes', 'val2')
val2prefix = os.path.join(oldinstalldir, 'val2')
self.prefix = val2prefix
self.installdir = val2prefix
self.init(val2dir, override_envvars=env1)
self.build()
self.install(use_destdir=False)
self.new_builddir()
# Build, install, and run the client program
env2 = {}
env2['PKG_CONFIG_PATH'] = os.path.join(val2prefix, self.libdir, 'pkgconfig')
testdir = os.path.join(self.unit_test_dir, '76 pkgconfig prefixes', 'client')
testprefix = os.path.join(oldinstalldir, 'client')
self.prefix = testprefix
self.installdir = testprefix
self.init(testdir, override_envvars=env2)
self.build()
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'client')
env3 = {}
if is_cygwin():
env3['PATH'] = os.path.join(val1prefix, 'bin') + \
os.pathsep + \
os.path.join(val2prefix, 'bin') + \
os.pathsep + os.environ['PATH']
out = self._run([prog], override_envvars=env3).strip()
# Expected output is val1 + val2 = 3
self.assertEqual(out, '3')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
with chdir(subdir):
# Can't distribute broken symlinks in the source tree because they break
# the creation of zipapps. Create the symlink dynamically and run the
# test by hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('60 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('191 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('191 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '52 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
linker = cc.linker
if not linker.export_dynamic_args(env):
raise unittest.SkipTest('Not applicable for linkers without --export-dynamic')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
with open(build_ninja, encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
def test_compiler_libs_static_dedup(self):
testdir = os.path.join(self.unit_test_dir, '56 dedup compiler libs')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, encoding='utf-8') as f:
lines = f.readlines()
for lib in ('-ldl', '-lm', '-lc', '-lrt'):
for line in lines:
if lib not in line:
continue
# Assert that the library occurs exactly once on the line:
# splitting on it must yield exactly two parts.
self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_noncross_options(self):
# C_std defined in project options must be in effect also when native compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir, extra_args=['-Dpkg_config_path=' + testdir])
compdb = self.get_compdb()
self.assertEqual(len(compdb), 2)
self.assertRegex(compdb[0]['command'], '-std=c99')
self.assertRegex(compdb[1]['command'], '-std=c99')
self.build()
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
nativefile = tempfile.NamedTemporaryFile(mode='w')
nativefile.write(textwrap.dedent('''\
[binaries]
c = ['{}']
'''.format(os.path.join(testdir, 'build_wrapper.py'))))
nativefile.flush()
self.meson_native_file = nativefile.name
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write(textwrap.dedent('''\
[binaries]
c = ['{}']
'''.format(os.path.join(testdir, 'host_wrapper.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir)
def test_identity_cross_env(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
env = {
'CC_FOR_BUILD': '"' + os.path.join(testdir, 'build_wrapper.py') + '"',
}
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write(textwrap.dedent('''\
[binaries]
c = ['{}']
'''.format(os.path.join(testdir, 'host_wrapper.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir, override_envvars=env)
@skipIfNoPkgconfig
def test_static_link(self):
if is_cygwin():
raise unittest.SkipTest("Cygwin doesn't support LD_LIBRARY_PATH.")
# Build some libraries and install them
testdir = os.path.join(self.unit_test_dir, '68 static link/lib')
libdir = os.path.join(self.installdir, self.libdir)
oldprefix = self.prefix
self.prefix = self.installdir
self.init(testdir)
self.install(use_destdir=False)
# Test that the installed libraries work
self.new_builddir()
self.prefix = oldprefix
meson_args = [f'-Dc_link_args=-L{libdir}',
'--fatal-meson-warnings']
testdir = os.path.join(self.unit_test_dir, '68 static link')
env = {'PKG_CONFIG_LIBDIR': os.path.join(libdir, 'pkgconfig')}
self.init(testdir, extra_args=meson_args, override_envvars=env)
self.build()
self.run_tests()
def _check_ld(self, check: str, name: str, lang: str, expected: str) -> None:
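'''
Check that setting the {lang}_ld environment variable makes Meson pick
the expected linker. Parameter roles, as inferred from the body below:
'check' is a binary that must exist for the test to run, 'name' is the
value written into the environment variable, 'lang' selects which
compiler to detect, and 'expected' is the linker id Meson should report.
'''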
if is_sunos():
raise unittest.SkipTest('Solaris currently cannot override the linker.')
if not shutil.which(check):
raise unittest.SkipTest(f'Could not find {check}.')
envvars = [mesonbuild.envconfig.ENV_VAR_PROG_MAP[f'{lang}_ld']]
# Also test a deprecated variable if there is one.
if f'{lang}_ld' in mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP:
envvars.append(
mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP[f'{lang}_ld'])
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
comp = getattr(env, f'detect_{lang}_compiler')(MachineChoice.HOST)
if isinstance(comp, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler,
mesonbuild.compilers.AppleClangObjCCompiler,
mesonbuild.compilers.AppleClangObjCPPCompiler)):
raise unittest.SkipTest('AppleClang is currently only supported with ld64')
if lang != 'rust' and comp.use_linker_args('bfd') == []:
raise unittest.SkipTest(
f'Compiler {comp.id} does not support using alternative linkers')
self.assertEqual(comp.linker.id, expected)
def test_ld_environment_variable_bfd(self):
self._check_ld('ld.bfd', 'bfd', 'c', 'ld.bfd')
def test_ld_environment_variable_gold(self):
self._check_ld('ld.gold', 'gold', 'c', 'ld.gold')
def test_ld_environment_variable_lld(self):
self._check_ld('ld.lld', 'lld', 'c', 'ld.lld')
@skip_if_not_language('rust')
@skipIfNoExecutable('ld.gold') # need an additional check here because _check_ld checks for gcc
def test_ld_environment_variable_rust(self):
self._check_ld('gcc', 'gcc -fuse-ld=gold', 'rust', 'ld.gold')
def test_ld_environment_variable_cpp(self):
self._check_ld('ld.gold', 'gold', 'cpp', 'ld.gold')
@skip_if_not_language('objc')
def test_ld_environment_variable_objc(self):
self._check_ld('ld.gold', 'gold', 'objc', 'ld.gold')
@skip_if_not_language('objcpp')
def test_ld_environment_variable_objcpp(self):
self._check_ld('ld.gold', 'gold', 'objcpp', 'ld.gold')
@skip_if_not_language('fortran')
def test_ld_environment_variable_fortran(self):
self._check_ld('ld.gold', 'gold', 'fortran', 'ld.gold')
@skip_if_not_language('d')
def test_ld_environment_variable_d(self):
# At least for me, ldc defaults to gold, and gdc defaults to bfd, so
# let's pick lld, which isn't the default for either (currently)
self._check_ld('ld.lld', 'lld', 'd', 'ld.lld')
def compute_sha256(self, filename):
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def test_wrap_with_file_url(self):
testdir = os.path.join(self.unit_test_dir, '74 wrap file url')
source_filename = os.path.join(testdir, 'subprojects', 'foo.tar.xz')
patch_filename = os.path.join(testdir, 'subprojects', 'foo-patch.tar.xz')
wrap_filename = os.path.join(testdir, 'subprojects', 'foo.wrap')
source_hash = self.compute_sha256(source_filename)
patch_hash = self.compute_sha256(patch_filename)
wrap = textwrap.dedent("""\
[wrap-file]
directory = foo
source_url = http://server.invalid/foo
source_fallback_url = file://{}
source_filename = foo.tar.xz
source_hash = {}
patch_url = http://server.invalid/foo
patch_fallback_url = file://{}
patch_filename = foo-patch.tar.xz
patch_hash = {}
""".format(source_filename, source_hash, patch_filename, patch_hash))
with open(wrap_filename, 'w') as f:
f.write(wrap)
self.init(testdir)
self.build()
self.run_tests()
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'packagecache'))
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'foo'))
os.unlink(wrap_filename)
def test_no_rpath_for_static(self):
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
self.build()
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertIsNone(build_rpath)
def test_lookup_system_after_broken_fallback(self):
# Just to generate libfoo.pc so we can test system dependency lookup.
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
self.init(testdir)
privatedir = self.privatedir
# Write a test project where the first dependency() returns not-found
# because the 'broken' subproject does not exist; that should not prevent
# the second dependency() from being looked up on the system.
self.new_builddir()
with tempfile.TemporaryDirectory() as d:
with open(os.path.join(d, 'meson.build'), 'w') as f:
f.write(textwrap.dedent('''\
project('test')
dependency('notfound', fallback: 'broken', required: false)
dependency('libfoo', fallback: 'broken', required: true)
'''))
self.init(d, override_envvars={'PKG_CONFIG_LIBDIR': privatedir})
def test_as_link_whole(self):
testdir = os.path.join(self.unit_test_dir, '78 as link whole')
self.init(testdir)
with open(os.path.join(self.privatedir, 'bar1.pc')) as f:
content = f.read()
self.assertIn('-lfoo', content)
with open(os.path.join(self.privatedir, 'bar2.pc')) as f:
content = f.read()
self.assertNotIn('-lfoo', content)
def test_prelinking(self):
# Prelinking currently only works on recent GNU toolchains.
# Skip everything else. When support for other toolchains is added,
# remove limitations as necessary.
if is_osx():
raise unittest.SkipTest('Prelinking not supported on Darwin.')
if 'clang' in os.environ.get('CC', 'dummy'):
raise unittest.SkipTest('Prelinking not supported with Clang.')
gccver = subprocess.check_output(['cc', '--version'])
if b'7.5.0' in gccver:
raise unittest.SkipTest('GCC on Bionic is too old to be supported.')
testdir = os.path.join(self.unit_test_dir, '88 prelinking')
self.init(testdir)
self.build()
outlib = os.path.join(self.builddir, 'libprelinked.a')
ar = shutil.which('ar')
self.assertTrue(os.path.exists(outlib))
self.assertIsNotNone(ar)
p = subprocess.run([ar, 't', outlib],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
universal_newlines=True, timeout=1)
obj_files = p.stdout.strip().split('\n')
self.assertEqual(len(obj_files), 1)
self.assertTrue(obj_files[0].endswith('-prelink.o'))
class BaseLinuxCrossTests(BasePlatformTests):
# Don't pass --libdir when cross-compiling. We have tests that
# check whether meson auto-detects it correctly.
libdir = None
def should_run_cross_arm_tests():
return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm')
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BaseLinuxCrossTests):
'''
Tests that cross-compilation to Linux/ARM works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')
def test_cflags_cross_environment_pollution(self):
'''
Test that the CFLAGS environment variable does not pollute the cross
environment. This can't be an ordinary test case because we need to
inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'})
compdb = self.get_compdb()
self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])
def test_cross_file_overrides_always_args(self):
'''
Test that $lang_args in cross files always override get_always_args().
Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
architectures such as some Android versions and Raspbian.
https://github.com/mesonbuild/meson/issues/3049
https://github.com/mesonbuild/meson/issues/3089
'''
testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
self.build()
def test_cross_libdir(self):
# When cross compiling "libdir" should default to "lib"
# rather than "lib/x86_64-linux-gnu" or something like that.
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'lib')
return
self.fail('Option libdir not in introspect data.')
def test_cross_libdir_subproject(self):
# Guard against a regression where calling "subproject"
# would reset the value of libdir to its default value.
testdir = os.path.join(self.unit_test_dir, '77 subdir libdir')
self.init(testdir, extra_args=['--libdir=fuf'])
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'fuf')
return
self.fail('Libdir specified on command line gets reset.')
def test_std_remains(self):
# C_std defined in project options must be in effect also when cross compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
@skipIfNoPkgconfig
def test_pkg_config_option(self):
if not shutil.which('arm-linux-gnueabihf-pkg-config'):
raise unittest.SkipTest('Cross-pkgconfig not found.')
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
def test_run_native_test(self):
'''
https://github.com/mesonbuild/meson/issues/7997
Check that a native test is run during a cross build without needing an exe wrapper.
'''
testdir = os.path.join(self.unit_test_dir, '89 run native test')
stamp_file = os.path.join(self.builddir, 'native_test_has_run.stamp')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(stamp_file)
self.run_tests()
self.assertPathExists(stamp_file)
def should_run_cross_mingw_tests():
return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin())
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BaseLinuxCrossTests):
'''
Tests that cross-compilation to Windows/MinGW works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')
def test_exe_wrapper_behaviour(self):
'''
Test that an exe wrapper that isn't found doesn't cause compiler sanity
checks and compiler checks to fail, but causes configure to fail if it
requires running a cross-built executable (custom_target or run_target)
and causes the tests to be skipped if they are run.
'''
testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
# Configures, builds, and tests fine by default
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
os.mkdir(self.builddir)
# Change cross file to use a non-existing exe_wrapper and it should fail
self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
# Force tracebacks so we can detect them properly
env = {'MESON_FORCE_BACKTRACE': '1'}
error_message = "An exe_wrapper is needed but was not found. Please define one in cross file and check the command and/or add it to PATH."
with self.assertRaises(MesonException) as cm:
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Drun-target=false',
inprocess=True,
override_envvars=env)
self.assertEqual(str(cm.exception), error_message)
with self.assertRaises(MesonException) as cm:
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Dcustom-target=false',
inprocess=True,
override_envvars=env)
self.assertEqual(str(cm.exception), error_message)
self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'],
override_envvars=env)
self.build()
with self.assertRaises(MesonException) as cm:
# Must run in-process or we'll get a generic CalledProcessError
self.run_tests(inprocess=True, override_envvars=env)
self.assertEqual(str(cm.exception),
"The exe_wrapper defined in the cross file 'broken' was not found. Please check the command and/or add it to PATH.")
@skipIfNoPkgconfig
def test_cross_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
class PythonTests(BasePlatformTests):
'''
Tests that verify compilation of python extension modules
'''
def test_versions(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest(f'Skipping python tests with {self.backend.name} backend')
testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
# No python version specified, this will use meson's python
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
# When specifying a known name (python2 / python3), the module
# will also try 'python' as a fallback and use it if the major
# version matches.
try:
self.init(testdir, extra_args=['-Dpython=python2'])
self.build()
self.run_tests()
except unittest.SkipTest:
# python2 is not necessarily installed on the test machine. If it is
# not, or the python headers can't be found, the test will raise
# MESON_SKIP_TEST. We could check beforehand what version of python is
# available, but that is a bit of a chicken-and-egg situation, as it is
# the job of the module, so we just ask for forgiveness rather than
# permission.
pass
self.wipe()
for py in ('pypy', 'pypy3'):
try:
self.init(testdir, extra_args=['-Dpython=%s' % py])
except unittest.SkipTest:
# Same as above, pypy2 and pypy3 are not expected to be present
# on the test system, the test project only raises in these cases
continue
# We have a pypy, this is expected to work
self.build()
self.run_tests()
self.wipe()
# The test is configured to error out with MESON_SKIP_TEST
# in case it cannot find python.
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=not-python'])
self.wipe()
# While dir is an external command on both Windows and Linux,
# it certainly isn't python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=dir'])
self.wipe()
class RewriterTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.maxDiff = None
def prime(self, dirname):
copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)
def rewrite_raw(self, directory, args):
if isinstance(args, str):
args = [args]
command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, timeout=60)
print('STDOUT:')
print(p.stdout)
print('STDERR:')
print(p.stderr)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
if not p.stderr:
return {}
return json.loads(p.stderr)
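# A note on the contract assumed by rewrite_raw() above: the rewriter
# writes its human-readable log to stdout and a JSON document to stderr,
# so callers get back a plain dict (or {} when nothing was emitted), e.g.:
#
#   out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
#   # out == {'target': {...}}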
def rewrite(self, directory, args):
if isinstance(args, str):
args = [args]
return self.rewrite_raw(directory, ['command'] + args)
def test_target_source_list(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_add_sources_abs(self):
self.prime('1 basic')
abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
self.rewrite(self.builddir, add)
out = self.rewrite(self.builddir, inf)
expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
self.assertDictEqual(out, expected)
def test_target_remove_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_subdir(self):
self.prime('2 subdirs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
self.assertDictEqual(list(out['target'].values())[0], expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(list(out['target'].values())[0], expected)
def test_target_remove(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_remove_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, {})
def test_target_add_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)
def test_target_source_sorting(self):
self.prime('5 sorting')
add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
out = self.rewrite(self.builddir, add_json)
out = self.rewrite(self.builddir, inf_json)
expected = {
'target': {
'exe1@exe': {
'name': 'exe1',
'sources': [
'aaa/a/a1.c',
'aaa/b/b1.c',
'aaa/b/b2.c',
'aaa/f1.c',
'aaa/f2.c',
'aaa/f3.c',
'bbb/a/b1.c',
'bbb/b/b2.c',
'bbb/c1/b5.c',
'bbb/c2/b7.c',
'bbb/c10/b6.c',
'bbb/a4.c',
'bbb/b3.c',
'bbb/b4.c',
'bbb/b5.c',
'a1.c',
'a2.c',
'a3.c',
'a10.c',
'a20.c',
'a30.c',
'a100.c',
'a101.c',
'a110.c',
'a210.c',
'a666.c',
'b1.c',
'c2.c'
]
}
}
}
self.assertDictEqual(out, expected)
def test_target_same_name_skip(self):
self.prime('4 same name targets')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'myExe', 'sources': ['main.cpp']}
self.assertEqual(len(out['target']), 2)
for val in out['target'].values():
self.assertDictEqual(expected, val)
def test_kwargs_info(self):
self.prime('3 kwargs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
'dependency#dep1': {'required': True, 'method': 'cmake'}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_add(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD', 'Boost']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': 'GPL'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove_regex(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': 'debug=true'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {},
'target#tgt1': {},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
self.current_config = 0
self.current_wrapper = 0
def helper_create_native_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, f'generated{self.current_config}.config')
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write(f'[{section}]\n')
for k, v in entries.items():
if isinstance(v, (bool, int, float)):
f.write(f"{k}={v}\n")
elif isinstance(v, list):
f.write("{}=[{}]\n".format(k, ', '.join([f"'{w}'" for w in v])))
else:
f.write(f"{k}='{v}'\n")
return filename
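# As a usage sketch of the serialization above, a call like
#   helper_create_native_file({'binaries': {'bash': '/usr/bin/bash'}})
# produces a file containing:
#   [binaries]
#   bash='/usr/bin/bash'
# Booleans, ints, and floats are written bare, and lists are rendered as
# ['a', 'b'], matching Meson's machine file syntax.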
def helper_create_binary_wrapper(self, binary, dir_=None, extra_args=None, **kwargs):
"""Creates a wrapper around a binary that overrides specific values."""
filename = os.path.join(dir_ or self.builddir, f'binary_wrapper{self.current_wrapper}.py')
extra_args = extra_args or {}
self.current_wrapper += 1
if is_haiku():
chbang = '#!/bin/env python3'
else:
chbang = '#!/usr/bin/env python3'
with open(filename, 'wt') as f:
f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
for name in chain(extra_args, kwargs):
f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
f.write(' args, extra_args = parser.parse_known_args()\n')
for name, value in chain(extra_args.items(), kwargs.items()):
f.write(f' if args.{name}:\n')
f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
f.write(' sys.exit(0)\n')
f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ret.stdout.decode('utf-8'))
print(ret.stderr.decode('utf-8'), file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
if not is_windows():
os.chmod(filename, 0o755)
return filename
# On Windows we need yet another level of indirection, as cmd cannot
# invoke python files itself, so instead we generate a .bat file, which
# invokes our python wrapper
batfile = os.path.join(self.builddir, f'binary_wrapper{self.current_wrapper}.bat')
with open(batfile, 'wt') as f:
f.write(fr'@{sys.executable} {filename} %*')
return batfile
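# A usage sketch: helper_create_binary_wrapper('bash', version='12345')
# returns a script that prints '12345' and exits when called with
# --version, and forwards any other arguments to the real 'bash'.
# Passing outfile='stderr' makes the wrapper print to stderr instead,
# per the kwargs.get('outfile', 'stdout') lookup above.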
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
"""Helper for generating tests for overriding compilers for langaugages
with more than one implementation, such as C, C++, ObjC, ObjC++, and D.
"""
env = get_fake_env()
getter = getattr(env, f'detect_{lang}_compiler')
getter = functools.partial(getter, for_machine)
cc = getter()
binary, newid = cb(cc)
env.binaries[for_machine].binaries[lang] = binary
compiler = getter()
self.assertEqual(compiler.id, newid)
def test_multiple_native_files_override(self):
wrapper = self.helper_create_binary_wrapper('bash', version='foo')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
# This test hangs on cygwin.
@unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non Unix OSes.')
def test_native_file_is_pipe(self):
fifo = os.path.join(self.builddir, 'native.file')
os.mkfifo(fifo)
with tempfile.TemporaryDirectory() as d:
wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')
def filler():
with open(fifo, 'w') as f:
f.write('[binaries]\n')
f.write(f"bash = '{wrapper}'\n")
thread = threading.Thread(target=filler)
thread.start()
self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
thread.join()
os.unlink(fifo)
self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('python')
config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def _simple_test(self, case, binary, entry=None):
wrapper = self.helper_create_binary_wrapper(binary, version='12345')
config = self.helper_create_native_file({'binaries': {entry or binary: wrapper}})
self.init(self.testcase, extra_args=['--native-file', config, f'-Dcase={case}'])
def test_find_program(self):
self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
# Do the skip at this level to avoid screwing up the cache
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
if not shutil.which('llvm-config'):
raise unittest.SkipTest('llvm-config not installed, cannot test')
self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
self._simple_test('python3', 'python3')
def test_python_module(self):
if is_windows():
# Bat adds extra crap to stdout, so the version check logic in the
# python module breaks. This is fine on other OSes because they
# don't need the extra indirection.
raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
elif is_osx():
binary = 'python'
else:
binary = 'python2'
# We may not have python2 installed; check for it
for v in ['2', '2.7', '-2.7']:
rc = subprocess.call(['pkg-config', '--cflags', f'python{v}'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if rc == 0:
break
else:
raise unittest.SkipTest('Not running Python 2 tests because dev packages not installed.')
self._simple_test('python', binary, entry='python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'lcc':
if shutil.which('lfortran'):
return 'lfortran', 'lcc'
raise unittest.SkipTest('No alternate Fortran implementation.')
elif comp.id == 'gcc':
if shutil.which('ifort'):
# There is an ICC for windows (windows build, linux host),
# but we don't support that ATM so let's not worry about it.
if is_windows():
return 'ifort', 'intel-cl'
return 'ifort', 'intel'
elif shutil.which('flang'):
return 'flang', 'flang'
elif shutil.which('pgfortran'):
return 'pgfortran', 'pgi'
# XXX: there are several other fortran compilers meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang: str, binary: str, version_str: str, version: str) -> None:
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env()
getter = getattr(env, f'detect_{lang}_compiler')
getter = functools.partial(getter, MachineChoice.HOST)
env.binaries.host.binaries[lang] = [wrapper]
compiler = getter()
self.assertEqual(compiler.version, version)
@skip_if_not_language('vala')
@skip_if_env_set('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_set('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr',
extra_args={'Xlinker': 'macosx_version. PROJECT:ld - 1.2.3'})
env = get_fake_env()
env.binaries.host.binaries['swift'] = [wrapper]
compiler = env.detect_swift_compiler(MachineChoice.HOST)
self.assertEqual(compiler.version, '1.2345')
def test_native_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
def test_native_file_dirs_overridden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib'])
def test_compile_sys_path(self):
"""Compiling with a native file stored in a system path works.
There was a bug which caused the paths to be stored incorrectly and
would result in ninja invoking meson in an infinite loop. This tests
for that by actually invoking ninja.
"""
testcase = os.path.join(self.common_test_dir, '1 trivial')
# It really doesn't matter what's in the native file, just that it exists
config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
self.init(testcase, extra_args=['--native-file', config])
self.build()
def test_user_options(self):
testcase = os.path.join(self.common_test_dir, '41 options')
for opt, value in [('testoption', 'some other val'), ('other_one', True),
('combo_opt', 'one'), ('array_opt', ['two']),
('integer_opt', 0),
('CaseSenSiTivE', 'SOME other Value'),
('CASESENSITIVE', 'some other Value')]:
config = self.helper_create_native_file({'project options': {opt: value}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_user_options_command_line_overrides(self):
testcase = os.path.join(self.common_test_dir, '41 options')
config = self.helper_create_native_file({'project options': {'other_one': True}})
self.init(testcase, extra_args=['--native-file', config, '-Dother_one=false'])
def test_user_options_subproject(self):
testcase = os.path.join(self.unit_test_dir, '80 user options for subproject')
s = os.path.join(testcase, 'subprojects')
if not os.path.exists(s):
os.mkdir(s)
s = os.path.join(s, 'sub')
if not os.path.exists(s):
sub = os.path.join(self.common_test_dir, '41 options')
shutil.copytree(sub, s)
for opt, value in [('testoption', 'some other val'), ('other_one', True),
('combo_opt', 'one'), ('array_opt', ['two']),
('integer_opt', 0)]:
config = self.helper_create_native_file({'sub:project options': {opt: value}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_option_bool(self):
# Bools are allowed to be unquoted
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({'built-in options': {'werror': True}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
# Test that non-per-subproject options are inherited from the parent
if 'werror' in each['name']:
self.assertEqual(each['value'], True)
break
else:
self.fail('Did not find werror in build options?')
def test_option_integer(self):
# Integers are allowed to be unquoted
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({'built-in options': {'unity_size': 100}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
# Test that non-per-subproject options are inherited from the parent
if 'unity_size' in each['name']:
self.assertEqual(each['value'], 100)
break
else:
self.fail('Did not find unity_size in build options?')
def test_builtin_options(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_native_file({'built-in options': {'cpp_std': 'c++14'}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++14')
break
else:
self.fail('Did not find cpp_std in build options?')
def test_builtin_options_conf_overrides_env(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_native_file({'built-in options': {'pkg_config_path': '/foo'}})
self.init(testcase, extra_args=['--native-file', config], override_envvars={'PKG_CONFIG_PATH': '/bar'})
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/foo'])
break
else:
self.fail('Did not find pkg_config_path in build options?')
def test_builtin_options_subprojects(self):
testcase = os.path.join(self.common_test_dir, '99 subproject subdir')
config = self.helper_create_native_file({'built-in options': {'default_library': 'both', 'c_args': ['-Dfoo']}, 'sub:built-in options': {'default_library': 'static'}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
# Test that non-per-subproject options are inherited from the parent
if 'c_args' in each['name']:
# This path will be hit twice, once for build and once for host,
self.assertEqual(each['value'], ['-Dfoo'])
found += 1
elif each['name'] == 'default_library':
self.assertEqual(each['value'], 'both')
found += 1
elif each['name'] == 'sub:default_library':
self.assertEqual(each['value'], 'static')
found += 1
self.assertEqual(found, 4, 'Did not find all four expected values')
def test_builtin_options_subprojects_overrides_buildfiles(self):
# If the buildfile says subproject(... default_library: shared), ensure that's overwritten
testcase = os.path.join(self.common_test_dir, '224 persubproject options')
config = self.helper_create_native_file({'sub2:built-in options': {'default_library': 'shared'}})
with self.assertRaises((RuntimeError, subprocess.CalledProcessError)) as cm:
self.init(testcase, extra_args=['--native-file', config])
if isinstance(cm.exception, RuntimeError):
check = str(cm.exception)
else:
check = cm.exception.stdout
self.assertIn('Parent should override default_library', check)
def test_builtin_options_subprojects_dont_inherits_parent_override(self):
# If the buildfile says subproject(... default_library: shared), ensure that's overwritten
testcase = os.path.join(self.common_test_dir, '224 persubproject options')
config = self.helper_create_native_file({'built-in options': {'default_library': 'both'}})
self.init(testcase, extra_args=['--native-file', config])
def test_builtin_options_compiler_properties(self):
# the properties section can have lang_args, and those need to be
# overwritten by the built-in options
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'c_args': ['-DFOO']},
'properties': {'c_args': ['-DBAR']},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'c_args':
self.assertEqual(each['value'], ['-DFOO'])
break
else:
self.fail('Did not find c_args in build options?')
def test_builtin_options_compiler_properties_legacy(self):
# The legacy placement in properties is still valid if a 'built-in
# options' setting is present, but doesn't have the lang_args
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'default_library': 'static'},
'properties': {'c_args': ['-DBAR']},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'c_args':
self.assertEqual(each['value'], ['-DBAR'])
break
else:
self.fail('Did not find c_args in build options?')
def test_builtin_options_paths(self):
# the properties section can have lang_args, and those need to be
# overwritten by the built-in options
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'bindir': 'foo'},
'paths': {'bindir': 'bar'},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'bindir':
self.assertEqual(each['value'], 'foo')
break
else:
self.fail('Did not find bindir in build options?')
def test_builtin_options_paths_legacy(self):
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'default_library': 'static'},
'paths': {'bindir': 'bar'},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'bindir':
self.assertEqual(each['value'], 'bar')
break
else:
self.fail('Did not find bindir in build options?')
class CrossFileTests(BasePlatformTests):
"""Tests for cross file functionality not directly related to
cross compiling.
This is mainly aimed to testing overrides from cross files.
"""
def setUp(self):
super().setUp()
self.current_config = 0
self.current_wrapper = 0
def _cross_file_generator(self, *, needs_exe_wrapper: bool = False,
exe_wrapper: T.Optional[T.List[str]] = None) -> str:
if is_windows():
raise unittest.SkipTest('Cannot run this test on non-mingw/non-cygwin windows')
return textwrap.dedent(f"""\
[binaries]
c = '{shutil.which('gcc' if is_sunos() else 'cc')}'
ar = '{shutil.which('ar')}'
strip = '{shutil.which('strip')}'
exe_wrapper = {str(exe_wrapper) if exe_wrapper is not None else '[]'}
[properties]
needs_exe_wrapper = {needs_exe_wrapper}
[host_machine]
system = 'linux'
cpu_family = 'x86'
cpu = 'i686'
endian = 'little'
""")
def _stub_exe_wrapper(self) -> str:
return textwrap.dedent('''\
#!/usr/bin/env python3
import subprocess
import sys
sys.exit(subprocess.run(sys.argv[1:]).returncode)
''')
def test_needs_exe_wrapper_true(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=True))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
out = self.run_target('test')
self.assertRegex(out, r'Skipped:\s*1\s*\n')
def test_needs_exe_wrapper_false(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=False))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
out = self.run_target('test')
self.assertNotRegex(out, r'Skipped:\s*1\n')
def test_needs_exe_wrapper_true_wrapper(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
s = Path(d) / 'wrapper.py'
with s.open('wt') as f:
f.write(self._stub_exe_wrapper())
s.chmod(0o774)
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(
needs_exe_wrapper=True,
exe_wrapper=[str(s)]))
self.init(testdir, extra_args=['--cross-file=' + str(p), '-Dexpect=true'])
out = self.run_target('test')
self.assertRegex(out, r'Ok:\s*3\s*\n')
def test_cross_exe_passed_no_wrapper(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=True))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
self.build()
out = self.run_target('test')
self.assertRegex(out, r'Skipped:\s*1\s*\n')
# The test uses mocking and thus requires that the current process is the
# one to run the Meson steps. If we are using an external test executable
# (most commonly in Debian autopkgtests) then the mocking won't work.
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
def test_cross_file_system_paths(self):
if is_windows():
raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
testdir = os.path.join(self.common_test_dir, '1 trivial')
cross_content = self._cross_file_generator()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
os.environ.pop('XDG_DATA_HOME', None)
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
# If XDG_DATA_HOME is set in the environment running the
# tests this test will fail, so mock the environment, pop
# it, then test
with mock.patch.dict(os.environ):
os.environ.pop('XDG_DATA_HOME', None)
with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
def helper_create_cross_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, f'generated{self.current_config}.config')
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write(f'[{section}]\n')
for k, v in entries.items():
f.write(f"{k}='{v}'\n")
return filename
def test_cross_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_overridden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib',
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_chain(self):
# crossfile2 overrides crossfile overrides nativefile
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'--cross-file', os.path.join(testcase, 'crossfile2'),
'-Ddef_bindir=binbar2',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_user_options(self):
# This is just a smoke test for the cross file, since the implementation
# shares code after loading from the files
testcase = os.path.join(self.common_test_dir, '41 options')
config = self.helper_create_cross_file({'project options': {'testoption': 'some other value'}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--cross-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_builtin_options(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_cross_file({'built-in options': {'cpp_std': 'c++14'}})
self.init(testcase, extra_args=['--cross-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++14')
break
else:
self.fail('No c++ standard set?')
def test_builtin_options_per_machine(self):
"""Test options that are allowed to be set on a per-machine basis.
Such options could be passed twice, once for the build machine, and
once for the host machine. I've picked pkg-config path, but any would
do that can be set for both.
"""
testcase = os.path.join(self.common_test_dir, '2 cpp')
cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/cross/path', 'cpp_std': 'c++17'}})
native = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/native/path', 'cpp_std': 'c++14'}})
# Ensure that PKG_CONFIG_PATH is not set in the environment
with mock.patch.dict('os.environ'):
for k in ['PKG_CONFIG_PATH', 'PKG_CONFIG_PATH_FOR_BUILD']:
try:
del os.environ[k]
except KeyError:
pass
self.init(testcase, extra_args=['--cross-file', cross, '--native-file', native])
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/cross/path'])
found += 1
elif each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++17')
found += 1
elif each['name'] == 'build.pkg_config_path':
self.assertEqual(each['value'], ['/native/path'])
found += 1
elif each['name'] == 'build.cpp_std':
self.assertEqual(each['value'], 'c++14')
found += 1
if found == 4:
break
self.assertEqual(found, 4, 'Did not find all sections.')
def test_builtin_options_conf_overrides_env(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/native'}})
cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/cross'}})
self.init(testcase, extra_args=['--native-file', config, '--cross-file', cross],
override_envvars={'PKG_CONFIG_PATH': '/bar', 'PKG_CONFIG_PATH_FOR_BUILD': '/dir'})
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/cross'])
found += 1
elif each['name'] == 'build.pkg_config_path':
self.assertEqual(each['value'], ['/native'])
found += 1
if found == 2:
break
self.assertEqual(found, 2, 'Did not find all sections.')
class TAPParserTests(unittest.TestCase):
def assert_test(self, events, **kwargs):
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Test(**kwargs))
def assert_plan(self, events, **kwargs):
if 'skipped' not in kwargs:
kwargs['skipped'] = False
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Plan(**kwargs))
def assert_version(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Version(**kwargs))
def assert_error(self, events):
self.assertEqual(type(next(events)), TAPParser.Error)
def assert_bailout(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
def assert_last(self, events):
with self.assertRaises(StopIteration):
next(events)
def parse_tap(self, s):
parser = TAPParser()
return iter(parser.parse(io.StringIO(s)))
def parse_tap_v13(self, s):
events = self.parse_tap('TAP version 13\n' + s)
self.assert_version(events, version=13)
return events
def test_empty(self):
events = self.parse_tap('')
self.assert_last(events)
def test_empty_plan(self):
events = self.parse_tap('1..0')
self.assert_plan(events, num_tests=0, late=False, skipped=True)
self.assert_last(events)
def test_plan_directive(self):
events = self.parse_tap('1..0 # skipped for some reason')
self.assert_plan(events, num_tests=0, late=False, skipped=True,
explanation='for some reason')
self.assert_last(events)
events = self.parse_tap('1..1 # skipped for some reason\nok 1')
self.assert_error(events)
self.assert_plan(events, num_tests=1, late=False, skipped=True,
explanation='for some reason')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('1..1 # todo not supported here\nok 1')
self.assert_error(events)
self.assert_plan(events, num_tests=1, late=False, skipped=False,
explanation='not supported here')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_ok(self):
events = self.parse_tap('ok')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_number(self):
events = self.parse_tap('ok 1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_name(self):
events = self.parse_tap('ok 1 abc')
self.assert_test(events, number=1, name='abc', result=TestResult.OK)
self.assert_last(events)
def test_one_test_not_ok(self):
events = self.parse_tap('not ok')
self.assert_test(events, number=1, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_one_test_todo(self):
events = self.parse_tap('not ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_one_test_skip(self):
events = self.parse_tap('ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
def test_one_test_skip_failure(self):
events = self.parse_tap('not ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
self.assert_last(events)
def test_many_early_plan(self):
events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
self.assert_plan(events, num_tests=4, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_many_late_plan(self):
events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_plan(events, num_tests=4, late=True)
self.assert_last(events)
def test_directive_case(self):
events = self.parse_tap('ok 1 abc # skip')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_directive_explanation(self):
events = self.parse_tap('ok 1 abc # skip why')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
explanation='why')
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo Because')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
explanation='Because')
self.assert_last(events)
def test_one_test_early_plan(self):
events = self.parse_tap('1..1\nok')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_late_plan(self):
events = self.parse_tap('ok\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, num_tests=1, late=True)
self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, num_tests=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, num_tests=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, num_tests=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, num_tests=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few_bailout(self):
events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
self.assert_plan(events, num_tests=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_bailout(events, message='no third test')
self.assert_last(events)
def test_diagnostics(self):
events = self.parse_tap('1..1\n# ignored\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, num_tests=1, late=True)
self.assert_last(events)
def test_empty_line(self):
events = self.parse_tap('1..1\n\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_unexpected(self):
events = self.parse_tap('1..1\ninvalid\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_version(self):
events = self.parse_tap('TAP version 13\n')
self.assert_version(events, version=13)
self.assert_last(events)
events = self.parse_tap('TAP version 12\n')
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..0\nTAP version 13\n')
self.assert_plan(events, num_tests=0, late=False, skipped=True)
self.assert_error(events)
self.assert_last(events)
def test_yaml(self):
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_last(events)
class SubprojectsCommandTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.root_dir = Path(self.builddir)
self.project_dir = self.root_dir / 'src'
self._create_project(self.project_dir)
self.subprojects_dir = self.project_dir / 'subprojects'
os.makedirs(str(self.subprojects_dir))
def _create_project(self, path, project_name='dummy'):
os.makedirs(str(path), exist_ok=True)
with open(str(path / 'meson.build'), 'w') as f:
f.write(f"project('{project_name}')")
def _git(self, cmd, workdir):
return git(cmd, str(workdir), check=True)[1].strip()
def _git_config(self, workdir):
self._git(['config', 'user.name', 'Meson Test'], workdir)
self._git(['config', 'user.email', 'meson.test@example.com'], workdir)
def _git_remote(self, cmd, name):
return self._git(cmd, self.root_dir / name)
def _git_local(self, cmd, name):
return self._git(cmd, self.subprojects_dir / name)
def _git_local_branch(self, name):
# Same as `git branch --show-current` but compatible with older git versions
branch = self._git_local(['rev-parse', '--abbrev-ref', 'HEAD'], name)
return branch if branch != 'HEAD' else ''
def _git_local_commit(self, name, ref='HEAD'):
return self._git_local(['rev-parse', ref], name)
def _git_remote_commit(self, name, ref='HEAD'):
return self._git_remote(['rev-parse', ref], name)
def _git_create_repo(self, path):
# If a user has git configuration init.defaultBranch set we want to override that
with tempfile.TemporaryDirectory() as d:
out = git(['--version'], str(d))[1]
if version_compare(mesonbuild.environment.search_version(out), '>= 2.28'):
extra_cmd = ['--initial-branch', 'master']
else:
extra_cmd = []
self._create_project(path)
self._git(['init'] + extra_cmd, path)
self._git_config(path)
self._git(['add', '.'], path)
self._git(['commit', '-m', 'Initial commit'], path)
def _git_create_remote_repo(self, name):
self._git_create_repo(self.root_dir / name)
def _git_create_local_repo(self, name):
self._git_create_repo(self.subprojects_dir / name)
def _git_create_remote_commit(self, name, branch):
self._git_remote(['checkout', branch], name)
self._git_remote(['commit', '--allow-empty', '-m', f'initial {branch} commit'], name)
def _git_create_remote_branch(self, name, branch):
self._git_remote(['checkout', '-b', branch], name)
self._git_remote(['commit', '--allow-empty', '-m', f'initial {branch} commit'], name)
def _git_create_remote_tag(self, name, tag):
self._git_remote(['commit', '--allow-empty', '-m', f'tag {tag} commit'], name)
self._git_remote(['tag', tag], name)
def _wrap_create_git(self, name, revision='master'):
path = self.root_dir / name
with open(str((self.subprojects_dir / name).with_suffix('.wrap')), 'w') as f:
f.write(textwrap.dedent(
'''
[wrap-git]
url={}
revision={}
'''.format(os.path.abspath(str(path)), revision)))
def _wrap_create_file(self, name, tarball='dummy.tar.gz'):
path = self.root_dir / tarball
with open(str((self.subprojects_dir / name).with_suffix('.wrap')), 'w') as f:
f.write(textwrap.dedent(
'''
[wrap-file]
source_url={}
'''.format(os.path.abspath(str(path)))))
def _subprojects_cmd(self, args):
return self._run(self.meson_command + ['subprojects'] + args, workdir=str(self.project_dir))
def test_git_update(self):
subp_name = 'sub1'
# Create a fake remote git repository and a wrap file. Checks that
# "meson subprojects download" works.
self._git_create_remote_repo(subp_name)
self._wrap_create_git(subp_name)
self._subprojects_cmd(['download'])
self.assertPathExists(str(self.subprojects_dir / subp_name))
self._git_config(self.subprojects_dir / subp_name)
# Create a new remote branch and update the wrap file. Checks that
# "meson subprojects update --reset" checks out the new branch.
self._git_create_remote_branch(subp_name, 'newbranch')
self._wrap_create_git(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
# Update remote newbranch. Checks that the new commit is pulled into the
# existing local newbranch. Make sure it does not print a spurious 'git stash' message.
self._git_create_remote_commit(subp_name, 'newbranch')
out = self._subprojects_cmd(['update', '--reset'])
self.assertNotIn('No local changes to save', out)
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
# Update remote newbranch and switch to another branch. Checks that it
# switches the current branch back to newbranch and pulls the latest commit.
self._git_local(['checkout', 'master'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
# Stage some local changes then update. Checks that local changes got
# stashed.
self._create_project(self.subprojects_dir / subp_name, 'new_project_name')
self._git_local(['add', '.'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
self.assertTrue(self._git_local(['stash', 'list'], subp_name))
# Create a new remote tag and update the wrap file. Checks that
# "meson subprojects update --reset" checks out the new tag in detached mode.
self._git_create_remote_tag(subp_name, 'newtag')
self._wrap_create_git(subp_name, 'newtag')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), '')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newtag'))
# Create a new remote commit and update the wrap file with the commit id.
# Checks that "meson subprojects update --reset" checks out the new commit
# in detached mode.
self._git_local(['checkout', 'master'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
new_commit = self._git_remote(['rev-parse', 'HEAD'], subp_name)
self._wrap_create_git(subp_name, new_commit)
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), '')
self.assertEqual(self._git_local_commit(subp_name), new_commit)
# Create a local project not in a git repository, then update it with
# a git wrap. Without --reset it should print an error message and return
# failure. With --reset it should delete the existing project and clone the
# new project.
subp_name = 'sub2'
self._create_project(self.subprojects_dir / subp_name)
self._git_create_remote_repo(subp_name)
self._wrap_create_git(subp_name)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self._subprojects_cmd(['update'])
self.assertIn('Not a git repository', cm.exception.output)
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name))
@skipIfNoExecutable('true')
def test_foreach(self):
self._create_project(self.subprojects_dir / 'sub_file')
self._wrap_create_file('sub_file')
self._git_create_local_repo('sub_git')
self._wrap_create_git('sub_git')
self._git_create_local_repo('sub_git_no_wrap')
def ran_in(s):
ret = []
prefix = 'Executing command in '
for l in s.splitlines():
if l.startswith(prefix):
ret.append(l[len(prefix):])
return sorted(ret)
dummy_cmd = ['true']
out = self._subprojects_cmd(['foreach'] + dummy_cmd)
self.assertEqual(ran_in(out), sorted(['subprojects/sub_file', 'subprojects/sub_git', 'subprojects/sub_git_no_wrap']))
out = self._subprojects_cmd(['foreach', '--types', 'git,file'] + dummy_cmd)
self.assertEqual(ran_in(out), sorted(['subprojects/sub_file', 'subprojects/sub_git']))
out = self._subprojects_cmd(['foreach', '--types', 'file'] + dummy_cmd)
self.assertEqual(ran_in(out), ['subprojects/sub_file'])
out = self._subprojects_cmd(['foreach', '--types', 'git'] + dummy_cmd)
self.assertEqual(ran_in(out), ['subprojects/sub_git'])
def _clang_at_least(compiler: 'Compiler', minver: str, apple_minver: T.Optional[str]) -> bool:
"""
Check that the Clang compiler is at least a specified version, whether AppleClang or regular Clang.
Parameters
----------
compiler:
Meson compiler object
minver: str
Clang minimum version
apple_minver: str, optional
AppleClang minimum version
Returns
-------
at_least: bool
Clang is at least the specified version
"""
if isinstance(compiler, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler)):
if apple_minver is None:
return False
return version_compare(compiler.version, apple_minver)
return version_compare(compiler.version, minver)
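# A minimal usage sketch (hypothetical version strings, not from this file):
#   cc = get_fake_env().detect_c_compiler(MachineChoice.HOST)
#   if _clang_at_least(cc, '>=9.0', '>=11.0'):
#       pass  # gate a check on regular Clang >= 9 or AppleClang >= 11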
def unset_envs():
# For unit tests we must fully control all command lines
# so that there are no unexpected changes coming from the
# environment, for example when doing a package build.
varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.CFLAGS_MAPPING.values())
for v in varnames:
if v in os.environ:
del os.environ[v]
def convert_args(argv):
# If we got passed a list of tests, pass it on
pytest_args = ['-v'] if '-v' in argv else []
test_list = []
for arg in argv:
if arg.startswith('-'):
if arg in ('-f', '--failfast'):
arg = '--exitfirst'
pytest_args.append(arg)
continue
# ClassName.test_name => 'ClassName and test_name'
if '.' in arg:
arg = ' and '.join(arg.split('.'))
test_list.append(arg)
if test_list:
pytest_args += ['-k', ' or '.join(test_list)]
return pytest_args
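# Example mapping (hypothetical argv): convert_args(['AllPlatformTests.test_foo', 'DataTests'])
# returns ['-k', 'AllPlatformTests and test_foo or DataTests'], i.e. dotted test names
# become pytest -k expressions and bare names select whole testcases.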
def running_single_tests(argv, cases):
'''
Check whether we only got arguments for running individual tests, not
entire testcases, and not all testcases (no test args).
'''
got_test_arg = False
for arg in argv:
if arg.startswith('-'):
continue
for case in cases:
if not arg.startswith(case):
continue
if '.' not in arg:
# Got a testcase, done
return False
got_test_arg = True
return got_test_arg
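# Examples against the cases list defined in main() below:
#   running_single_tests(['AllPlatformTests'], cases)          -> False (whole testcase)
#   running_single_tests(['AllPlatformTests.test_foo'], cases) -> True  (single test)
#   running_single_tests(['-v'], cases)                        -> False (no test args)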
def main():
unset_envs()
cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
'TAPParserTests', 'SubprojectsCommandTests',
'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
'WindowsTests', 'DarwinTests']
try:
import pytest # noqa: F401
# Need pytest-xdist for `-n` arg
import xdist # noqa: F401
pytest_args = []
# Don't use pytest-xdist when running single unit tests since it wastes
# time spawning a lot of processes to distribute tests to in that case.
if not running_single_tests(sys.argv, cases):
pytest_args += ['-n', 'auto']
pytest_args += ['./run_unittests.py']
pytest_args += convert_args(sys.argv[1:])
return subprocess.run(python_command + ['-m', 'pytest'] + pytest_args).returncode
except ImportError:
print('pytest or pytest-xdist not found, using unittest instead')
# Fallback to plain unittest.
return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
print('Meson build system', mesonbuild.coredata.version, 'Unit Tests')
start = time.monotonic()
try:
raise SystemExit(main())
finally:
print('Total time: {:.3f} seconds'.format(time.monotonic() - start))
|
VentanaMain.py
|
# -*- coding: UTF-8 -*-
# @author JoseMariaRomeroARK visit my gitHub site at: https://github.com/JoseMariaRomeroARK
from tkinter import *
import Utilidades
import os, shutil, threading
from VentanAux import createNew
listaFormatos =[".mp3",".aac",".m4a",".mp4",".wma",".wav",".atrac",".m4p",".m3p",".flac",".midi",".oog"]
ruta = Utilidades.getRuta()
#
# Main window
#
def createNewMain(lista):
ventana = Tk()
vrd = PhotoImage(file=(ruta+'verde.png'),format='png')
rj = PhotoImage(file=(ruta+'rojo.png'),format='png')
marco = Frame(ventana)
marco.grid(column=0,row=0,padx=(60,60),pady=(15,15))
# Copies a file (or a set of files) to the destination folder,
# inserts its name(s) into the destination Listbox and removes it from the origin one
def copiar(Marcador,ListboxOrig,ListboxDest,dirOrig,dirDest):
try:
for seleccionado in ListboxOrig.curselection()[::-1]:
nombre = ListboxOrig.get(seleccionado)
print("Copying: "+nombre)
shutil.copyfile(dirOrig+"/"+nombre,dirDest+"/"+nombre)
ListboxOrig.delete(seleccionado)
ListboxDest.insert(END,nombre)
print("Copied")
except:
print("File error, maybe you are tring to copy a folder.")
finally:
cambiar_a_verde()
# Deletes a file (or a set of files) from the destination folder,
# inserts its name(s) into the origin Listbox and removes it from the destination one
def borrar(ListboxOrig,ListboxDest,dirOrig,dirDest):
try:
for seleccionado in ListboxDest.curselection()[::-1]:
nombre = ListboxDest.get(seleccionado)
print("Removeing: "+nombre)
os.remove(dirDest+"/"+nombre)
ListboxDest.delete(seleccionado)
ListboxOrig.insert(END,nombre)
print("Removed")
except:
print("File error")
finally:
cambiar_a_verde()
# Changes the status indicator color to red (busy)
def cambiar_a_rojo():
Ocupado.set(True)
imagenLB.config(image=rj)
ventana.title("Movile Music Mananger (M3)")
# Changes the status indicator color to green (idle)
def cambiar_a_verde():
Ocupado.set(False)
imagenLB.config(image=vrd)
ventana.title("Movile Music Mananger (M3)")
# Runs a Thread that copies the selected file(s)
def ThreadCopiar(Marcador,ListboxOrig,ListboxDest,dirOrig,dirDest):
if not Ocupado.get():
cambiar_a_rojo()
ventana.title("Movile Music Mananger (M3) - Copying...")
threading.Thread(target=copiar,args=(Marcador,ListboxOrig,ListboxDest,dirOrig,dirDest,)).start()
# Runs a Thread that deletes the selected file(s)
def ThreadBorrar(Marcador,ListboxOrig,ListboxDest,dirOrig,dirDest):
if not Ocupado.get():
cambiar_a_rojo()
ventana.title("Movile Music Mananger (M3) - Removeing...")
threading.Thread(target=borrar,args=(ListboxOrig,ListboxDest,dirOrig,dirDest,)).start()
# UI components
# Variables:
Ocupado = BooleanVar(value=False)
varOrigen = StringVar(value=lista[1])
varDestino = StringVar(value=lista[2])
# Row 0:
lblOrigen= Label(marco,text="Origin: ")
imagenLB = Label(marco,image=vrd)
lblDestino= Label(marco,text="Destination: ")
# Row 2:
botonCambiarO = Button(marco,text="Change origin",command= lambda: cambioValores(Ocupado,ventana,varDestino.get(),varOrigen.get()))
botonCambiarD = Button(marco,text="Change destination",command= lambda: cambioValores(Ocupado,ventana,varDestino.get(),varOrigen.get()))
# Row 3:
marcoOrigen = Frame(marco)
ScrollbarMarcoOrigen = Scrollbar(marcoOrigen,orient=VERTICAL)
ListboxMarcoOrigen = Listbox(marcoOrigen,height=30,width=37,selectmode=EXTENDED,yscrollcommand=ScrollbarMarcoOrigen.set)
ScrollbarMarcoOrigen.config(command=ListboxMarcoOrigen.yview)
ScrollbarMarcoOrigen.pack(side=RIGHT,fill=Y)
ListboxMarcoOrigen.pack(side=LEFT,fill=BOTH)
marcoPasar = Frame(marco)
BotonPasar = Button(marcoPasar,text="-->",command=lambda: ThreadCopiar(imagenLB,ListboxMarcoOrigen,ListboxMarcoDestino,varOrigen.get(),varDestino.get()))
BotonQuitar = Button(marcoPasar,text="<--",command=lambda: ThreadBorrar(imagenLB,ListboxMarcoOrigen,ListboxMarcoDestino,varOrigen.get(),varDestino.get()))
BotonPasar.grid(row=0,column=0)
BotonQuitar.grid(row=1,column=0)
marcoDestino = Frame(marco)
ScrollbarMarcoDestino = Scrollbar(marcoDestino,orient=VERTICAL)
ListboxMarcoDestino = Listbox(marcoDestino,height=30,width=37,selectmode=EXTENDED,yscrollcommand=ScrollbarMarcoDestino.set)
ScrollbarMarcoDestino.config(command=ListboxMarcoDestino.yview)
ScrollbarMarcoDestino.pack(side=RIGHT,fill=Y)
ListboxMarcoDestino.pack(side=RIGHT,fill=BOTH)
# Row 4:
lblOrigenCarpeta= Label(marco,text=varOrigen.get())
lblDestinoCarpeta= Label(marco,text=varDestino.get())
# UI layout
# grid row 0 (Row 1):
lblOrigen.grid(row=0,column=0)
imagenLB.grid(row=0,column=1)
lblDestino.grid(row=0,column=2)
# grid row 1 (Row 2):
botonCambiarO.grid(row=1,column=0)
botonCambiarD.grid(row=1,column=2)
# grid row 2 (Row 3):
marcoOrigen.grid(row=2,column=0)
marcoPasar.grid(row=2,column=1)
marcoDestino.grid(row=2,column=2)
# grid row 3 (Row 4):
lblOrigenCarpeta.grid(row=3,column=0)
lblDestinoCarpeta.grid(row=3,column=2)
# Populate the values:
rellenar(ListboxMarcoOrigen,ListboxMarcoDestino,varOrigen.get(),varDestino.get())
# Adjust the window
Utilidades.setPref(ventana)
# Start the main loop
ventana.mainloop()
# This method swaps the data Destination and data Origin values
def cambioValores(Ocupado,ventana,valDestino,valOrigen):
if not Ocupado.get():
ventana.destroy()
createNew(valDestino,valOrigen)
# Collect the items of each set
def rellenar(ListboxOrig,ListboxDest,dirOrig,dirDest):
try:
# What is in the data origin (A)
ListaObjetosOrigenFiltrados = list()
ListaCarpetas = list()
ListaObjetosOrigen = os.listdir(dirOrig)
for item in ListaObjetosOrigen:
if '.' in item:
for formato in listaFormatos:
if formato in item:
ListaObjetosOrigenFiltrados.append(item)
break
else:
ListaCarpetas.append("Folder: "+item)
# What is in the data destination (B)
ListaObjetosDestinoFiltrados = list()
ListaObjetosDestino = os.listdir(dirDest)
for item in ListaObjetosDestino:
if '.' in item:
for formato in listaFormatos:
if formato in item:
ListaObjetosDestinoFiltrados.append(item)
break
# What should be shown in the destination Listbox = A ∩ B (C)
conjuntoD = list()
for cancionD in ListaObjetosOrigenFiltrados:
for cancion in ListaObjetosDestinoFiltrados:
if cancion == cancionD:
conjuntoD.append(cancionD)
ListboxDest.insert(END,cancionD)
break
# What should be shown in the origin Listbox = A - C (D)
# (folders are added to improve the user experience)
for carpeta in ListaCarpetas:
ListboxOrig.insert(END,carpeta)
# fill it in with the songs
for cancion in ListaObjetosOrigenFiltrados:
coincidencias = 0
for cancionD in conjuntoD:
if cancion == cancionD:
coincidencias += 1
if coincidencias == 0:
ListboxOrig.insert(END,cancion)
except:
print("Error\nOrigin directory: "+dirOrig+"\nDestination directory: "+dirDest)
|
DLHandler.py
|
import logging
from .DLInfos import *
from .DLProgress import *
from .packer import Packer
import time, os
from .DLThreadPool import ThreadPool
# LOG_FORMAT = "%(asctime)s,%(msecs)03d - %(levelname)s - %(threadName)-12s - (%(progress)s)[%(urlid)s] - %(message)s"
#
# logging.basicConfig(format=LOG_FORMAT, datefmt="%m/%d/%Y %H:%M:%S", level=logging.CRITICAL)
#
# logger = logging.getLogger('nbdler')
__URL_NODE_PARAMS__ = {
'urls': 'url',
'cookies': 'cookie',
'hosts': 'host',
'ports': 'port',
'paths': 'path',
'headers': 'headers',
'max_threads': 'max_thread',
'range_formats': 'range_format',
# 'pull_flags': 'pull_flag'
}
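# How batchAdd() consumes this mapping (hypothetical values): single-element lists
# are broadcast to every node, lists matching len(urls) are distributed per node, e.g.
#   handler.batchAdd(urls=['http://a/f', 'http://b/f'], max_threads=[4])
# expands into addNode(url='http://a/f', max_thread=4) and addNode(url='http://b/f', max_thread=4).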
__CONFIG__ = {
'filename': 'file.name',
'filepath': 'file.path',
'block_size': 'file.BLOCK_SIZE',
'max_conn': 'url.max_conn',
'buffer_size': 'file.buffer_size',
'max_speed': 'url.max_speed',
'wait_for_run': '_wait_for_run',
'daemon': '_daemon',
'max_retry': 'url.max_retry'
}
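# How Handler.config() resolves these dotted attribute paths (hypothetical values):
#   handler.config(filename='movie.mp4', max_conn=8)
# walks 'file.name' and 'url.max_conn', i.e. it sets handler.file.name = 'movie.mp4'
# and handler.url.max_conn = 8; single-segment paths set attributes on the handler itself.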
class Handler(Packer, object):
def __init__(self):
self.url = UrlPool(self)
self.file = File(self)
self.threads = ThreadPool(False)
self.__globalprog__ = GlobalProgress(self, AUTO)
self.status = self.__globalprog__.status
self.__new_project__ = True
self.globalprog = self.__globalprog__
# self.shutdown_flag = False
# self._wait_for_run = False
self._batchnode_bak = None
self._daemon = False
def setDaemon(self, daemonic):
self._daemon = daemonic
self.threads.setDaemon(daemonic)
def uninstall(self):
self.globalprog = self.__globalprog__
def install(self, GlobalProgress):
self.globalprog = GlobalProgress
def __batchAdd__(self, pack_yield):
for iter_kw in pack_yield:
self.addNode(**iter_kw)
def batchAdd(self, **kwargs):
global __URL_NODE_PARAMS__
pack_yield = []
iter_len = len(kwargs.get('urls', []))
for i in range(iter_len):
node = {}
for m, n in __URL_NODE_PARAMS__.items():
if m in kwargs:
if len(kwargs[m]) == 1:
node[n] = kwargs[m][0]
elif len(kwargs[m]) == iter_len:
node[n] = kwargs[m][i]
else:
raise ValueError('IterLenError')
pack_yield.append(node)
self.threads.Thread(target=self.__batchAdd__, args=(pack_yield,), name=cv.ADDNODE).start()
print(self.file.name)
def addNode(self, *args, **kwargs):
self.url.addNode(*args, **kwargs)
def delete(self, url=None, urlid=None):
if urlid:
self.url.delete(urlid)
elif url:
for i in self.url._url_box.values():
if i.url == url:
self.url.delete(i.id)
def insert(self, begin, end, Urlid=None, thread_num=1):
put_urlid = self.globalprog.allotter.assignUrlid() if not Urlid else Urlid
if put_urlid != -1:
print('fs_insert')
self.globalprog.fs.insert(begin, end)
for i in self.globalprog.allotter.splitRange((begin, end), thread_num):
self.globalprog.insert(put_urlid, i[0], i[1])
def manualRun(self):
if not self.globalprog.progresses:
raise Exception('EmptyQueue')
self.globalprog.run()
def trap(self):
self.globalprog.trap()
def join(self):
self.globalprog.join()
def isCritical(self):
return self.globalprog.isCritical()
def config(self, **kwargs):
for i, j in __CONFIG__.items():
if i in kwargs:
objs = j.split('.')
if len(objs) == 1:
setattr(self, objs[0], kwargs[i])
else:
attr = getattr(self, objs[0])
for m in objs[1:-1]:
attr = getattr(attr, m)
setattr(attr, objs[-1], kwargs[i])
def close(self):
if not self.status.isEnd():
raise RuntimeError("download isn't completed.")
self.join()
if os.path.isfile(os.path.join(self.file.path, self.file.name + '.nbdler')):
os.remove(os.path.join(self.file.path, self.file.name + '.nbdler'))
def __run__(self):
if self.file.size == -1 and self._batchnode_bak:
self.batchAdd(**self._batchnode_bak)
# for i in self.threads.getThreads(cv.ADDNODE):
while self.file.size == -1:
if not self.threads.getAll(cv.ADDNODE):
if self.file.size == -1:
return
time.sleep(0.01)
if self.__new_project__:
self.file.makeFile()
# if self.file.size == -1:
# return
self.globalprog.allotter.makeBaseConn()
self.globalprog.save()
self.__new_project__ = False
self.globalprog.run()
def run(self):
self.threads.Thread(target=self.__run__, name=cv.LAUNCHER).start()
def pause(self):
self.globalprog.pause()
print(self.file.name, 'paused')
shutdown = pause
# def pausing(self):
# return self.globalprog.status.pausing()
def isEnd(self):
return self.status.isEnd()
def unpack(self, packet):
Packer.unpack(self, packet)
self.__new_project__ = False
# def shutdown(self):
# # if not self.isEnd():
# # self.globalprog.shutdown_flag = True
# self.globalprog.shutdown()
# # self.globalprog.shutdown_flag = False
def __packet_params__(self):
return ['url', 'file', '__globalprog__']
def getFileName(self):
return self.file.name if self.file.name else None
def getFileSize(self):
return self.file.size
def getAllUrl(self):
return self.url._url_box
def getInsSpeed(self):
return self.globalprog.getInsSpeed()
def getAvgSpeed(self):
return self.globalprog.getAvgSpeed()
def getLeft(self):
return self.globalprog.getLeft()
def getIncByte(self):
return self.getFileSize() - self.getLeft() if self.getFileSize() != -1 else 0
def getOnlines(self):
return self.globalprog.getOnlines()
def getConnections(self):
return self.globalprog.getConnections()
# def getBlockMap(self):
# return self.globalprog.getMap()
def getFileStorage(self):
return self.globalprog.fs
# def getSegsValue(self):
# return self.globalprog.fs.getvalue()
#
# def getSegsSize(self):
# return self.globalprog.fs.getStorageSize()
def getUrlsThread(self):
return self.globalprog.allotter.getUrlsThread()
def __repr__(self):
return '[%s] - %s' % (self.file.size, self.file.name)
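# A minimal usage sketch (an assumption-laden outline, not verified against the
# nbdler docs: it assumes Handler() can be constructed bare and that config()
# and batchAdd() accept the keys mapped in __CONFIG__ and __URL_NODE_PARAMS__):
#
#   dl = Handler()
#   dl.config(filename='file.bin', filepath='.', max_conn=8)
#   dl.batchAdd(urls=['http://example.com/file.bin'])
#   dl.run()      # launch asynchronously
#   dl.trap()     # block until the download ends
#   dl.close()    # drop the .nbdler checkpoint once complete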
|
run_jobs.py
|
__author__ = 'ones'
import os, sys, threading
import error_handling
working_dir = os.getcwd()
directories = []
zombies = []
if len(sys.argv) == 1:
os.system('find -mindepth 3 -type d > compounds_directories')
elif len(sys.argv) == 2:
root = sys.argv[1]
depth = root.count('/')
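    # find's -mindepth counts below the search root, so subtract the path
    # components already in root (depth slashes => depth + 1 components):
    # 3 - (depth + 1) = 2 - depth keeps the absolute depth at 3.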
os.system('find ./'+root+' -mindepth '+str(2-depth)+' -type d > compounds_directories')
else:
exit("No more than 1 argument")
dir_f = open('compounds_directories', 'r')
lines = dir_f.readlines()
dir_f.close()
max_depth = 0
for line in lines:
    depth = len([part for part in line.split('/') if part])
if depth > max_depth:
max_depth = depth
for line in lines:
line = line.strip()[1:]
    if len([part for part in line.split('/') if part]) == max_depth - 1:
directories.append(working_dir+line)
for directory in directories:
os.chdir(directory)
runjob = 'qsub runjob.sh'
os.system(runjob)
# t1 = threading.Thread(target= runjob, args=(directory, ))
# t1.setDaemon(True)
# t1.start()
# t2 = threading.Thread(target= handle_errors, args=(directory, ))
# t2.setDaemon(True)
# t2.start()
#
# process = os.fork()
# if process == 0:
# os.chdir(directory)
# runjob = 'qsub runjob.sh'
# os.system(runjob)
#
#
# else:
# error_handling.check_error_logfile(directory, process)
#
#
# zombies.append(process)
#
# for zombie in zombies:
# os.waitpid(zombie, 0)
|
stereopi.py
|
#!/usr/bin/python3 -u
import math
import os
import threading
import time
import RPi.GPIO as GPIO
import board
import neopixel
import wakeup
switch_pin = 13
led_pin = board.D12
pixels = neopixel.NeoPixel(led_pin, 1)
GPIO.setmode(GPIO.BCM)
GPIO.setup(switch_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
print("Starting...")
dimmer = .04
from remote_service import RemoteService
def run_alarm(alarm_time):
    wakeup.set_alarmclock(alarm_time)
def set_alarm(alarm):
threading.Thread(target=run_alarm, args=(alarm,)).start()
def timer(silent=False):
    timer_mins = 20
    if silent:
        os.system('sudo systemctl stop tuner')
        os.system("say Silent")
    else:
        os.system('sudo systemctl start tuner')
        os.system(f"say Timer {timer_mins} minutes")
time.sleep(timer_mins * 60)
if silent:
os.system('sudo systemctl start tuner')
else:
os.system('sudo systemctl stop tuner')
def minute_countdown(duration):
for i in range(0, duration):
os.system(f"say {duration - i}")
time.sleep(60)
os.system('say You are done')
def custom_timer(duration):
os.system(f"say Timer {duration} minutes")
time.sleep(duration * 60)
os.system(f"say Timer {duration} minutes done")
def daily_workout():
os.system('say Warmup')
time.sleep(60)
os.system('say Stand')
time.sleep(60)
os.system('say Dog')
time.sleep(60)
os.system('say Belly Ramp')
time.sleep(60)
os.system('say Knee head')
time.sleep(30)
os.system('say change')
time.sleep(30)
os.system('say Crack')
time.sleep(60)
os.system('say Cycle')
time.sleep(60)
os.system('say Baby')
time.sleep(60)
os.system('say Child')
time.sleep(60)
os.system("say You're done")
def playpause():
pixels[0] = (0, 0, 0)
os.system('/usr/bin/mpc pause &')
def button_pressed(channel):
playpause()
def start_timer():
threading.Thread(target=timer).start()
def start_minute_countdown(duration):
threading.Thread(target=minute_countdown, args=[duration]).start()
def start_custom_timer(duration):
threading.Thread(target=custom_timer, args=[duration]).start()
def start_daily_workout():
threading.Thread(target=daily_workout, args=[]).start()
def start_silent_timer():
threading.Thread(target=timer,args=(True,)).start()
def on_key_pressed(key):
print(key)
    if key == 'KEY_': pass
    elif key == 'KEY_FASTFORWARD': os.system('curl -X POST "https://api.spotify.com/v1/me/player/next" -H "Authorization: Bearer BQDy-sPybW8wtFbDhA9VfbTN1PSnoNZ6RHTzQrykoQgbvSXiSjbpotv3Tx6QzVzFt0WtNYXBgANRULfVczCpq9tjNfw_wpSMRwFNhW4fLyBXODHcs-r_C8JSQwyhcSIHdjS7ntgGE7scyAg" &')
    elif key == 'KEY_REWIND': os.system('curl -X POST "https://api.spotify.com/v1/me/player/previous" -H "Authorization: Bearer BQDy-sPybW8wtFbDhA9VfbTN1PSnoNZ6RHTzQrykoQgbvSXiSjbpotv3Tx6QzVzFt0WtNYXBgANRULfVczCpq9tjNfw_wpSMRwFNhW4fLyBXODHcs-r_C8JSQwyhcSIHdjS7ntgGE7scyAg" &')
    elif key == 'KEY_SEARCH':
os.system('say `sudo python3 /home/pi/stereopi/time_to_speech.py` &')
elif key == 'KEY_RED':
pixels[0] = (int(255 * dimmer), 0, 0)
elif key == 'KEY_GREEN':
pixels[0] = (0, int(255 * dimmer), 0)
elif key == 'KEY_YELLOW':
pixels[0] = (int(255 * dimmer), int(255 * dimmer), 0)
elif key == 'KEY_BLUE':
pixels[0] = (0, 0, int(255 * dimmer))
elif key == 'KEY_PLAYPAUSE':
playpause()
elif key == 'KEY_VOLUMEUP':
os.system("amixer set PCM 5%+")
elif key == 'KEY_RECORD':
os.system("say record")
elif key == 'KEY_INFO':
os.system("sudo systemctl stop tuner")
os.system("sudo systemctl restart shairport-sync")
os.system("sudo systemctl restart raspotify")
elif key == 'KEY_TUNER':
os.system('sudo systemctl restart raspotify')
os.system("sudo systemctl restart tuner")
os.system("say starting tuner")
elif key == 'KEY_VOLUMEDOWN':
os.system("amixer set PCM 5%-")
elif key == 'KEY_1':
start_daily_workout()
elif key == 'KEY_5':
start_custom_timer(5)
elif key == 'KEY_0':
start_custom_timer(10)
elif key == 'KEY_PREVIOUSSONG':
start_silent_timer()
elif key == 'KEY_NEXTSONG':
start_timer()
elif key == 'KEY_HOMEPAGE':
alarm = '6:15'
os.system(f"say setting alarm to {alarm}")
set_alarm(alarm)
elif key == 'KEY_ENTER':
import random
os.system('say tossing a coin')
time.sleep(1)
result = 'heads' if random.randint(0, 1) == 1 else 'tails'
os.system(f'say {result}')
else:
pixels[0] = (int(255 * dimmer), 0, int(255 * dimmer))
GPIO.add_event_detect(switch_pin, GPIO.FALLING, callback=button_pressed, bouncetime=250)
service = RemoteService()
service.start_listening(on_key_pressed) # This call is blocking so we never come here
x = 0
increment = 0.1
sleep = 0.01
while True:
if GPIO.input(switch_pin) == 0:
pixels[0] = (0, 0, int(255 * dimmer))
blue = abs(int(math.sin(x) * 255 * dimmer))
red = abs(int(math.cos(x) * 255 * dimmer))
# green = abs(int(math.cos(x + math.pi/4)*255*dimmer))
pixels[0] = (red, 0, blue)
x = x + increment
time.sleep(sleep)
|
multiprocessing_queue_1.py
|
#!/usr/bin/env python3
import multiprocessing
import time
import random
import os
from multiprocessing import Queue
q_1 = Queue()
def hello(q, n):
time.sleep(random.randint(1,3))
q.put(os.getpid())
print("[{0}] Hello!".format(n))
processes = [ ]
for i in range(10):
t = multiprocessing.Process(target=hello, args=(q_1, i,))
processes.append(t)
for process in processes:
process.start()
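# Joining before draining the queue is safe here because each child enqueues a
# single small PID; with larger payloads, drain first, since a child blocked on
# a full queue pipe would make join() deadlock.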
for process in processes:
process.join()
mylist = [ ]
while not q_1.empty():
mylist.append(q_1.get())
print("Done!")
print(len(mylist))
print(mylist)
|
noticeboard_server.py
|
#!/usr/bin/env python3.9
import socket
import argparse
import threading
import queue
import logging
from nblib import send_message, recv_message
DEFAULT_PORT = 12345
DEFAULT_WORKER_COUNT = 3
DEFAULT_HOST = "127.0.0.1"
DEFAULT_TIMEOUT = 13 # use prime as timeout
LISTEN_QUEUE_SIZE = 5 # how many connection requests to queue for listen()
s = socket.socket() # Create a socket object
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# host_name = socket.gethostname()
# avoid error
# socket.gaierror: [Errno 8] nodename nor servname provided, or not known
host_name = DEFAULT_HOST
host_ip = socket.gethostbyname(host_name)
# Parse args
parser = argparse.ArgumentParser(description='Run noticeboard server')
# --host
parser.add_argument('--host', default=host_ip, type=str,
help='host to connect to')
# --port n
parser.add_argument('--port', default=DEFAULT_PORT, type=int,
help='host port to connect to')
# --workers n
parser.add_argument('--workers', default=DEFAULT_WORKER_COUNT, type=int,
help='number of worker threads')
# Set up logging
logging.basicConfig(encoding='utf-8',
level=logging.INFO,
format='%(asctime)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
args = parser.parse_args()
host = args.host
port = args.port
num_workers = args.workers
# generic worker
def worker(q, handler):
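    # get() blocks for at most DEFAULT_TIMEOUT seconds; on timeout, loop and
    # poll again so this daemon thread never blocks forever on an empty queue.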
while True:
try:
job = q.get(timeout=DEFAULT_TIMEOUT)
except queue.Empty:
continue
handler(job)
q.task_done()
def output_handler(text):
    logging.info(text)
output_q = queue.Queue()
def output(text):
output_q.put_nowait(text)
output_thread = threading.Thread(target=worker, args=[output_q, output_handler], daemon=True)
output_thread.start()
output("host_name: %s" % host_name)
output("host_ip: %s" % host_ip)
output("host: %s" % host)
output("port: %s" % port)
# Global store for messages
MESSAGES = {}
# Record which message a reply is added to
REPLIES = {}
def new_message(text):
return { "message" : text, "replies" : {} }
def action_echo(request):
output("They said '%s'" % request["message"])
text = 'You said %s' % request["message"]
response = {
"status" : "ok",
"message" : text
}
return response
def action_post(request):
request_id = request["request_id"]
MESSAGES[request_id] = new_message(request["message"])
response = {
"status" : "ok",
"id" : request_id
}
return response
def action_readall(request):
response = {
"status" : "ok",
"messages" : MESSAGES
}
return response
def action_read(request):
msg_id = request["id"]
if msg_id in MESSAGES:
response = {
"status" : "ok",
"message" : MESSAGES[msg_id]
}
else:
text = "Unknown message id: '%s'" % msg_id
output(text)
response = {
"status" : "error",
"reason" : text
}
return response
def action_reply(request):
msg_id = request["id"]
if msg_id in MESSAGES:
request_id = request["request_id"]
msg = MESSAGES[msg_id]
msg["replies"][request_id] = request["message"]
REPLIES[request_id] = msg_id
response = {
"status" : "ok",
"id" : request_id
}
else:
text = "Unknown message id: '%s'" % msg_id
output(text)
response = {
"status" : "error",
"reason" : text
}
return response
MESSAGES_LOCK = threading.Lock()
def action_remove(request):
msg_id = request["id"]
with MESSAGES_LOCK:
if msg_id in MESSAGES or msg_id in REPLIES:
if msg_id in MESSAGES:
del MESSAGES[msg_id]
reply_ids = []
for reply_id in REPLIES:
if REPLIES[reply_id] == msg_id:
reply_ids.append(reply_id)
for reply_id in reply_ids:
del REPLIES[reply_id]
elif msg_id in REPLIES:
parent_id = REPLIES.pop(msg_id)
msg = MESSAGES[parent_id]
del msg["replies"][msg_id]
response = {
"status" : "ok",
"message" : "Message %s removed" % msg_id
}
else:
text = "Unknown message id: '%s'" % msg_id
output(text)
response = {
"status" : "error",
"reason" : text
}
return response
def action_unknown(request):
text = "Unknown action: '%s'" % request["action"]
output(text)
response = {
"status" : "error",
"reason" : text
}
return response
ACTIONS = {
"echo" : action_echo,
"post" : action_post,
"readall" : action_readall,
"read" : action_read,
"reply" : action_reply,
"remove" : action_remove,
"unknown" : action_unknown
}
def handle_request(request):
action = request["action"]
    if action not in ACTIONS:
action = "unknown"
fn = ACTIONS[action]
return fn(request)
def request_handler(job):
request_id, conn = job
request = recv_message(conn)
request["request_id"] = request_id
msg = handle_request(request)
send_message(conn, msg)
conn.close() # Close the connection
jobq = queue.Queue()
for i in range(num_workers):
t = threading.Thread(target=worker, args=[jobq, request_handler], daemon=True)
t.start()
s.bind((host, port)) # Bind to the port
s.listen(LISTEN_QUEUE_SIZE) # Now wait for client connection.
request_id = 0
while True:
try:
conn, addr = s.accept() # Establish connection with client.
request_id += 1
output('%04d: Got connection from %s' % (request_id, addr))
jobq.put_nowait((request_id, conn))
    except KeyboardInterrupt:  # Ctrl-C stops the accept loop
        break
print("Server ending")
|
opencv_gst_camera.py
|
import traitlets
import atexit
import cv2
import threading
import numpy as np
from .camera_base import CameraBase
class OpenCvGstCamera(CameraBase):
value = traitlets.Any()
# config
width = traitlets.Integer(default_value=224).tag(config=True)
height = traitlets.Integer(default_value=224).tag(config=True)
fps = traitlets.Integer(default_value=30).tag(config=True)
capture_width = traitlets.Integer(default_value=816).tag(config=True)
capture_height = traitlets.Integer(default_value=616).tag(config=True)
def __init__(self, *args, **kwargs):
self.value = np.empty((self.height, self.width, 3), dtype=np.uint8)
        super().__init__(*args, **kwargs)
try:
self.cap = cv2.VideoCapture(self._gst_str(), cv2.CAP_GSTREAMER)
re, image = self.cap.read()
if not re:
raise RuntimeError('Could not read image from camera.')
self.value = image
self.start()
        except Exception:
self.stop()
raise RuntimeError(
'Could not initialize camera. Please see error trace.')
atexit.register(self.stop)
def _capture_frames(self):
while True:
re, image = self.cap.read()
if re:
self.value = image
else:
break
def _gst_str(self):
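        # Capture NV12 from the CSI sensor at capture_width x capture_height,
        # let nvvidconv downscale, and convert to BGRx for the OpenCV appsink.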
return 'nvarguscamerasrc sensor-mode=3 ! video/x-raw(memory:NVMM), width=%d, height=%d, format=(string)NV12, framerate=(fraction)%d/1 ! nvvidconv ! video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! videoconvert ! appsink' % (
self.capture_width, self.capture_height, self.fps, self.width, self.height)
def start(self):
if not self.cap.isOpened():
self.cap.open(self._gst_str(), cv2.CAP_GSTREAMER)
        if not hasattr(self, 'thread') or not self.thread.is_alive():
self.thread = threading.Thread(target=self._capture_frames)
self.thread.start()
def stop(self):
if hasattr(self, 'cap'):
self.cap.release()
if hasattr(self, 'thread'):
self.thread.join()
def restart(self):
self.stop()
self.start()
@staticmethod
def instance(*args, **kwargs):
return OpenCvGstCamera(*args, **kwargs)
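# A minimal usage sketch (assumes a Jetson-style CSI camera for which the
# nvarguscamerasrc pipeline above is valid):
#
#   camera = OpenCvGstCamera.instance(width=224, height=224)
#   frame = camera.value   # latest BGR frame as a numpy array
#   camera.stop()          # release the capture and join the reader thread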
|
tests.py
|
"""
Unit tests for reverse URL lookups.
"""
import sys
import threading
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import (
HttpRequest, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.shortcuts import redirect
from django.test import SimpleTestCase, TestCase, override_settings
from django.test.utils import override_script_prefix
from django.urls import (
NoReverseMatch, Resolver404, ResolverMatch, URLPattern, URLResolver,
get_callable, get_resolver, get_urlconf, include, path, re_path, resolve,
reverse, reverse_lazy,
)
from django.urls.resolvers import RegexPattern
from . import middleware, urlconf_outer, views
from .utils import URLObject
from .views import empty_view
resolve_test_data = (
# These entries are in the format: (path, url_name, app_name, namespace, view_name, func, args, kwargs)
# Simple case
('/normal/42/37/', 'normal-view', '', '', 'normal-view', views.empty_view, (), {'arg1': '42', 'arg2': '37'}),
(
'/view_class/42/37/', 'view-class', '', '', 'view-class', views.view_class_instance, (),
{'arg1': '42', 'arg2': '37'}
),
(
'/included/normal/42/37/', 'inc-normal-view', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-normal-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/included/view_class/42/37/', 'inc-view-class', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-view-class',
views.view_class_instance, (), {'arg1': '42', 'arg2': '37'}
),
# Unnamed args are dropped if you have *any* kwargs in a pattern
('/mixed_args/42/37/', 'mixed-args', '', '', 'mixed-args', views.empty_view, (), {'arg2': '37'}),
(
'/included/mixed_args/42/37/', 'inc-mixed-args', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-mixed-args',
views.empty_view, (), {'arg2': '37'}
),
(
'/included/12/mixed_args/42/37/', 'inc-mixed-args', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-mixed-args',
views.empty_view, (), {'arg2': '37'}
),
# Unnamed views should have None as the url_name. Regression data for #21157.
(
'/unnamed/normal/42/37/', None, '', '', 'urlpatterns_reverse.views.empty_view', views.empty_view, (),
{'arg1': '42', 'arg2': '37'}
),
(
'/unnamed/view_class/42/37/', None, '', '', 'urlpatterns_reverse.views.ViewClass', views.view_class_instance,
(), {'arg1': '42', 'arg2': '37'}
),
# If you have no kwargs, you get an args list.
('/no_kwargs/42/37/', 'no-kwargs', '', '', 'no-kwargs', views.empty_view, ('42', '37'), {}),
(
'/included/no_kwargs/42/37/', 'inc-no-kwargs', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-no-kwargs',
views.empty_view, ('42', '37'), {}
),
(
'/included/12/no_kwargs/42/37/', 'inc-no-kwargs', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-no-kwargs',
views.empty_view, ('12', '42', '37'), {}
),
# Namespaces
(
'/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'test-ns1:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
'included_namespace_urls:test-ns3', 'included_namespace_urls:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/ns-included1/normal/42/37/', 'inc-normal-view', 'included_namespace_urls',
'inc-ns1', 'inc-ns1:inc-normal-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
'included_namespace_urls:test-ns3', 'included_namespace_urls:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'testapp:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'other-ns2:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'other-ns1:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
# Nested namespaces
(
'/ns-included1/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
'inc-ns1:test-ns3', 'inc-ns1:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view',
'included_namespace_urls:namespace_urls:included_namespace_urls:testapp',
'inc-ns1:inc-ns4:inc-ns2:test-ns3',
'inc-ns1:inc-ns4:inc-ns2:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/app-included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp', 'inc-app:test-ns3',
'inc-app:test-ns3:urlobject-view', views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/app-included/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view',
'included_namespace_urls:namespace_urls:included_namespace_urls:testapp',
'inc-app:inc-ns4:inc-ns2:test-ns3',
'inc-app:inc-ns4:inc-ns2:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
# Namespaces capturing variables
(
'/inc70/', 'inner-nothing', 'included_urls', 'inc-ns5', 'inc-ns5:inner-nothing',
views.empty_view, (), {'outer': '70'}
),
(
'/inc78/extra/foobar/', 'inner-extra', 'included_urls', 'inc-ns5', 'inc-ns5:inner-extra',
views.empty_view, (), {'outer': '78', 'extra': 'foobar'}
),
)
test_data = (
('places', '/places/3/', [3], {}),
('places', '/places/3/', ['3'], {}),
('places', NoReverseMatch, ['a'], {}),
('places', NoReverseMatch, [], {}),
('places?', '/place/', [], {}),
('places+', '/places/', [], {}),
('places*', '/place/', [], {}),
('places2?', '/', [], {}),
('places2+', '/places/', [], {}),
('places2*', '/', [], {}),
('places3', '/places/4/', [4], {}),
('places3', '/places/harlem/', ['harlem'], {}),
('places3', NoReverseMatch, ['harlem64'], {}),
('places4', '/places/3/', [], {'id': 3}),
('people', NoReverseMatch, [], {}),
('people', '/people/adrian/', ['adrian'], {}),
('people', '/people/adrian/', [], {'name': 'adrian'}),
('people', NoReverseMatch, ['name with spaces'], {}),
('people', NoReverseMatch, [], {'name': 'name with spaces'}),
('people2', '/people/name/', [], {}),
('people2a', '/people/name/fred/', ['fred'], {}),
('people_backref', '/people/nate-nate/', ['nate'], {}),
('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
('optional', '/optional/fred/', [], {'name': 'fred'}),
('optional', '/optional/fred/', ['fred'], {}),
('named_optional', '/optional/1/', [1], {}),
('named_optional', '/optional/1/', [], {'arg1': 1}),
('named_optional', '/optional/1/2/', [1, 2], {}),
('named_optional', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('named_optional_terminated', '/optional/1/2/', [1, 2], {}),
('named_optional_terminated', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('hardcoded', '/hardcoded/', [], {}),
('hardcoded2', '/hardcoded/doc.pdf', [], {}),
('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
('people3', NoReverseMatch, [], {'state': 'il'}),
('people3', NoReverseMatch, [], {'name': 'adrian'}),
('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
('people6', '/people//adrian/', ['adrian'], {}),
('range', '/character_set/a/', [], {}),
('range2', '/character_set/x/', [], {}),
('price', '/price/$10/', ['10'], {}),
('price2', '/price/$10/', ['10'], {}),
('price3', '/price/$10/', ['10'], {}),
('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
('headlines', '/headlines/2007.5.21/', [], {'year': 2007, 'month': 5, 'day': 21}),
(
'windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [],
{'drive_name': 'C', 'path': r'Documents and Settings\spam'}
),
('special', r'/special_chars/~@+%5C$*%7C/', [r'~@+\$*|'], {}),
('special', r'/special_chars/some%20resource/', [r'some resource'], {}),
('special', r'/special_chars/10%25%20complete/', [r'10% complete'], {}),
('special', r'/special_chars/some%20resource/', [], {'chars': r'some resource'}),
('special', r'/special_chars/10%25%20complete/', [], {'chars': r'10% complete'}),
('special', NoReverseMatch, [''], {}),
('mixed', '/john/0/', [], {'name': 'john'}),
('repeats', '/repeats/a/', [], {}),
('repeats2', '/repeats/aa/', [], {}),
('repeats3', '/repeats/aa/', [], {}),
('test', '/test/1', [], {}),
('inner-nothing', '/outer/42/', [], {'outer': '42'}),
('inner-nothing', '/outer/42/', ['42'], {}),
('inner-nothing', NoReverseMatch, ['foo'], {}),
('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
('inner-no-kwargs', '/outer-no-kwargs/42/inner-no-kwargs/1/', ['42', '1'], {}),
('disjunction', NoReverseMatch, ['foo'], {}),
('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
('extra-places', '/e-places/10/', ['10'], {}),
('extra-people', '/e-people/fred/', ['fred'], {}),
('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
('part', '/part/one/', [], {'value': 'one'}),
('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/part2/one/', [], {'value': 'one'}),
('part2', '/part2/', [], {}),
('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}),
# Tests for nested groups. Nested capturing groups will only work if you
# *only* supply the correct outer group.
('nested-noncapture', '/nested/noncapture/opt', [], {'p': 'opt'}),
('nested-capture', '/nested/capture/opt/', ['opt/'], {}),
('nested-capture', NoReverseMatch, [], {'p': 'opt'}),
('nested-mixedcapture', '/nested/capture/mixed/opt', ['opt'], {}),
('nested-mixedcapture', NoReverseMatch, [], {'p': 'opt'}),
('nested-namedcapture', '/nested/capture/named/opt/', [], {'outer': 'opt/'}),
('nested-namedcapture', NoReverseMatch, [], {'outer': 'opt/', 'inner': 'opt'}),
('nested-namedcapture', NoReverseMatch, [], {'inner': 'opt'}),
('non_path_include', '/includes/non_path_include/', [], {}),
# Tests for #13154
('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}),
('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}),
('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}),
('defaults', NoReverseMatch, [], {'arg2': 1}),
# Security tests
('security', '/%2Fexample.com/security/', ['/example.com'], {}),
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.no_urls')
class NoURLPatternsTests(SimpleTestCase):
def test_no_urls_exception(self):
"""
URLResolver should raise an exception when no urlpatterns exist.
"""
resolver = URLResolver(RegexPattern(r'^$'), settings.ROOT_URLCONF)
with self.assertRaisesMessage(
ImproperlyConfigured,
"The included URLconf 'urlpatterns_reverse.no_urls' does not "
"appear to have any patterns in it. If you see valid patterns in "
"the file then the issue is probably caused by a circular import."
):
getattr(resolver, 'url_patterns')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class URLPatternReverse(SimpleTestCase):
def test_urlpattern_reverse(self):
for name, expected, args, kwargs in test_data:
with self.subTest(name=name, args=args, kwargs=kwargs):
try:
got = reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.assertEqual(NoReverseMatch, expected)
else:
self.assertEqual(got, expected)
def test_reverse_none(self):
# Reversing None should raise an error, not return the last un-named view.
with self.assertRaises(NoReverseMatch):
reverse(None)
def test_mixing_args_and_kwargs(self):
msg = "Don't mix *args and **kwargs in call to reverse()!"
with self.assertRaisesMessage(ValueError, msg):
reverse('name', args=['a'], kwargs={'b': 'c'})
@override_script_prefix('/{{invalid}}/')
def test_prefix_braces(self):
self.assertEqual(
'/%7B%7Binvalid%7D%7D/includes/non_path_include/',
reverse('non_path_include')
)
def test_prefix_parenthesis(self):
# Parentheses are allowed and should not cause errors or be escaped
with override_script_prefix('/bogus)/'):
self.assertEqual(
'/bogus)/includes/non_path_include/',
reverse('non_path_include')
)
with override_script_prefix('/(bogus)/'):
self.assertEqual(
'/(bogus)/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/bump%20map/')
def test_prefix_format_char(self):
self.assertEqual(
'/bump%2520map/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/%7Eme/')
def test_non_urlsafe_prefix_with_args(self):
# Regression for #20022, adjusted for #24013 because ~ is an unreserved
# character. Tests whether % is escaped.
self.assertEqual('/%257Eme/places/1/', reverse('places', args=[1]))
def test_patterns_reported(self):
# Regression for #17076
with self.assertRaisesMessage(NoReverseMatch, r"1 pattern(s) tried: ['people/(?P<name>\\w+)/$']"):
# this url exists, but requires an argument
reverse("people", args=[])
@override_script_prefix('/script:name/')
def test_script_name_escaping(self):
self.assertEqual(
reverse('optional', args=['foo:bar']),
'/script:name/optional/foo:bar/'
)
def test_view_not_found_message(self):
msg = (
"Reverse for 'nonexistent-view' not found. 'nonexistent-view' "
"is not a valid view function or pattern name."
)
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('nonexistent-view')
def test_no_args_message(self):
msg = "Reverse for 'places' with no arguments not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('places')
def test_illegal_args_message(self):
msg = "Reverse for 'places' with arguments '(1, 2)' not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('places', args=(1, 2))
def test_illegal_kwargs_message(self):
msg = "Reverse for 'places' with keyword arguments '{'arg1': 2}' not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('places', kwargs={'arg1': 2})
class ResolverTests(SimpleTestCase):
def test_resolver_repr(self):
"""
Test repr of URLResolver, especially when urlconf_name is a list
(#17892).
"""
# Pick a resolver from a namespaced URLconf
resolver = get_resolver('urlpatterns_reverse.namespace_urls')
sub_resolver = resolver.namespace_dict['test-ns1'][1]
self.assertIn('<URLPattern list>', repr(sub_resolver))
def test_reverse_lazy_object_coercion_by_resolve(self):
"""
Verifies lazy object returned by reverse_lazy is coerced to
text by resolve(). Previous to #21043, this would raise a TypeError.
"""
urls = 'urlpatterns_reverse.named_urls'
proxy_url = reverse_lazy('named-url1', urlconf=urls)
resolver = get_resolver(urls)
resolver.resolve(proxy_url)
def test_resolver_reverse(self):
resolver = get_resolver('urlpatterns_reverse.named_urls')
test_urls = [
# (name, args, kwargs, expected)
('named-url1', (), {}, ''),
('named-url2', ('arg',), {}, 'extra/arg/'),
('named-url2', (), {'extra': 'arg'}, 'extra/arg/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(resolver.reverse(name, *args, **kwargs), expected)
def test_resolver_reverse_conflict(self):
"""
URL pattern name arguments don't need to be unique. The last registered
pattern takes precedence for conflicting names.
"""
resolver = get_resolver('urlpatterns_reverse.named_urls_conflict')
test_urls = [
# (name, args, kwargs, expected)
# Without arguments, the last URL in urlpatterns has precedence.
('name-conflict', (), {}, 'conflict/'),
# With an arg, the last URL in urlpatterns has precedence.
('name-conflict', ('arg',), {}, 'conflict-last/arg/'),
# With a kwarg, other URL patterns can be reversed.
('name-conflict', (), {'first': 'arg'}, 'conflict-first/arg/'),
('name-conflict', (), {'middle': 'arg'}, 'conflict-middle/arg/'),
('name-conflict', (), {'last': 'arg'}, 'conflict-last/arg/'),
# The number and order of the arguments don't interfere with reversing.
('name-conflict', ('arg', 'arg'), {}, 'conflict/arg/arg/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(resolver.reverse(name, *args, **kwargs), expected)
def test_non_regex(self):
"""
A Resolver404 is raised if resolving doesn't meet the basic
requirements of a path to match - i.e., at the very least, it matches
the root pattern '^/'. Never return None from resolve() to prevent a
TypeError from occurring later (#10834).
"""
test_urls = ['', 'a', '\\', '.']
for path_ in test_urls:
with self.subTest(path=path_):
with self.assertRaises(Resolver404):
resolve(path_)
def test_404_tried_urls_have_names(self):
"""
The list of URLs that come back from a Resolver404 exception contains
a list in the right format for printing out in the DEBUG 404 page with
both the patterns and URL names, if available.
"""
urls = 'urlpatterns_reverse.named_urls'
# this list matches the expected URL types and names returned when
# you try to resolve a nonexistent URL in the first level of included
# URLs in named_urls.py (e.g., '/included/nonexistent-url')
url_types_names = [
[{'type': URLPattern, 'name': 'named-url1'}],
[{'type': URLPattern, 'name': 'named-url2'}],
[{'type': URLPattern, 'name': None}],
[{'type': URLResolver}, {'type': URLPattern, 'name': 'named-url3'}],
[{'type': URLResolver}, {'type': URLPattern, 'name': 'named-url4'}],
[{'type': URLResolver}, {'type': URLPattern, 'name': None}],
[{'type': URLResolver}, {'type': URLResolver}],
]
with self.assertRaisesMessage(Resolver404, 'tried') as cm:
resolve('/included/nonexistent-url', urlconf=urls)
e = cm.exception
# make sure we at least matched the root ('/') url resolver:
self.assertIn('tried', e.args[0])
self.assertEqual(
len(e.args[0]['tried']),
len(url_types_names),
'Wrong number of tried URLs returned. Expected %s, got %s.' % (
len(url_types_names), len(e.args[0]['tried'])
)
)
for tried, expected in zip(e.args[0]['tried'], url_types_names):
for t, e in zip(tried, expected):
with self.subTest(t):
                    self.assertIsInstance(t, e['type'], '%s is not an instance of %s' % (t, e['type']))
if 'name' in e:
if not e['name']:
self.assertIsNone(t.name, 'Expected no URL name but found %s.' % t.name)
else:
self.assertEqual(
t.name,
e['name'],
'Wrong URL name. Expected "%s", got "%s".' % (e['name'], t.name)
)
def test_namespaced_view_detail(self):
resolver = get_resolver('urlpatterns_reverse.nested_urls')
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.view1'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.view2'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.View3'))
self.assertFalse(resolver._is_callback('urlpatterns_reverse.nested_urls.blub'))
def test_view_detail_as_method(self):
# Views which have a class name as part of their path.
resolver = get_resolver('urlpatterns_reverse.method_view_urls')
self.assertTrue(resolver._is_callback('urlpatterns_reverse.method_view_urls.ViewContainer.method_view'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.method_view_urls.ViewContainer.classmethod_view'))
def test_populate_concurrency(self):
"""
URLResolver._populate() can be called concurrently, but not more
than once per thread (#26888).
"""
resolver = URLResolver(RegexPattern(r'^/'), 'urlpatterns_reverse.urls')
resolver._local.populating = True
thread = threading.Thread(target=resolver._populate)
thread.start()
thread.join()
self.assertNotEqual(resolver._reverse_dict, {})
@override_settings(ROOT_URLCONF='urlpatterns_reverse.reverse_lazy_urls')
class ReverseLazyTest(TestCase):
def test_redirect_with_lazy_reverse(self):
response = self.client.get('/redirect/')
self.assertRedirects(response, "/redirected_to/", status_code=302)
def test_user_permission_with_lazy_reverse(self):
alfred = User.objects.create_user('alfred', 'alfred@example.com', password='testpw')
response = self.client.get('/login_required_view/')
self.assertRedirects(response, "/login/?next=/login_required_view/", status_code=302)
self.client.force_login(alfred)
response = self.client.get('/login_required_view/')
self.assertEqual(response.status_code, 200)
def test_inserting_reverse_lazy_into_string(self):
self.assertEqual(
'Some URL: %s' % reverse_lazy('some-login-page'),
'Some URL: /login/'
)
class ReverseLazySettingsTest(AdminScriptTestCase):
"""
reverse_lazy can be used in settings without causing a circular
import error.
"""
def setUp(self):
self.write_settings(
'settings.py',
extra="from django.urls import reverse_lazy\nLOGIN_URL = reverse_lazy('login')",
)
def tearDown(self):
self.remove_settings('settings.py')
def test_lazy_in_settings(self):
out, err = self.run_manage(['check'])
self.assertNoOutput(err)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class ReverseShortcutTests(SimpleTestCase):
def test_redirect_to_object(self):
# We don't really need a model; just something with a get_absolute_url
class FakeObj:
def get_absolute_url(self):
return "/hi-there/"
res = redirect(FakeObj())
self.assertIsInstance(res, HttpResponseRedirect)
self.assertEqual(res.url, '/hi-there/')
res = redirect(FakeObj(), permanent=True)
self.assertIsInstance(res, HttpResponsePermanentRedirect)
self.assertEqual(res.url, '/hi-there/')
def test_redirect_to_view_name(self):
res = redirect('hardcoded2')
self.assertEqual(res.url, '/hardcoded/doc.pdf')
res = redirect('places', 1)
self.assertEqual(res.url, '/places/1/')
res = redirect('headlines', year='2008', month='02', day='17')
self.assertEqual(res.url, '/headlines/2008.02.17/')
with self.assertRaises(NoReverseMatch):
redirect('not-a-view')
def test_redirect_to_url(self):
res = redirect('/foo/')
self.assertEqual(res.url, '/foo/')
res = redirect('http://example.com/')
self.assertEqual(res.url, 'http://example.com/')
# Assert that we can redirect using UTF-8 strings
res = redirect('/æøå/abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5/abc/')
# Assert that no imports are attempted when dealing with a relative path
        # (previously, the below would result in a UnicodeEncodeError from __import__)
res = redirect('/æøå.abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5.abc/')
res = redirect('os.path')
self.assertEqual(res.url, 'os.path')
def test_no_illegal_imports(self):
# modules that are not listed in urlpatterns should not be importable
redirect("urlpatterns_reverse.nonimported_module.view")
self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules)
def test_reverse_by_path_nested(self):
# Views added to urlpatterns using include() should be reversible.
from .views import nested_view
self.assertEqual(reverse(nested_view), '/includes/nested_path/')
def test_redirect_view_object(self):
from .views import absolute_kwargs_view
res = redirect(absolute_kwargs_view)
self.assertEqual(res.url, '/absolute_arg_view/')
with self.assertRaises(NoReverseMatch):
redirect(absolute_kwargs_view, wrong_argument=None)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class NamespaceTests(SimpleTestCase):
def test_ambiguous_object(self):
"""
Names deployed via dynamic URL objects that require namespaces can't
be resolved.
"""
test_urls = [
('urlobject-view', [], {}),
('urlobject-view', [37, 42], {}),
('urlobject-view', [], {'arg1': 42, 'arg2': 37}),
]
for name, args, kwargs in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
with self.assertRaises(NoReverseMatch):
reverse(name, args=args, kwargs=kwargs)
def test_ambiguous_urlpattern(self):
"""
Names deployed via dynamic URL objects that require namespaces can't
be resolved.
"""
test_urls = [
('inner-nothing', [], {}),
('inner-nothing', [37, 42], {}),
('inner-nothing', [], {'arg1': 42, 'arg2': 37}),
]
for name, args, kwargs in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
with self.assertRaises(NoReverseMatch):
reverse(name, args=args, kwargs=kwargs)
def test_non_existent_namespace(self):
"""Nonexistent namespaces raise errors."""
test_urls = [
'blahblah:urlobject-view',
'test-ns1:blahblah:urlobject-view',
]
for name in test_urls:
with self.subTest(name=name):
with self.assertRaises(NoReverseMatch):
reverse(name)
def test_normal_name(self):
"""Normal lookups work as expected."""
test_urls = [
('normal-view', [], {}, '/normal/'),
('normal-view', [37, 42], {}, '/normal/37/42/'),
('normal-view', [], {'arg1': 42, 'arg2': 37}, '/normal/42/37/'),
('special-view', [], {}, '/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_simple_included_name(self):
"""Normal lookups work on names included from other patterns."""
test_urls = [
('included_namespace_urls:inc-normal-view', [], {}, '/included/normal/'),
('included_namespace_urls:inc-normal-view', [37, 42], {}, '/included/normal/37/42/'),
('included_namespace_urls:inc-normal-view', [], {'arg1': 42, 'arg2': 37}, '/included/normal/42/37/'),
('included_namespace_urls:inc-special-view', [], {}, '/included/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespace_object(self):
"""Dynamic URL objects can be found using a namespace."""
test_urls = [
('test-ns1:urlobject-view', [], {}, '/test1/inner/'),
('test-ns1:urlobject-view', [37, 42], {}, '/test1/inner/37/42/'),
('test-ns1:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/test1/inner/42/37/'),
('test-ns1:urlobject-special-view', [], {}, '/test1/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_object(self):
"""
Dynamic URL objects can return a (pattern, app_name) 2-tuple, and
include() can set the namespace.
"""
test_urls = [
('new-ns1:urlobject-view', [], {}, '/newapp1/inner/'),
('new-ns1:urlobject-view', [37, 42], {}, '/newapp1/inner/37/42/'),
('new-ns1:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/newapp1/inner/42/37/'),
('new-ns1:urlobject-special-view', [], {}, '/newapp1/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_object_default_namespace(self):
"""
Namespace defaults to app_name when including a (pattern, app_name)
2-tuple.
"""
test_urls = [
('newapp:urlobject-view', [], {}, '/new-default/inner/'),
('newapp:urlobject-view', [37, 42], {}, '/new-default/inner/37/42/'),
('newapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/new-default/inner/42/37/'),
('newapp:urlobject-special-view', [], {}, '/new-default/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_embedded_namespace_object(self):
"""Namespaces can be installed anywhere in the URL pattern tree."""
test_urls = [
('included_namespace_urls:test-ns3:urlobject-view', [], {}, '/included/test3/inner/'),
('included_namespace_urls:test-ns3:urlobject-view', [37, 42], {}, '/included/test3/inner/37/42/'),
(
'included_namespace_urls:test-ns3:urlobject-view', [], {'arg1': 42, 'arg2': 37},
'/included/test3/inner/42/37/',
),
('included_namespace_urls:test-ns3:urlobject-special-view', [], {}, '/included/test3/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespace_pattern(self):
"""Namespaces can be applied to include()'d urlpatterns."""
test_urls = [
('inc-ns1:inc-normal-view', [], {}, '/ns-included1/normal/'),
('inc-ns1:inc-normal-view', [37, 42], {}, '/ns-included1/normal/37/42/'),
('inc-ns1:inc-normal-view', [], {'arg1': 42, 'arg2': 37}, '/ns-included1/normal/42/37/'),
('inc-ns1:inc-special-view', [], {}, '/ns-included1/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_name_pattern(self):
"""
Namespaces can be applied to include()'d urlpatterns that set an
app_name attribute.
"""
test_urls = [
('app-ns1:inc-normal-view', [], {}, '/app-included1/normal/'),
('app-ns1:inc-normal-view', [37, 42], {}, '/app-included1/normal/37/42/'),
('app-ns1:inc-normal-view', [], {'arg1': 42, 'arg2': 37}, '/app-included1/normal/42/37/'),
('app-ns1:inc-special-view', [], {}, '/app-included1/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespace_pattern_with_variable_prefix(self):
"""
Using include() with namespaces when there is a regex variable in front
of it.
"""
test_urls = [
('inc-outer:inc-normal-view', [], {'outer': 42}, '/ns-outer/42/normal/'),
('inc-outer:inc-normal-view', [42], {}, '/ns-outer/42/normal/'),
('inc-outer:inc-normal-view', [], {'arg1': 37, 'arg2': 4, 'outer': 42}, '/ns-outer/42/normal/37/4/'),
('inc-outer:inc-normal-view', [42, 37, 4], {}, '/ns-outer/42/normal/37/4/'),
('inc-outer:inc-special-view', [], {'outer': 42}, '/ns-outer/42/+%5C$*/'),
('inc-outer:inc-special-view', [42], {}, '/ns-outer/42/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_multiple_namespace_pattern(self):
"""Namespaces can be embedded."""
test_urls = [
('inc-ns1:test-ns3:urlobject-view', [], {}, '/ns-included1/test3/inner/'),
('inc-ns1:test-ns3:urlobject-view', [37, 42], {}, '/ns-included1/test3/inner/37/42/'),
(
'inc-ns1:test-ns3:urlobject-view', [], {'arg1': 42, 'arg2': 37},
'/ns-included1/test3/inner/42/37/',
),
('inc-ns1:test-ns3:urlobject-special-view', [], {}, '/ns-included1/test3/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_nested_namespace_pattern(self):
"""Namespaces can be nested."""
test_urls = [
(
'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', [], {},
'/ns-included1/ns-included4/ns-included1/test3/inner/',
),
(
'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', [37, 42], {},
'/ns-included1/ns-included4/ns-included1/test3/inner/37/42/',
),
(
'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', [], {'arg1': 42, 'arg2': 37},
'/ns-included1/ns-included4/ns-included1/test3/inner/42/37/',
),
(
'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view', [], {},
'/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/',
),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_lookup_object(self):
"""A default application namespace can be used for lookup."""
test_urls = [
('testapp:urlobject-view', [], {}, '/default/inner/'),
('testapp:urlobject-view', [37, 42], {}, '/default/inner/37/42/'),
('testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/default/inner/42/37/'),
('testapp:urlobject-special-view', [], {}, '/default/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_lookup_object_with_default(self):
"""A default application namespace is sensitive to the current app."""
test_urls = [
('testapp:urlobject-view', [], {}, 'test-ns3', '/default/inner/'),
('testapp:urlobject-view', [37, 42], {}, 'test-ns3', '/default/inner/37/42/'),
('testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'test-ns3', '/default/inner/42/37/'),
('testapp:urlobject-special-view', [], {}, 'test-ns3', '/default/inner/+%5C$*/'),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)
def test_app_lookup_object_without_default(self):
"""
An application namespace without a default is sensitive to the current
app.
"""
test_urls = [
('nodefault:urlobject-view', [], {}, None, '/other2/inner/'),
('nodefault:urlobject-view', [37, 42], {}, None, '/other2/inner/37/42/'),
('nodefault:urlobject-view', [], {'arg1': 42, 'arg2': 37}, None, '/other2/inner/42/37/'),
('nodefault:urlobject-special-view', [], {}, None, '/other2/inner/+%5C$*/'),
('nodefault:urlobject-view', [], {}, 'other-ns1', '/other1/inner/'),
('nodefault:urlobject-view', [37, 42], {}, 'other-ns1', '/other1/inner/37/42/'),
('nodefault:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'other-ns1', '/other1/inner/42/37/'),
('nodefault:urlobject-special-view', [], {}, 'other-ns1', '/other1/inner/+%5C$*/'),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)
def test_special_chars_namespace(self):
test_urls = [
('special:included_namespace_urls:inc-normal-view', [], {}, '/+%5C$*/included/normal/'),
('special:included_namespace_urls:inc-normal-view', [37, 42], {}, '/+%5C$*/included/normal/37/42/'),
(
'special:included_namespace_urls:inc-normal-view', [], {'arg1': 42, 'arg2': 37},
'/+%5C$*/included/normal/42/37/',
),
('special:included_namespace_urls:inc-special-view', [], {}, '/+%5C$*/included/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespaces_with_variables(self):
"""Namespace prefixes can capture variables."""
test_urls = [
('inc-ns5:inner-nothing', [], {'outer': '70'}, '/inc70/'),
('inc-ns5:inner-extra', [], {'extra': 'foobar', 'outer': '78'}, '/inc78/extra/foobar/'),
('inc-ns5:inner-nothing', ['70'], {}, '/inc70/'),
('inc-ns5:inner-extra', ['78', 'foobar'], {}, '/inc78/extra/foobar/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_nested_app_lookup(self):
"""
A nested current_app should be split in individual namespaces (#24904).
"""
test_urls = [
('inc-ns1:testapp:urlobject-view', [], {}, None, '/ns-included1/test4/inner/'),
('inc-ns1:testapp:urlobject-view', [37, 42], {}, None, '/ns-included1/test4/inner/37/42/'),
('inc-ns1:testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, None, '/ns-included1/test4/inner/42/37/'),
('inc-ns1:testapp:urlobject-special-view', [], {}, None, '/ns-included1/test4/inner/+%5C$*/'),
('inc-ns1:testapp:urlobject-view', [], {}, 'inc-ns1:test-ns3', '/ns-included1/test3/inner/'),
('inc-ns1:testapp:urlobject-view', [37, 42], {}, 'inc-ns1:test-ns3', '/ns-included1/test3/inner/37/42/'),
(
'inc-ns1:testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'inc-ns1:test-ns3',
'/ns-included1/test3/inner/42/37/',
),
(
'inc-ns1:testapp:urlobject-special-view', [], {}, 'inc-ns1:test-ns3',
'/ns-included1/test3/inner/+%5C$*/',
),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)
def test_current_app_no_partial_match(self):
"""current_app shouldn't be used unless it matches the whole path."""
test_urls = [
('inc-ns1:testapp:urlobject-view', [], {}, 'nonexistent:test-ns3', '/ns-included1/test4/inner/'),
(
'inc-ns1:testapp:urlobject-view', [37, 42], {}, 'nonexistent:test-ns3',
'/ns-included1/test4/inner/37/42/',
),
(
'inc-ns1:testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'nonexistent:test-ns3',
'/ns-included1/test4/inner/42/37/',
),
(
'inc-ns1:testapp:urlobject-special-view', [], {}, 'nonexistent:test-ns3',
'/ns-included1/test4/inner/+%5C$*/',
),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)
@override_settings(ROOT_URLCONF=urlconf_outer.__name__)
class RequestURLconfTests(SimpleTestCase):
def test_urlconf(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:,inner:/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.NullChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden_with_null(self):
"""
Overriding request.urlconf with None will fall back to the default
URLconf.
"""
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_inner_in_response_middleware(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
a response middleware.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_outer_in_response_middleware(self):
"""
        Test reversing a URL from the *default* URLconf from inside
a response middleware.
"""
msg = (
"Reverse for 'outer' not found. 'outer' is not a valid view "
"function or pattern name."
)
with self.assertRaisesMessage(NoReverseMatch, msg):
self.client.get('/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInStreaming' % middleware.__name__,
]
)
def test_reverse_inner_in_streaming(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
a streaming response.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(b''.join(response), b'/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInStreaming' % middleware.__name__,
]
)
def test_reverse_outer_in_streaming(self):
"""
        Test reversing a URL from the *default* URLconf from inside
a streaming response.
"""
message = "Reverse for 'outer' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
b''.join(self.client.get('/second_test/'))
def test_urlconf_is_reset_after_request(self):
"""The URLconf is reset after each request."""
self.assertIsNone(get_urlconf())
with override_settings(MIDDLEWARE=['%s.ChangeURLconfMiddleware' % middleware.__name__]):
self.client.get(reverse('inner'))
self.assertIsNone(get_urlconf())
class ErrorHandlerResolutionTests(SimpleTestCase):
"""Tests for handler400, handler404 and handler500"""
def setUp(self):
urlconf = 'urlpatterns_reverse.urls_error_handlers'
urlconf_callables = 'urlpatterns_reverse.urls_error_handlers_callables'
self.resolver = URLResolver(RegexPattern(r'^$'), urlconf)
self.callable_resolver = URLResolver(RegexPattern(r'^$'), urlconf_callables)
def test_named_handlers(self):
handler = (empty_view, {})
for code in [400, 404, 500]:
with self.subTest(code=code):
self.assertEqual(self.resolver.resolve_error_handler(code), handler)
def test_callable_handlers(self):
handler = (empty_view, {})
for code in [400, 404, 500]:
with self.subTest(code=code):
self.assertEqual(self.callable_resolver.resolve_error_handler(code), handler)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls_without_handlers')
class DefaultErrorHandlerTests(SimpleTestCase):
def test_default_handler(self):
"If the urls.py doesn't specify handlers, the defaults are used"
response = self.client.get('/test/')
self.assertEqual(response.status_code, 404)
msg = "I don't think I'm getting good value for this view"
with self.assertRaisesMessage(ValueError, msg):
self.client.get('/bad_view/')
@override_settings(ROOT_URLCONF=None)
class NoRootUrlConfTests(SimpleTestCase):
"""Tests for handler404 and handler500 if ROOT_URLCONF is None"""
def test_no_handler_exception(self):
msg = (
"The included URLconf 'None' does not appear to have any patterns "
"in it. If you see valid patterns in the file then the issue is "
"probably caused by a circular import."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.client.get('/test/me/')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class ResolverMatchTests(SimpleTestCase):
def test_urlpattern_resolve(self):
for path_, url_name, app_name, namespace, view_name, func, args, kwargs in resolve_test_data:
with self.subTest(path=path_):
# Legacy support for extracting "function, args, kwargs".
match_func, match_args, match_kwargs = resolve(path_)
self.assertEqual(match_func, func)
self.assertEqual(match_args, args)
self.assertEqual(match_kwargs, kwargs)
# ResolverMatch capabilities.
match = resolve(path_)
self.assertEqual(match.__class__, ResolverMatch)
self.assertEqual(match.url_name, url_name)
self.assertEqual(match.app_name, app_name)
self.assertEqual(match.namespace, namespace)
self.assertEqual(match.view_name, view_name)
self.assertEqual(match.func, func)
self.assertEqual(match.args, args)
self.assertEqual(match.kwargs, kwargs)
# and for legacy purposes:
self.assertEqual(match[0], func)
self.assertEqual(match[1], args)
self.assertEqual(match[2], kwargs)
def test_resolver_match_on_request(self):
response = self.client.get('/resolver_match/')
resolver_match = response.resolver_match
self.assertEqual(resolver_match.url_name, 'test-resolver-match')
def test_resolver_match_on_request_before_resolution(self):
request = HttpRequest()
self.assertIsNone(request.resolver_match)
def test_repr(self):
self.assertEqual(
repr(resolve('/no_kwargs/42/37/')),
"ResolverMatch(func=urlpatterns_reverse.views.empty_view, "
"args=('42', '37'), kwargs={}, url_name=no-kwargs, app_names=[], "
"namespaces=[], route=^no_kwargs/([0-9]+)/([0-9]+)/$)",
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.erroneous_urls')
class ErroneousViewTests(SimpleTestCase):
def test_noncallable_view(self):
# View is not a callable (explicit import; arbitrary Python object)
with self.assertRaisesMessage(TypeError, 'view must be a callable'):
path('uncallable-object/', views.uncallable)
def test_invalid_regex(self):
# Regex contains an error (refs #6170)
msg = '(regex_error/$" is not a valid regular expression'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
reverse(views.empty_view)
class ViewLoadingTests(SimpleTestCase):
def test_view_loading(self):
self.assertEqual(get_callable('urlpatterns_reverse.views.empty_view'), empty_view)
self.assertEqual(get_callable(empty_view), empty_view)
def test_view_does_not_exist(self):
msg = "View does not exist in module urlpatterns_reverse.views."
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable('urlpatterns_reverse.views.i_should_not_exist')
def test_attributeerror_not_hidden(self):
msg = 'I am here to confuse django.urls.get_callable'
with self.assertRaisesMessage(AttributeError, msg):
get_callable('urlpatterns_reverse.views_broken.i_am_broken')
def test_non_string_value(self):
msg = "'1' is not a callable or a dot-notation path"
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable(1)
def test_string_without_dot(self):
msg = "Could not import 'test'. The path must be fully qualified."
with self.assertRaisesMessage(ImportError, msg):
get_callable('test')
def test_module_does_not_exist(self):
with self.assertRaisesMessage(ImportError, "No module named 'foo'"):
get_callable('foo.bar')
def test_parent_module_does_not_exist(self):
msg = 'Parent module urlpatterns_reverse.foo does not exist.'
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable('urlpatterns_reverse.foo.bar')
def test_not_callable(self):
msg = (
"Could not import 'urlpatterns_reverse.tests.resolve_test_data'. "
"View is not callable."
)
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable('urlpatterns_reverse.tests.resolve_test_data')
class IncludeTests(SimpleTestCase):
url_patterns = [
path('inner/', views.empty_view, name='urlobject-view'),
re_path(r'^inner/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='urlobject-view'),
re_path(r'^inner/\+\\\$\*/$', views.empty_view, name='urlobject-special-view'),
]
app_urls = URLObject('inc-app')
def test_include_urls(self):
self.assertEqual(include(self.url_patterns), (self.url_patterns, None, None))
def test_include_namespace(self):
msg = (
'Specifying a namespace in include() without providing an '
'app_name is not supported.'
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include(self.url_patterns, 'namespace')
def test_include_4_tuple(self):
msg = 'Passing a 4-tuple to include() is not supported.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, 'app_name', 'namespace', 'blah'))
def test_include_3_tuple(self):
msg = 'Passing a 3-tuple to include() is not supported.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, 'app_name', 'namespace'))
def test_include_3_tuple_namespace(self):
msg = 'Cannot override the namespace for a dynamic module that provides a namespace.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, 'app_name', 'namespace'), 'namespace')
def test_include_2_tuple(self):
self.assertEqual(
include((self.url_patterns, 'app_name')),
(self.url_patterns, 'app_name', 'app_name')
)
def test_include_2_tuple_namespace(self):
self.assertEqual(
include((self.url_patterns, 'app_name'), namespace='namespace'),
(self.url_patterns, 'app_name', 'namespace')
)
def test_include_app_name(self):
self.assertEqual(
include(self.app_urls),
(self.app_urls, 'inc-app', 'inc-app')
)
def test_include_app_name_namespace(self):
self.assertEqual(
include(self.app_urls, 'namespace'),
(self.app_urls, 'inc-app', 'namespace')
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class LookaheadTests(SimpleTestCase):
def test_valid_resolve(self):
test_urls = [
'/lookahead-/a-city/',
'/lookbehind-/a-city/',
'/lookahead+/a-city/',
'/lookbehind+/a-city/',
]
for test_url in test_urls:
with self.subTest(url=test_url):
self.assertEqual(resolve(test_url).kwargs, {'city': 'a-city'})
def test_invalid_resolve(self):
test_urls = [
'/lookahead-/not-a-city/',
'/lookbehind-/not-a-city/',
'/lookahead+/other-city/',
'/lookbehind+/other-city/',
]
for test_url in test_urls:
with self.subTest(url=test_url):
with self.assertRaises(Resolver404):
resolve(test_url)
def test_valid_reverse(self):
test_urls = [
('lookahead-positive', {'city': 'a-city'}, '/lookahead+/a-city/'),
('lookahead-negative', {'city': 'a-city'}, '/lookahead-/a-city/'),
('lookbehind-positive', {'city': 'a-city'}, '/lookbehind+/a-city/'),
('lookbehind-negative', {'city': 'a-city'}, '/lookbehind-/a-city/'),
]
for name, kwargs, expected in test_urls:
with self.subTest(name=name, kwargs=kwargs):
self.assertEqual(reverse(name, kwargs=kwargs), expected)
def test_invalid_reverse(self):
test_urls = [
('lookahead-positive', {'city': 'other-city'}),
('lookahead-negative', {'city': 'not-a-city'}),
('lookbehind-positive', {'city': 'other-city'}),
('lookbehind-negative', {'city': 'not-a-city'}),
]
for name, kwargs in test_urls:
with self.subTest(name=name, kwargs=kwargs):
with self.assertRaises(NoReverseMatch):
reverse(name, kwargs=kwargs)
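# Hedged sketch (illustrative, not the actual urlpatterns_reverse.urls):
# patterns like the ones LookaheadTests relies on can be written with regex
# lookarounds, e.g. a negative lookahead rejecting the literal "not-a-city":
lookaround_urlpatterns_sketch = [
    re_path(r'^lookahead-/(?!not-a-city)(?P<city>[-\w]+)/$', views.empty_view, name='lookahead-negative'),
    re_path(r'^lookahead\+/(?=a-city)(?P<city>[-\w]+)/$', views.empty_view, name='lookahead-positive'),
]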
|
mp_spotfinder_server_read_file.py
|
from __future__ import division
from BaseHTTPServer import HTTPServer
import cgi, sys
from multiprocessing import Process, current_process
from urlparse import urlparse
# backward compatibility with Python 2.5
try: from urlparse import parse_qs
except Exception: from cgi import parse_qs
def note(format, *args):
  sys.stderr.write('[%s]\t%s\n' % (current_process().name, format % args))
from spotfinder.servers.spotfinder_server_read_file import image_request_handler as irhbase
from spotfinder.servers.spotfinder_server_read_file import generate_common_parameters # import dependency
class image_request_handler(irhbase):
def log_message(self, format, *args):
note(format, *args)
def serve_forever(server):
note('starting server')
try:
server.serve_forever()
except KeyboardInterrupt:
pass
def runpool(address, number_of_processes, handler):
# create a single server object -- children will each inherit a copy
server = HTTPServer(address, handler)
# create child processes to act as workers
for i in range(number_of_processes-1):
Process(target=serve_forever, args=(server,)).start()
# main process also acts as a worker
serve_forever(server)
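# Hedged usage sketch: the real command-line entry point lives elsewhere in
# the spotfinder package; the address and worker count here are made-up.
if __name__ == "__main__":
  runpool(("", 8125), 4, image_request_handler)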
|
test_channel.py
|
#!/usr/bin/python
#
# Server that will accept connections from a Vim channel.
# Used by test_channel.vim.
#
# This requires Python 2.6 or later.
from __future__ import print_function
import json
import socket
import sys
import time
import threading
try:
# Python 3
import socketserver
except ImportError:
# Python 2
import SocketServer as socketserver
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
print("=== socket opened ===")
while True:
try:
received = self.request.recv(4096).decode('utf-8')
except socket.error:
print("=== socket error ===")
break
except IOError:
print("=== socket closed ===")
break
if received == '':
print("=== socket closed ===")
break
print("received: {}".format(received))
# We may receive two messages at once. Take the part up to the
# matching "]" (recognized by finding "][").
todo = received
while todo != '':
splitidx = todo.find('][')
if splitidx < 0:
used = todo
todo = ''
else:
used = todo[:splitidx + 1]
todo = todo[splitidx + 1:]
if used != received:
print("using: {}".format(used))
try:
decoded = json.loads(used)
except ValueError:
print("json decoding failed")
decoded = [-1, '']
# Send a response if the sequence number is positive.
if decoded[0] >= 0:
if decoded[1] == 'hello!':
# simply send back a string
response = "got it"
elif decoded[1].startswith("echo "):
# send back the argument
response = decoded[1][5:]
elif decoded[1] == 'make change':
# Send two ex commands at the same time, before
# replying to the request.
cmd = '["ex","call append(\\"$\\",\\"added1\\")"]'
cmd += '["ex","call append(\\"$\\",\\"added2\\")"]'
print("sending: {}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'do normal':
# Send a normal command.
cmd = '["normal","G$s more\u001b"]'
print("sending: {}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-works':
# Send an eval request. We ignore the response.
cmd = '["expr","\\"foo\\" . 123", -1]'
print("sending: {}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-fails':
# Send an eval request that will fail.
cmd = '["expr","xxx", -2]'
print("sending: {}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-error':
# Send an eval request that works but the result can't
# be encoded.
cmd = '["expr","function(\\"tr\\")", -3]'
print("sending: {}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-bad':
# Send an eval request missing the third argument.
cmd = '["expr","xxx"]'
print("sending: {}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'an expr':
# Send an expr request.
cmd = '["expr","setline(\\"$\\", [\\"one\\",\\"two\\",\\"three\\"])"]'
print("sending: {}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'call-func':
cmd = '["call","MyFunction",[1,2,3], 0]'
print("sending: {}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'redraw':
cmd = '["redraw",""]'
print("sending: {}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'redraw!':
cmd = '["redraw","force"]'
print("sending: {}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'empty-request':
cmd = '[]'
print("sending: {}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-result':
# Send back the last received eval result.
response = last_eval
elif decoded[1] == 'call me':
cmd = '[0,"we called you"]'
print("sending: {}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'call me again':
cmd = '[0,"we did call you"]'
print("sending: {}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = ""
elif decoded[1] == 'send zero':
cmd = '[0,"zero index"]'
print("sending: {}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "sent zero"
elif decoded[1] == 'close me':
print("closing")
self.request.close()
response = ""
elif decoded[1] == 'wait a bit':
time.sleep(0.2)
response = "waited"
elif decoded[1] == '!quit!':
# we're done
self.server.shutdown()
return
elif decoded[1] == '!crash!':
# Crash!
42 / 0
else:
response = "what?"
if response == "":
print("no response")
else:
encoded = json.dumps([decoded[0], response])
print("sending: {}".format(encoded))
self.request.sendall(encoded.encode('utf-8'))
# Negative numbers are used for "eval" responses.
elif decoded[0] < 0:
last_eval = decoded
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
def writePortInFile(port):
    # Write the port number in Xportnr, so that the test knows it.
    with open("Xportnr", "w") as f:
        f.write("{}".format(port))
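# Hedged client sketch (not used by test_channel.vim; the sequence number 1
# and the "hello!" command simply mirror what the handler above understands):
def demo_client(port):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(("localhost", port))
    sock.sendall(json.dumps([1, "hello!"]).encode('utf-8'))
    print(sock.recv(4096).decode('utf-8'))  # expect: [1, "got it"]
    sock.close()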
if __name__ == "__main__":
HOST, PORT = "localhost", 0
    # Optionally wait half a second before opening the port, to test the
    # waittime argument of ch_open(). The test still needs the port number up
    # front, so the socket cannot be opened yet; instead we pick a fixed port,
    # assume it is free, and write it out first.
if len(sys.argv) >= 2 and sys.argv[1] == 'delay':
PORT = 13684
writePortInFile(PORT)
print("Wait for it...")
time.sleep(0.5)
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
ip, port = server.server_address
# Start a thread with the server. That thread will then start a new thread
# for each connection.
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
writePortInFile(port)
print("Listening on port {}".format(port))
# Main thread terminates, but the server continues running
# until server.shutdown() is called.
try:
        while server_thread.is_alive():
server_thread.join(1)
except (KeyboardInterrupt, SystemExit):
server.shutdown()
|
xla_client_test.py
|
# Lint as: python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Python extension-based XLA client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import threading
import unittest
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.xla.python import custom_call_for_test
from tensorflow.compiler.xla.python import xla_client
# pylint: disable=g-import-not-at-top
try:
import portpicker
except ImportError:
portpicker = None
# pylint: enable=g-import-not-at-top
bfloat16 = xla_client.bfloat16
ops = xla_client.ops
class ComputationTest(absltest.TestCase):
"""Base class for running an XLA Computation through the local client."""
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.XlaBuilder(name)
def _Execute(self, c, arguments):
backend = xla_client.get_local_backend()
compiled_c = backend.compile(c.Build())
return xla_client.execute_with_python_values(compiled_c, arguments)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
results = self._Execute(c, arguments)
self.assertLen(results, len(expected))
for result, e in zip(results, expected):
      # Numpy's comparison methods are a bit too lenient: they treat inputs as
      # "array-like", so a scalar 4 happily compares equal to [[4]]. To be
      # stricter, we assert the shapes as well.
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(e).shape)
assert_func(result, e)
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected)
def _ExecuteAndCompareClose(self,
c,
arguments=(),
expected=None,
rtol=1e-7,
atol=0):
self._ExecuteAndAssertWith(
functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol), c,
arguments, expected)
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayS64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int64 dtype."""
return np.array(*args, dtype=np.int64, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool dtype."""
return np.array(*args, dtype=np.bool, **kwargs)
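# Hedged aside: the wrappers above could equally come from a single factory;
# this helper is illustrative only and is not used by the tests below.
def _MakeNumpyArrayFactory(dtype):
  """Returns a convenience wrapper building np.array results with `dtype`."""
  return functools.partial(np.array, dtype=dtype)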
class ComputationPrinting(absltest.TestCase):
def ExampleComputation(self):
builder = xla_client.XlaBuilder("acomputation")
p0 = ops.Parameter(builder, 0, xla_client.shape_from_pyval(np.float32(0)))
p1 = ops.Parameter(builder, 1,
xla_client.shape_from_pyval(np.zeros((4,), np.float32)))
x = ops.Mul(p0, p1)
ops.Add(x, x)
return builder.Build()
def testComputationToHloText(self):
computation = self.ExampleComputation()
hlo_text = computation.GetHloText()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
def testComputationToHloGraph(self):
computation = self.ExampleComputation()
hlo_dot_graph = computation.GetHloDotGraph()
self.assertTrue(hlo_dot_graph.startswith("digraph "))
def testHloModuleToHloText(self):
computation = self.ExampleComputation()
hlo_text = computation.get_hlo_module().to_string()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
def testHloModuleToHloGraph(self):
computation = self.ExampleComputation()
hlo_dot_graph = xla_client._xla.hlo_module_to_dot_graph(
computation.get_hlo_module())
self.assertTrue(hlo_dot_graph.startswith("digraph "))
def testCompiledHloModuleToHloText(self):
computation = self.ExampleComputation()
backend = xla_client.get_local_backend()
executable = backend.compile(computation)
hlo_modules = executable.get_hlo_modules()
self.assertLen(hlo_modules, 1)
hlo_text = hlo_modules[0].to_string()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
self.assertIn("fusion", hlo_text)
class ComputationHashTest(absltest.TestCase):
def testHash(self):
builder0 = xla_client.XlaBuilder("computation0")
p0 = ops.Parameter(builder0, 0, xla_client.shape_from_pyval(np.float32(0)))
p1 = ops.Parameter(builder0, 1,
xla_client.shape_from_pyval(np.zeros((4,), np.float32)))
ops.Mul(p0, p1)
computation0 = builder0.Build()
builder1 = xla_client.XlaBuilder("computation1")
p0 = ops.Parameter(builder1, 0, xla_client.shape_from_pyval(np.float32(0)))
p1 = ops.Parameter(builder1, 1,
xla_client.shape_from_pyval(np.zeros((4,), np.float32)))
ops.Mul(p0, p1)
computation1 = builder1.Build()
self.assertEqual(computation0.Hash(), computation1.Hash())
class ComputationsWithConstantsTest(ComputationTest):
"""Tests focusing on Constant ops."""
def testConstantScalarSumS8(self):
c = self._NewComputation()
ops.Add(ops.Constant(c, np.int8(1)), ops.Constant(c, np.int8(2)))
self._ExecuteAndCompareExact(c, expected=[np.int8(3)])
def testConstantScalarSumBF16(self):
c = self._NewComputation()
ops.Add(ops.Constant(c, bfloat16(1.11)), ops.Constant(c, bfloat16(3.14)))
self._ExecuteAndCompareClose(c, expected=[bfloat16(4.25)])
def testConstantScalarSumF32(self):
c = self._NewComputation()
ops.Add(
ops.Constant(c, np.float32(1.11)), ops.Constant(c, np.float32(3.14)))
self._ExecuteAndCompareClose(c, expected=[4.25])
def testConstantScalarSumF64(self):
c = self._NewComputation()
ops.Add(
ops.Constant(c, np.float64(1.11)), ops.Constant(c, np.float64(3.14)))
self._ExecuteAndCompareClose(c, expected=[4.25])
def testConstantScalarSumS32(self):
c = self._NewComputation()
ops.Add(ops.Constant(c, np.int32(1)), ops.Constant(c, np.int32(2)))
self._ExecuteAndCompareClose(c, expected=[3])
def testConstantScalarSumS64(self):
c = self._NewComputation()
ops.Add(ops.Constant(c, np.int64(1)), ops.Constant(c, np.int64(2)))
self._ExecuteAndCompareClose(c, expected=[3])
def testConstantVectorMulF16(self):
c = self._NewComputation()
ops.Mul(
ops.Constant(c, np.array([2.5, 3.3, -1.2, 0.7], np.float16)),
ops.Constant(c, np.array([-1.2, 2, -2, -3], np.float16)))
self._ExecuteAndCompareClose(
c, expected=[np.array([-3, 6.6, 2.4, -2.1], np.float16)], rtol=2e-3)
def testConstantVectorMulF32(self):
c = self._NewComputation()
ops.Mul(
ops.Constant(c, NumpyArrayF32([2.5, 3.3, -1.2, 0.7])),
ops.Constant(c, NumpyArrayF32([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[[-3, 6.6, 2.4, -2.1]])
def testConstantVectorMulF64(self):
c = self._NewComputation()
ops.Mul(
ops.Constant(c, NumpyArrayF64([2.5, 3.3, -1.2, 0.7])),
ops.Constant(c, NumpyArrayF64([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[[-3, 6.6, 2.4, -2.1]])
def testConstantVectorScalarDivF32(self):
c = self._NewComputation()
ops.Div(
ops.Constant(c, NumpyArrayF32([1.5, 2.5, 3.0, -10.8])),
ops.Constant(c, np.float32(2.0)))
self._ExecuteAndCompareClose(c, expected=[[0.75, 1.25, 1.5, -5.4]])
def testConstantVectorScalarDivF64(self):
c = self._NewComputation()
ops.Div(
ops.Constant(c, NumpyArrayF64([1.5, 2.5, 3.0, -10.8])),
ops.Constant(c, np.float64(2.0)))
self._ExecuteAndCompareClose(c, expected=[[0.75, 1.25, 1.5, -5.4]])
def testConstantVectorScalarPowF32(self):
c = self._NewComputation()
ops.Pow(
ops.Constant(c, NumpyArrayF32([1.5, 2.5, 3.0])),
ops.Constant(c, np.float32(2.)))
self._ExecuteAndCompareClose(c, expected=[[2.25, 6.25, 9.]])
def testConstantVectorScalarPowF64(self):
c = self._NewComputation()
ops.Pow(
ops.Constant(c, NumpyArrayF64([1.5, 2.5, 3.0])),
ops.Constant(c, np.float64(2.)))
self._ExecuteAndCompareClose(c, expected=[[2.25, 6.25, 9.]])
def testIota(self):
c = self._NewComputation()
ops.Iota(c, xla_client.PrimitiveType.F32, 10)
self._ExecuteAndCompareExact(c, expected=[np.arange(10, dtype=np.float32)])
def testBroadcastedIota(self):
c = self._NewComputation()
shape = xla_client.Shape.array_shape(xla_client.PrimitiveType.S64, (2, 3))
ops.Iota(c, shape, 1)
expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=np.int64)
self._ExecuteAndCompareExact(c, expected=[expected])
def testBooleanAnd(self):
c = self._NewComputation()
ops.And(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[True, False, False, False]])
def testBooleanOr(self):
c = self._NewComputation()
ops.Or(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[True, True, True, False]])
def testBooleanXor(self):
c = self._NewComputation()
ops.Xor(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]])
def testSum2DF32(self):
c = self._NewComputation()
ops.Add(
ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
ops.Constant(c, NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[[2, 1, 4], [3, 6, 5]]])
def testShiftLeft(self):
c = self._NewComputation()
ops.ShiftLeft(
ops.Constant(c, NumpyArrayS32([3])),
ops.Constant(c, NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[[12]])
def testShiftRightArithmetic(self):
c = self._NewComputation()
ops.ShiftRightArithmetic(
ops.Constant(c, NumpyArrayS32([-2])),
ops.Constant(c, NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[[-1]])
def testShiftRightLogical(self):
c = self._NewComputation()
ops.ShiftRightLogical(
ops.Constant(c, NumpyArrayS32([-1])),
ops.Constant(c, NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[[2**31 - 1]])
def testSum2DF64(self):
c = self._NewComputation()
ops.Add(
ops.Constant(c, NumpyArrayF64([[1, 2, 3], [4, 5, 6]])),
ops.Constant(c, NumpyArrayF64([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[[2, 1, 4], [3, 6, 5]]])
def testSum2DWith1DBroadcastDim0F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
ops.Add(
ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[[11, 12, 13], [24, 25, 26], [37, 38, 39]]])
def testSum2DWith1DBroadcastDim0F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
ops.Add(
ops.Constant(c, NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[[11, 12, 13], [24, 25, 26], [37, 38, 39]]])
def testSum2DWith1DBroadcastDim1F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
ops.Add(
ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[[11, 22, 33], [14, 25, 36], [17, 28, 39]]])
def testSum2DWith1DBroadcastDim1F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
ops.Add(
ops.Constant(c, NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[[11, 22, 33], [14, 25, 36], [17, 28, 39]]])
def testConstantAxpyF32(self):
c = self._NewComputation()
ops.Add(
ops.Mul(
ops.Constant(c, np.float32(2)),
ops.Constant(c, NumpyArrayF32([2.2, 3.3, 4.4, 5.5]))),
ops.Constant(c, NumpyArrayF32([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[[104.4, -93.4, 208.8, -189]])
def testConstantAxpyF64(self):
c = self._NewComputation()
ops.Add(
ops.Mul(
ops.Constant(c, np.float64(2)),
ops.Constant(c, NumpyArrayF64([2.2, 3.3, 4.4, 5.5]))),
ops.Constant(c, NumpyArrayF64([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[[104.4, -93.4, 208.8, -189]])
def testCustomCall(self):
c = self._NewComputation()
for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
xla_client.register_custom_call_target(name, fn, platform="cpu")
ops.CustomCallWithLayout(
c,
b"test_subtract_f32",
operands=[
ops.Constant(c, np.float32(1.25)),
ops.Constant(c, np.float32(0.5))
],
shape_with_layout=xla_client.Shape.array_shape(
np.dtype(np.float32), (), ()),
operand_shapes_with_layout=[
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
])
self._ExecuteAndCompareClose(c, expected=[0.75])
class ComputationFromProtoTest(absltest.TestCase):
"""Test computation execution from HLO proto."""
def testExecuteFromProto(self):
# Build the HLO proto
b = xla_client.XlaBuilder("computation")
ops.Add(ops.Constant(b, np.int8(1)), ops.Constant(b, np.int8(2)))
serialized_proto = b.Build().GetSerializedProto()
# Load and execute the proto
c = xla_client.Computation(xla_client._xla.XlaComputation(serialized_proto))
ans, = xla_client.execute_with_python_values(c.Compile())
np.testing.assert_equal(ans, np.int8(3))
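# Hedged aside (illustrative, not part of the original test): the serialized
# proto above is plain bytes, so it can round-trip through a file, e.g.:
def _RoundTripProto(serialized_proto, path):
  with open(path, "wb") as f:
    f.write(serialized_proto)
  with open(path, "rb") as f:
    return xla_client.Computation(xla_client._xla.XlaComputation(f.read()))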
class ParametersTest(ComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.f32_4vector = NumpyArrayF32([-2.3, 3.3, -4.3, 5.3])
self.f64_scalar_2 = NumpyArrayF64(2.0)
self.f64_4vector = NumpyArrayF64([-2.3, 3.3, -4.3, 5.3])
self.s32_scalar_3 = NumpyArrayS32(3)
self.s32_4vector = NumpyArrayS32([10, 15, -2, 7])
self.s64_scalar_3 = NumpyArrayS64(3)
self.s64_4vector = NumpyArrayS64([10, 15, -2, 7])
def testScalarTimesVectorS32(self):
c = self._NewComputation()
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_3))
p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(self.s32_4vector))
ops.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s32_scalar_3, self.s32_4vector],
expected=[[30, 45, -6, 21]])
def testScalarTimesVectorS64(self):
c = self._NewComputation()
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s64_scalar_3))
p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(self.s64_4vector))
ops.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s64_scalar_3, self.s64_4vector],
expected=[[30, 45, -6, 21]])
def testScalarMinusVectorExplicitNumberingF32(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(self.f32_4vector))
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(self.f32_scalar_2))
ops.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[[-4.3, 1.3, -6.3, 3.3]])
def testScalarMinusVectorExplicitNumberingF64(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(self.f64_4vector))
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(self.f64_scalar_2))
ops.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[[-4.3, 1.3, -6.3, 3.3]])
class BufferTest(ComputationTest):
"""Tests focusing on execution with Buffers."""
def testConstantSum(self):
c = self._NewComputation()
ops.Add(
ops.Constant(c, np.float32(1.11)), ops.Constant(c, np.float32(3.14)))
self._ExecuteAndCompareClose(c, expected=[4.25])
def testOneParameterSum(self):
c = self._NewComputation()
ops.Add(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
ops.Constant(c, np.float32(3.14)))
self._ExecuteAndCompareClose(
c, arguments=[NumpyArrayF32(1.11)], expected=[4.25])
def testTwoParameterSum(self):
c = self._NewComputation()
ops.Add(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0.))))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11),
NumpyArrayF32(3.14)],
expected=[4.25])
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
ops.Add(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
ops.Constant(c, np.float32(3.14)))
arg = NumpyArrayF32(1.11)
backend = xla_client.get_local_backend()
compiled_c = backend.compile(c.Build())
arg_buffer = xla_client.Buffer.from_pyval(arg)
arg_buffer.delete()
with self.assertRaises(RuntimeError):
compiled_c.Execute([arg_buffer])
def testShape(self):
pyval = np.array([[1., 2.]], np.float32)
local_buffer = xla_client.Buffer.from_pyval(pyval)
xla_shape = local_buffer.shape()
self.assertEqual(xla_shape.dimensions(), (1, 2))
self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))
def testBlockHostUntilReadyWorks(self):
arg = np.array([[1., 2.]], np.float32)
arg_buffer = xla_client.Buffer.from_pyval(arg)
arg_buffer.block_host_until_ready()
# This test merely checks that nothing goes awry when we call
# block_host_until_ready(); it's difficult to test anything else.
def testCopyToHost(self):
arg0 = np.array([[1., 2.]], np.float32)
arg1 = np.array([[3., 4.]], np.float32)
arg0_buffer = xla_client.Buffer.from_pyval(arg0)
arg1_buffer = xla_client.Buffer.from_pyval(arg1)
# Prefetch two buffers using copy_to_host_async, and then retrieve their
# values using to_py.
arg0_buffer.copy_to_host_async()
arg0_buffer.copy_to_host_async() # Duplicate calls don't do anything.
arg1_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
np.testing.assert_equal(arg1, arg1_buffer.to_py())
# copy_to_host_async does nothing after to_py is called.
arg0_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
def testDevice(self):
x = np.arange(8)
for device in xla_client.get_local_backend().local_devices():
buf = xla_client.Buffer.from_pyval(x, device=device)
self.assertEqual(buf.device(), device)
np.testing.assert_equal(x, buf.to_py())
class SingleOpTest(ComputationTest):
"""Tests for single ops.
  The goal here is smoke testing - to exercise the most basic functionality of
  single XLA ops. As few additional ops as possible are added around the op
  being tested.
"""
def testConcatenateF32(self):
c = self._NewComputation()
args = (
ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0])),
ops.Constant(c, NumpyArrayF32([4.0, 5.0, 6.0])),
)
ops.ConcatInDim(c, args, dimension=0)
self._ExecuteAndCompareClose(c, expected=[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]])
def testConcatenateF64(self):
c = self._NewComputation()
args = (
ops.Constant(c, NumpyArrayF64([1.0, 2.0, 3.0])),
ops.Constant(c, NumpyArrayF64([4.0, 5.0, 6.0])),
)
ops.ConcatInDim(c, args, dimension=0)
self._ExecuteAndCompareClose(c, expected=[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]])
def testConvertElementType(self):
xla_types = {
np.bool: xla_client.PrimitiveType.PRED,
np.int32: xla_client.PrimitiveType.S32,
np.int64: xla_client.PrimitiveType.S64,
np.float32: xla_client.PrimitiveType.F32,
np.float64: xla_client.PrimitiveType.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype):
c = self._NewComputation()
x = ops.Constant(c, np.array(template, dtype=src_dtype))
ops.ConvertElementType(x, xla_types[dst_dtype])
backend = xla_client.get_local_backend()
result = xla_client.execute_with_python_values(backend.compile(c.Build()))
self.assertLen(result, 1)
expected = np.array(template, dtype=dst_dtype)
self.assertEqual(result[0].shape, expected.shape)
self.assertEqual(result[0].dtype, expected.dtype)
np.testing.assert_equal(result[0], expected)
x = [0, 1, 0, 0, 1]
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype)
def testBitcastConvertType(self):
xla_x32_types = {
np.int32: xla_client.PrimitiveType.S32,
np.float32: xla_client.PrimitiveType.F32,
}
xla_x64_types = {
np.int64: xla_client.PrimitiveType.S64,
np.float64: xla_client.PrimitiveType.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype, dst_etype):
c = self._NewComputation()
x = ops.Constant(c, np.array(template, dtype=src_dtype))
ops.BitcastConvertType(x, dst_etype)
backend = xla_client.get_local_backend()
result = xla_client.execute_with_python_values(backend.compile(c.Build()))
self.assertLen(result, 1)
expected = np.array(template, src_dtype).view(dst_dtype)
self.assertEqual(result[0].shape, expected.shape)
self.assertEqual(result[0].dtype, expected.dtype)
np.testing.assert_equal(result[0], expected)
x = [0, 1, 0, 0, 1]
for xla_types in [xla_x32_types, xla_x64_types]:
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype, xla_types[dst_dtype])
# TODO(b/123523486) implement AllToAll on CPU
def DISABLED_testAllToAllOneReplica(self):
samples = [
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples[:1]:
c = self._NewComputation()
ops.AllToAll(ops.Constant(c, lhs), 0, 0)
self._ExecuteAndCompareExact(c, expected=[lhs])
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
ops.CrossReplicaSum(ops.Constant(c, lhs))
self._ExecuteAndCompareExact(c, expected=[lhs])
def testReplicaId(self):
c = self._NewComputation()
_ = ops.ReplicaId(c)
self._ExecuteAndCompareExact(c, expected=[0])
def testCrossReplicaSumOneReplicaWithSingletonGroup(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
ops.CrossReplicaSum(
ops.Constant(c, lhs), xla_client.make_replica_groups([[0]]))
self._ExecuteAndCompareExact(c, expected=[lhs])
def testDotMatrixVectorF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0], [20.0]])
ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
def testDotMatrixVectorF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0], [20.0]])
ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
def testDotMatrixMatrixF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0, 20.0], [100.0, 200.0]])
ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
def testDotMatrixMatrixF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0, 20.0], [100.0, 200.0]])
ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.make_dot_dimension_numbers(
(([2], [1]), ([0], [0])))
ops.DotGeneral(
ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
ops.DotGeneral(
ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
def testDotGeneralWithPrecisionConfig(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.make_dot_dimension_numbers(
(([2], [1]), ([0], [0])))
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGH)
config.operand_precision.append(config.Precision.HIGHEST)
ops.DotGeneral(
ops.Constant(c, lhs),
ops.Constant(c, rhs),
dimension_numbers,
precision_config=config)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
def testConvGeneralDilatedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NCHW", "OIHW", "NCHW"), 2)
ops.ConvGeneralDilated(
ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads, lhs_dilation,
rhs_dilation, dimension_numbers)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testConvGeneralDilatedF32WithPrecisionConfig(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NCHW", "OIHW", "NCHW"), 2)
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGHEST)
config.operand_precision.append(config.Precision.DEFAULT)
ops.ConvGeneralDilated(
ops.Constant(c, lhs),
ops.Constant(c, rhs),
strides,
pads,
lhs_dilation,
rhs_dilation,
dimension_numbers,
precision_config=config)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testConvGeneralDilatedPermutedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NHWC", "OIHW", "CWNH"), 2)
ops.ConvGeneralDilated(
ops.Constant(c, np.transpose(lhs, (0, 2, 3, 1))), ops.Constant(c, rhs),
strides, pads, lhs_dilation, rhs_dilation, dimension_numbers)
result = np.array([[[[0., 0., 0.], [10., 20., 0.], [0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(
c, expected=[np.transpose(result, (1, 3, 0, 2))])
def testConvGeneralDilatedGroupedConvolutionF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 2, 3)
rhs = a(2, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NCHW", "OIHW", "NCHW"), 2)
feature_group_count = 2
ops.ConvGeneralDilated(
ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads, lhs_dilation,
rhs_dilation, dimension_numbers, feature_group_count)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
], [
[0., 0., 0.],
[330., 380., 160.],
[0., 0., 0.],
[480., 530., 220.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
ops.Not(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[~arr])
def testPopulationCount(self):
c = self._NewComputation()
arr = NumpyArrayS32([3, 0, 1])
ops.PopulationCount(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.array([2, 0, 1])])
def testCountLeadingZeros(self):
c = self._NewComputation()
arr = NumpyArrayS32([0x7FFF, 0x12345678])
ops.Clz(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[[17, 3]])
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Exp(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.exp(arr)])
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Expm1(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.expm1(arr)])
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Round(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.round(arr)])
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Log(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.log(arr)])
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Log1p(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.log1p(arr)])
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Neg(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[-arr])
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Floor(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.floor(arr)])
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Ceil(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.ceil(arr)])
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
ops.Abs(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.abs(arr)])
def testTanh(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Tanh(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.tanh(arr)])
def testTranspose(self):
def _TransposeAndTest(array, permutation):
c = self._NewComputation()
ops.Transpose(ops.Constant(c, array), permutation)
expected = np.transpose(array, permutation)
self._ExecuteAndCompareClose(c, expected=[expected])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
for permutation in itertools.permutations(range(arr.ndim)):
_TransposeAndTest(arr, permutation)
_TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
ops.Eq(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])),
ops.Constant(c, NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]])
def testNe(self):
c = self._NewComputation()
ops.Ne(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])),
ops.Constant(c, NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[[True, False, False, True]])
ops.Ne(
ops.Constant(c, NumpyArrayF32([-2.0, 0.0,
float("nan"),
float("nan")])),
ops.Constant(c, NumpyArrayF32([2.0, -0.0, 1.0,
float("nan")])))
self._ExecuteAndAssertWith(
np.testing.assert_allclose, c, (), expected=[[True, False, True, True]])
def testGt(self):
c = self._NewComputation()
ops.Gt(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[False, True, True, False, False]])
def testGe(self):
c = self._NewComputation()
ops.Ge(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[[True, True, True, False, False]])
def testLt(self):
c = self._NewComputation()
ops.Lt(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[False, False, False, True, True]])
def testLe(self):
c = self._NewComputation()
ops.Le(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[[True, False, False, True, True]])
def testMax(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[[1.0, 2.0, 3.0, 7.0, 12.0]])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(
c, expected=[[[3, 3, 3], [4, 5, 6], [7, 8, 9]]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(
c, expected=[[[3, 4, 5], [4, 5, 6], [7, 8, 9]]])
def testMin(self):
c = self._NewComputation()
ops.Min(
ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[[1.0, 0.0, 2.0, 4.0, 9.0]])
def testPad(self):
c = self._NewComputation()
ops.Pad(
ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
ops.Constant(c, NumpyArrayF32(0.0)),
xla_client.make_padding_config([(1, 2, 1), (0, 1, 0)]))
self._ExecuteAndCompareClose(
c,
expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])
def testPadWithPaddingConfig(self):
c = self._NewComputation()
padding_config = xla_client.PaddingConfig()
for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
dimension = xla_client.PaddingConfigDimension()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
padding_config.dimensions.append(dimension)
ops.Pad(
ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
ops.Constant(c, NumpyArrayF32(0.0)), padding_config)
self._ExecuteAndCompareClose(
c,
expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])
def testReshape(self):
c = self._NewComputation()
ops.Reshape(
ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [4, 5, 6]]])
def testCollapse(self):
c = self._NewComputation()
ops.Collapse(
ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3, 4], [5, 6, 7, 8]]])
def testRev(self):
c = self._NewComputation()
ops.Rev(
ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[[6, 5], [8, 7]], [[2, 1], [4, 3]]]])
def testReducePrecision(self):
c = self._NewComputation()
ops.ReducePrecision(
ops.Constant(c, NumpyArrayF32([float.fromhex("0x1.32fffep-3")])),
exponent_bits=8,
mantissa_bits=7)
self._ExecuteAndCompareClose(c, expected=[[float.fromhex("0x1.32p-3")]])
def testClampF32(self):
c = self._NewComputation()
ops.Clamp(
ops.Constant(c, NumpyArrayF32(-1)),
ops.Constant(c, NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
ops.Constant(c, NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]])
def testClampS32(self):
c = self._NewComputation()
ops.Clamp(
ops.Constant(c, NumpyArrayS32(-1)),
ops.Constant(c, NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
ops.Constant(c, NumpyArrayS32(2)))
self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]])
def testSelect(self):
c = self._NewComputation()
ops.Select(
ops.Constant(c, NumpyArrayBool([True, False, False, True, False])),
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 5])),
ops.Constant(c, NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[[1, -2, -3, 4, -5]])
def testSlice(self):
c = self._NewComputation()
ops.Slice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
[1, 0], [3, 2], [1, 1])
self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]])
def testSliceInDim(self):
c = self._NewComputation()
ops.SliceInDim(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[[2], [5], [8]]])
ops.SliceInDim(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [7, 8, 9]]])
def testDynamicSlice(self):
c = self._NewComputation()
ops.DynamicSlice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
[ops.Constant(c, NumpyArrayS32([1, 0]))], [2, 2])
self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
ops.DynamicUpdateSlice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4]])),
[ops.Constant(c, NumpyArrayS32([1, 1]))])
self._ExecuteAndCompareExact(
c, expected=[[[1, 2, 3], [4, 1, 2], [7, 3, 4]]])
def testTuple(self):
c = self._NewComputation()
ops.Tuple(c, [
ops.Constant(c, np.int32(42)),
ops.Constant(c, NumpyArrayF32([1.0, 2.0])),
ops.Constant(c, NumpyArrayBool([True, False, False, True]))
])
backend = xla_client.get_local_backend()
result = xla_client.execute_with_python_values(backend.compile(c.Build()))
self.assertLen(result, 3)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
ops.GetTupleElement(
ops.Tuple(c, [
ops.Constant(c, np.int32(42)),
ops.Constant(c, NumpyArrayF32([1.0, 2.0])),
ops.Constant(c, NumpyArrayBool([True, False, False, True]))
]), 1)
self._ExecuteAndCompareClose(c, expected=[[1.0, 2.0]])
def testBroadcast(self):
c = self._NewComputation()
ops.Broadcast(ops.Constant(c, NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]]])
def testBroadcastInDim(self):
c = self._NewComputation()
ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [0])
self._ExecuteAndCompareExact(c, expected=[[[1, 1], [2, 2]]])
ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [1])
self._ExecuteAndCompareExact(c, expected=[[[1, 2], [1, 2]]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
ops.RngNormal(
ops.Constant(c, NumpyArrayF32(0.)),
ops.Constant(c, NumpyArrayF32(1.)),
shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32, shape))
backend = xla_client.get_local_backend()
result = xla_client.execute_with_python_values(backend.compile(c.Build()))
# since the result is random, we just check shape and uniqueness
self.assertLen(result, 1)
self.assertEqual(result[0].shape, shape)
self.assertLen(np.unique(result[0]), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
ops.RngUniform(
ops.Constant(c, NumpyArrayF32(lo)),
ops.Constant(c, NumpyArrayF32(hi)),
shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32, shape))
backend = xla_client.get_local_backend()
result = xla_client.execute_with_python_values(backend.compile(c.Build()))
# since the result is random, we just check shape, uniqueness, and range
self.assertLen(result, 1)
self.assertEqual(result[0].shape, shape)
self.assertLen(np.unique(result[0]), np.prod(shape))
self.assertTrue(np.all(lo <= result[0]))
self.assertTrue(np.all(result[0] < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
ops.RngUniform(
ops.Constant(c, NumpyArrayS32(lo)),
ops.Constant(c, NumpyArrayS32(hi)),
shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.S32, shape))
backend = xla_client.get_local_backend()
result = xla_client.execute_with_python_values(backend.compile(c.Build()))
# since the result is random, we just check shape, integrality, and range
self.assertLen(result, 1)
self.assertEqual(result[0].shape, shape)
self.assertEqual(result[0].dtype, np.int32)
self.assertTrue(np.all(lo <= result[0]))
self.assertTrue(np.all(result[0] < hi))
def testCholesky(self):
l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
dtype=np.float32)
c = self._NewComputation()
ops.Cholesky(ops.Constant(c, np.dot(l, l.T)))
self._ExecuteAndCompareClose(c, expected=[l], rtol=1e-4)
def testSort(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
c = self._NewComputation()
ops.Sort(c, [ops.Constant(c, keys)])
self._ExecuteAndCompareClose(
c, expected=[np.array([[1, 2, 3, 4], [1, 2, 3, 4]], dtype=np.float32)])
def testSortKeyVal(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
ops.Sort(c, (ops.Constant(c, keys), ops.Constant(c, values)), dimension=0)
backend = xla_client.get_local_backend()
result = xla_client.execute_with_python_values(backend.compile(c.Build()))
self.assertLen(result, 2)
np.testing.assert_allclose(result[0], [[2, 1, 1, 2], [3, 4, 4, 3]])
np.testing.assert_equal(result[1], [[0, 5, 2, 7], [4, 1, 6, 3]])
def testSortCustomComparator(self):
b = self._NewComputation("comparator")
p0 = ops.Parameter(b, 0, xla_client.shape_from_pyval(NumpyArrayF32(0)))
q0 = ops.Parameter(b, 1, xla_client.shape_from_pyval(NumpyArrayF32(0)))
p1 = ops.Parameter(b, 2, xla_client.shape_from_pyval(NumpyArrayS32(0)))
q1 = ops.Parameter(b, 3, xla_client.shape_from_pyval(NumpyArrayS32(0)))
ops.Or(ops.Lt(p0, q0), ops.And(ops.Eq(p0, q0), ops.Gt(p1, q1)))
comparator = b.Build()
keys = np.array([[2, 3, 1, 3], [3, 1, 2, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
ops.Sort(
c, (ops.Constant(c, keys), ops.Constant(c, values)),
dimension=1,
comparator=comparator)
backend = xla_client.get_local_backend()
result = xla_client.execute_with_python_values(backend.compile(c.Build()))
self.assertLen(result, 2)
np.testing.assert_allclose(result[0], [[1, 2, 3, 3], [1, 2, 2, 3]])
np.testing.assert_equal(result[1], [[2, 0, 3, 1], [5, 7, 6, 4]])
def testQR(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
ops.Tuple(c, ops.QR(ops.Constant(c, a), full_matrices=True))
q, r = self._Execute(c, ())
np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)
def testEigh(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
a = (a + a.T) / 2
c = self._NewComputation()
ops.Tuple(c, ops.Eigh(ops.Constant(c, a), lower=True))
# TODO(b/129396575): Turn this test back on when it passes without fastmath.
# v, w = self._Execute(c, ())
# self.assertLess(np.linalg.norm(np.dot(a, v) - w * v), 1e-3)
def testSVD(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
ops.Tuple(c, ops.SVD(ops.Constant(c, a)))
u, d, v = self._Execute(c, ())
self.assertLess(np.linalg.norm(a - np.matmul(u * d, v.T)), 1e-3)
def testTriangularSolve(self):
a_vals = np.array(
[[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
dtype=np.float32)
b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.float32)
c = self._NewComputation()
ops.TriangularSolve(
ops.Constant(c, a_vals),
ops.Constant(c, b_vals),
left_side=False,
lower=True,
transpose_a=ops.TriangularSolveOptions_Transpose.TRANSPOSE,
unit_diagonal=False)
self._ExecuteAndCompareClose(
c,
expected=[
np.array([
[0.5, 0.08333334, 0.04629629, 0.03367003],
[2.5, -0.25, -0.1388889, -0.1010101],
[4.5, -0.58333331, -0.32407406, -0.23569024],
],
dtype=np.float32)
],
rtol=1e-4)
def testIsConstant(self):
c = self._NewComputation()
a = ops.Constant(c, np.int32(3))
b = ops.Constant(c, np.int32(1))
x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayS32(0)))
const_expr = ops.Sub(b, a)
non_const_expr = ops.Mul(const_expr, x)
self.assertTrue(c.IsConstant(const_expr))
self.assertFalse(c.IsConstant(non_const_expr))
def testGather(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
indices = np.array([[[0, 2], [2, 1]], [[1, 2], [2, 0]]], dtype=np.int32)
dnums = xla_client.GatherDimensionNumbers()
dnums.offset_dims.append(1)
dnums.offset_dims.append(2)
dnums.start_index_map.append(0)
dnums.start_index_map.append(1)
dnums.index_vector_dim = 2
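    # With index_vector_dim=2 and slice_sizes=[1, 1], every (row, col) pair in
    # `indices` picks out a single element of `a`; offset_dims (1, 2) keep the
    # two size-1 slice dimensions in the output.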
c = self._NewComputation()
ops.Gather(
ops.Constant(c, a), ops.Constant(c, indices), dnums, slice_sizes=[1, 1])
g, = self._Execute(c, ())
expected = np.array([[[[2, 7]]], [[[5, 6]]]], dtype=np.int32)
np.testing.assert_allclose(g, expected, rtol=1e-4)
def testFft(self):
shape = [2, 3, 4, 5]
rng = np.random.RandomState(0)
a = rng.randn(*shape) + 1.0j * rng.randn(*shape)
a = a.astype(np.complex64)
# FFT
c = self._NewComputation()
ops.Fft(ops.Constant(c, a), xla_client.FftType.FFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=[np.fft.fftn(a, axes=(1, 2, 3))], rtol=1e-4)
# IFFT
c = self._NewComputation()
ops.Fft(ops.Constant(c, a), xla_client.FftType.IFFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=[np.fft.ifftn(a, axes=(1, 2, 3))], rtol=1e-4)
# RFFT
b = rng.randn(*shape).astype(np.float32)
c = self._NewComputation()
ops.Fft(ops.Constant(c, b), xla_client.FftType.RFFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=[np.fft.rfftn(b, axes=(1, 2, 3))], rtol=1e-4)
# IRFFT
c = self._NewComputation()
ops.Fft(ops.Constant(c, a), xla_client.FftType.IRFFT, [3, 4, 8])
self._ExecuteAndCompareClose(
c, expected=[np.fft.irfftn(a, axes=(1, 2, 3))], rtol=1e-4)
def testNextAfter(self):
c = self._NewComputation()
ops.NextAfter(
ops.Constant(c, np.array([1, 2], dtype=np.float32)),
ops.Constant(c, np.array([2, 1], dtype=np.float32)))
out, = self._Execute(c, ())
eps = np.finfo(np.float32).eps
np.testing.assert_equal(np.array([eps + 1, 2 - eps], dtype=np.float32), out)
def testRegularizedIncompleteBeta(self):
x = np.array([0.53787335, 0.24015466, 0.47494545, 0.13567594, 0.95114538])
a = np.array([0.00753073, 0.34813385, 0.30485708, 1.29298632, 0.51472606])
b = np.array([0.55688389, 0.59794214, 0.42661022, 1.59748339, 0.95047677])
c = self._NewComputation()
ops.RegularizedIncompleteBeta(
ops.Constant(c, a), ops.Constant(c, b), ops.Constant(c, x))
expected = np.array(
[0.98923271, 0.48575411, 0.57952568, 0.12579775, 0.96989155])
self._ExecuteAndCompareClose(c, expected=[expected], rtol=1e-4)
class EmbeddedComputationsTest(ComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantS32Computation(self):
"""Computation (f32) -> s32 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s32_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0)))
ops.Constant(c, np.int32(1))
return c.Build()
def _CreateConstantS64Computation(self):
"""Computation (f64) -> s64 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s64_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF64(0)))
ops.Constant(c, np.int64(1))
return c.Build()
def _CreateConstantF32Computation(self):
"""Computation (f32) -> f32 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f32_one")
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0)))
ops.Constant(c, np.float32(1.0))
return c.Build()
def _CreateConstantF64Computation(self):
"""Computation (f64) -> f64 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f64_one")
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF64(0)))
ops.Constant(c, np.float64(1.0))
return c.Build()
def _CreateMulF32By2Computation(self):
"""Computation (f32) -> f32 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
ops.Mul(
ops.Parameter(
c, 0,
xla_client.shape_from_pyval(
NumpyArrayF32(0)).with_major_to_minor_layout_if_absent()),
ops.Constant(c, np.float32(2.0)))
return c.Build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
ops.Mul(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0))),
ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0))))
return c.Build()
def _CreateMulF64By2Computation(self):
"""Computation (f64) -> f64 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f64_by2")
ops.Mul(
ops.Parameter(
c, 0,
xla_client.shape_from_pyval(
NumpyArrayF64(0)).with_major_to_minor_layout_if_absent()),
ops.Constant(c, np.float64(2.0)))
return c.Build()
def _CreateBinaryAddS32Computation(self):
"""Computation (s32, s32) -> s32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
ops.Add(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayS32(0))),
ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayS32(0))))
return c.Build()
def _CreateBinaryAddF32Computation(self):
"""Computation (f32, f32) -> f32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
ops.Add(
ops.Parameter(
c, 0,
xla_client.shape_from_pyval(
NumpyArrayF32(0)).with_major_to_minor_layout_if_absent()),
ops.Parameter(
c, 1,
xla_client.shape_from_pyval(
NumpyArrayF32(0)).with_major_to_minor_layout_if_absent()))
return c.Build()
def _CreateBinaryAddF64Computation(self):
"""Computation (f64, f64) -> f64 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
ops.Add(
ops.Parameter(
c, 0,
xla_client.shape_from_pyval(
NumpyArrayF64(0)).with_major_to_minor_layout_if_absent()),
ops.Parameter(
c, 1,
xla_client.shape_from_pyval(
NumpyArrayF64(0)).with_major_to_minor_layout_if_absent()))
return c.Build()
def _CreateBinaryDivF32Computation(self):
"""Computation (f32, f32) -> f32 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
ops.Div(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0))),
ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0))))
return c.Build()
def _CreateBinaryDivF64Computation(self):
"""Computation (f64, f64) -> f64 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
ops.Div(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF64(0))),
ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF64(0))))
return c.Build()
def _CreateTestF32Lt10Computation(self):
"""Computation (f32) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f32_lt_10")
ops.Lt(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0))),
ops.Constant(c, np.float32(10.)))
return c.Build()
def _CreateTestF64Lt10Computation(self):
"""Computation (f64) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f64_lt_10")
ops.Lt(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF64(0))),
ops.Constant(c, np.float64(10.)))
return c.Build()
def _CreateBinaryGeF32Computation(self):
"""Computation (f32, f32) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
ops.Ge(
ops.Parameter(
c, 0,
xla_client.shape_from_pyval(
NumpyArrayF32(0)).with_major_to_minor_layout_if_absent()),
ops.Parameter(
c, 1,
xla_client.shape_from_pyval(
NumpyArrayF32(0)).with_major_to_minor_layout_if_absent()))
return c.Build()
def _CreateBinaryGeF64Computation(self):
"""Computation (f64, f64) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
ops.Ge(
ops.Parameter(
c, 0,
xla_client.shape_from_pyval(
NumpyArrayF64(0)).with_major_to_minor_layout_if_absent()),
ops.Parameter(
c, 1,
xla_client.shape_from_pyval(
NumpyArrayF64(0)).with_major_to_minor_layout_if_absent()))
return c.Build()
def _MakeSample3DArrayF32(self):
return NumpyArrayF32([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def _MakeSample3DArrayF64(self):
return NumpyArrayF64([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def testCallF32(self):
c = self._NewComputation()
ops.Call(
c,
self._CreateMulF32By2Computation(),
operands=(ops.Constant(c, np.float32(5.0)),))
self._ExecuteAndCompareClose(c, expected=[10.0])
def testCallF64(self):
c = self._NewComputation()
ops.Call(
c,
self._CreateMulF64By2Computation(),
operands=(ops.Constant(c, np.float64(5.0)),))
self._ExecuteAndCompareClose(c, expected=[10.0])
def testMapEachElementToS32Constant(self):
c = self._NewComputation()
ops.Map(c, [ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS32Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[[1, 1, 1, 1]])
def testMapEachElementToS64Constant(self):
c = self._NewComputation()
ops.Map(c, [ops.Constant(c, NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS64Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[[1, 1, 1, 1]])
def testMapMulBy2F32(self):
c = self._NewComputation()
ops.Map(c, [ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[[2.0, 4.0, 6.0, 8.0]])
def testMapMulBy2F64(self):
c = self._NewComputation()
ops.Map(c, [ops.Constant(c, NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[[2.0, 4.0, 6.0, 8.0]])
def testSimpleMapChainF32(self):
# Chains a map of constant-f32 with a map of mul-by-2
c = self._NewComputation()
const_f32 = ops.Map(c,
[ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF32Computation(), [0])
ops.Map(c, [const_f32], self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[[2.0, 2.0, 2.0, 2.0]])
def testSimpleMapChainF64(self):
# Chains a map of constant-f64 with a map of mul-by-2
c = self._NewComputation()
const_f64 = ops.Map(c,
[ops.Constant(c, NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF64Computation(), [0])
ops.Map(c, [const_f64], self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[[2.0, 2.0, 2.0, 2.0]])
def testDivVectorsWithMapF32(self):
c = self._NewComputation()
ops.Map(c, (ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
ops.Constant(c, NumpyArrayF32([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF32Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[[0.2, 0.4, 0.75, 1.0]])
def testDivVectorsWithMapF64(self):
c = self._NewComputation()
ops.Map(c, (ops.Constant(c, NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
ops.Constant(c, NumpyArrayF64([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF64Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[[0.2, 0.4, 0.75, 1.0]])
def testSelectAndScatterF32(self):
c = self._NewComputation()
operand = ops.Constant(c, NumpyArrayF32([[1., 2., 6.], [4., 5., 3.]]))
window_dimensions = (2, 1)
window_strides = (1, 2)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID,
c.GetShape(operand).dimensions(), window_dimensions, window_strides)
ops.SelectAndScatterWithGeneralPadding(
operand,
select=self._CreateBinaryGeF32Computation(),
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding,
source=ops.Constant(c, NumpyArrayF32([[0.1, 0.2]])),
init_value=ops.Constant(c, NumpyArrayF32(1)),
scatter=self._CreateBinaryAddF32Computation())
self._ExecuteAndCompareClose(c, expected=[[[1., 1., 1.2], [1.1, 1., 1.]]])
def testSelectAndScatterF64(self):
c = self._NewComputation()
operand = ops.Constant(c, NumpyArrayF64([[1., 2., 6.], [4., 5., 3.]]))
window_dimensions = (2, 1)
window_strides = (1, 2)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID,
c.GetShape(operand).dimensions(), window_dimensions, window_strides)
ops.SelectAndScatterWithGeneralPadding(
operand,
select=self._CreateBinaryGeF64Computation(),
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding,
source=ops.Constant(c, NumpyArrayF64([[0.1, 0.2]])),
init_value=ops.Constant(c, NumpyArrayF64(1)),
scatter=self._CreateBinaryAddF64Computation())
self._ExecuteAndCompareClose(c, expected=[[[1., 1., 1.2], [1.1, 1., 1.]]])
def testReduce1DtoScalarF32(self):
c = self._NewComputation()
ops.Reduce(
c,
operands=[ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
init_values=[ops.Constant(c, np.float32(0))],
computation=self._CreateBinaryAddF32Computation(),
dimensions_to_reduce=[0])
self._ExecuteAndCompareClose(c, expected=[10])
def testReduce1DtoScalarF64(self):
c = self._NewComputation()
ops.Reduce(
c,
operands=[ops.Constant(c, NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
init_values=[ops.Constant(c, np.float64(0))],
computation=self._CreateBinaryAddF64Computation(),
dimensions_to_reduce=[0])
self._ExecuteAndCompareClose(c, expected=[10])
def testReduce2DTo1DDim0F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
ops.Reduce(
c,
operands=[ops.Constant(c, input_array)],
init_values=[ops.Constant(c, np.float32(0))],
computation=self._CreateBinaryAddF32Computation(),
dimensions_to_reduce=[0])
self._ExecuteAndCompareClose(c, expected=[[5, 7, 9]])
def testReduce2DTo1DDim0F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
ops.Reduce(
c,
operands=[ops.Constant(c, input_array)],
init_values=[ops.Constant(c, np.float64(0))],
computation=self._CreateBinaryAddF64Computation(),
dimensions_to_reduce=[0])
self._ExecuteAndCompareClose(c, expected=[[5, 7, 9]])
def testReduce2DTo1DDim1F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
ops.Reduce(
c,
operands=[ops.Constant(c, input_array)],
init_values=[ops.Constant(c, np.float32(0))],
computation=self._CreateBinaryAddF32Computation(),
dimensions_to_reduce=[1])
self._ExecuteAndCompareClose(c, expected=[[6, 15]])
def testReduce2DTo1DDim1F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
ops.Reduce(
c,
operands=[ops.Constant(c, input_array)],
init_values=[ops.Constant(c, np.float64(0))],
computation=self._CreateBinaryAddF64Computation(),
dimensions_to_reduce=[1])
self._ExecuteAndCompareClose(c, expected=[[6, 15]])
def testReduce3DAllPossibleWaysF32(self):
input_array = self._MakeSample3DArrayF32()
def _ReduceAndTest(*dims):
c = self._NewComputation()
ops.Reduce(
c,
operands=[ops.Constant(c, input_array)],
init_values=[ops.Constant(c, np.float32(0))],
computation=self._CreateBinaryAddF32Computation(),
dimensions_to_reduce=dims)
self._ExecuteAndCompareClose(
c, expected=[np.sum(input_array, axis=tuple(dims))])
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduce3DAllPossibleWaysF64(self):
input_array = self._MakeSample3DArrayF64()
def _ReduceAndTest(*dims):
c = self._NewComputation()
ops.Reduce(
c,
operands=[ops.Constant(c, input_array)],
init_values=[ops.Constant(c, np.float64(0))],
computation=self._CreateBinaryAddF64Computation(),
dimensions_to_reduce=dims)
self._ExecuteAndCompareClose(
c, expected=[np.sum(input_array, axis=tuple(dims))])
    _ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduceWindowValidUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 1)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operand=ops.Constant(c, input_array),
init_value=ops.Constant(c, np.float32(0)),
computation=self._CreateBinaryAddF32Computation(),
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.]]])
def testReduceWindowSameUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 1)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.SAME, input_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operand=ops.Constant(c, input_array),
init_value=ops.Constant(c, np.float32(0)),
computation=self._CreateBinaryAddF32Computation(),
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.], [4., 5., 6.]]])
def testReduceWindowValidGeneralStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 2)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operand=ops.Constant(c, input_array),
init_value=ops.Constant(c, np.float32(0)),
computation=self._CreateBinaryAddF32Computation(),
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[5., 9.]]])
def testReduceWindowValidUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 1)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operand=ops.Constant(c, input_array),
init_value=ops.Constant(c, np.float64(0)),
computation=self._CreateBinaryAddF64Computation(),
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.]]])
def testReduceWindowSameUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 1)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.SAME, input_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operand=ops.Constant(c, input_array),
init_value=ops.Constant(c, np.float64(0)),
computation=self._CreateBinaryAddF64Computation(),
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.], [4., 5., 6.]]])
def testReduceWindowValidGeneralStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 2)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operand=ops.Constant(c, input_array),
init_value=ops.Constant(c, np.float64(0)),
computation=self._CreateBinaryAddF64Computation(),
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[5., 9.]]])
def testWhileF32(self):
cond = self._CreateTestF32Lt10Computation()
body = self._CreateMulF32By2Computation()
c = self._NewComputation()
init = ops.Constant(c, np.float32(1.))
ops.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=[16.])
def testWhileF64(self):
cond = self._CreateTestF64Lt10Computation()
body = self._CreateMulF64By2Computation()
c = self._NewComputation()
init = ops.Constant(c, np.float64(1.))
ops.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=[16.])
def testConditionalTrue(self):
c = self._NewComputation()
pred = ops.Constant(c, np.bool_(True))
true_operand = ops.Constant(c, np.float32(3.))
true_computation = self._CreateMulF32By2Computation()
false_operand = ops.Constant(c, np.float32(2.))
false_computation = self._CreateConstantF32Computation()
ops.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=[6.])
def testConditionalFalse(self):
c = self._NewComputation()
pred = ops.Constant(c, np.bool_(False))
true_operand = ops.Constant(c, np.float32(3.))
true_computation = self._CreateMulF32By2Computation()
false_operand = ops.Constant(c, np.float32(2.))
false_computation = self._CreateConstantF32Computation()
ops.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=[1.])
def testInfeedS32Values(self):
to_infeed = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
ops.GetTupleElement(
ops.InfeedWithToken(
ops.CreateToken(c),
xla_client.shape_from_pyval(
to_infeed[0]).with_major_to_minor_layout_if_absent()), 0)
backend = xla_client.get_local_backend()
compiled_c = backend.compile(c.Build())
for item in to_infeed:
xla_client.transfer_to_infeed(item)
for item in to_infeed:
result, = xla_client.execute_with_python_values(compiled_c)
self.assertEqual(result, item)
def testInfeedTuple(self):
to_infeed = (NumpyArrayS32([1, 2, 3, 4]), NumpyArrayS32([[7], [8]]))
c = self._NewComputation()
ops.GetTupleElement(
ops.InfeedWithToken(
ops.CreateToken(c),
xla_client.shape_from_pyval(
to_infeed).with_major_to_minor_layout_if_absent()), 0)
backend = xla_client.get_local_backend()
compiled_c = backend.compile(c.Build())
xla_client.transfer_to_infeed(to_infeed)
result = xla_client.execute_with_python_values(compiled_c)
self.assertLen(result, 2)
np.testing.assert_equal(result[0], to_infeed[0])
np.testing.assert_equal(result[1], to_infeed[1])
def testInfeedThenOutfeedS32(self):
to_round_trip = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
x_and_token = ops.InfeedWithToken(
ops.CreateToken(c),
xla_client.shape_from_pyval(
to_round_trip[0]).with_major_to_minor_layout_if_absent())
x = ops.GetTupleElement(x_and_token, 0)
token = ops.GetTupleElement(x_and_token, 1)
outfeed_shape = xla_client.shape_from_pyval(
to_round_trip[0]).with_major_to_minor_layout_if_absent()
ops.OutfeedWithToken(x, token, outfeed_shape)
backend = xla_client.get_local_backend()
compiled_c = backend.compile(c.Build())
for want in to_round_trip:
execution = threading.Thread(target=lambda: compiled_c.Execute([]))
execution.start()
xla_client.transfer_to_infeed(want)
got = xla_client.transfer_from_outfeed(outfeed_shape)
execution.join()
self.assertEqual(want, got)
def testScatter(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
scatter_indices = np.array([0, 2], dtype=np.int32)
updates = np.array([[10, 20, 30], [70, 80, 90]], dtype=np.int32)
dnums = xla_client.ScatterDimensionNumbers()
dnums.update_window_dims.append(1)
dnums.inserted_window_dims.append(0)
dnums.scatter_dims_to_operand_dims.append(0)
dnums.index_vector_dim = 1
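    # Each entry of `scatter_indices` names a row of `a`; the matching row of
    # `updates` is combined into it with the add computation, so rows 0 and 2
    # change and row 1 is untouched.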
c = self._NewComputation()
ops.Scatter(
ops.Constant(c, a), ops.Constant(c, scatter_indices),
ops.Constant(c, updates), self._CreateBinaryAddS32Computation(), dnums)
expected = np.array([[10, 21, 32], [3, 4, 5], [76, 87, 98]], dtype=np.int32)
self._ExecuteAndCompareClose(c, expected=[expected])
class ErrorTest(ComputationTest):
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.s32_scalar_2 = NumpyArrayS32(2)
def testCompileWithWrongElementTypeInLayout(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_2))
c.ClearOpMetadata()
options = xla_client.CompileOptions()
options.argument_layouts = [
xla_client.Shape.array_shape(np.dtype(np.float32), [])
]
backend = xla_client.get_local_backend()
def TestFun():
return backend.compile(c.Build(), compile_options=options)
self.assertRaisesRegex(
RuntimeError, r".*Invalid argument shape.*"
r"expected s32\[\], got f32\[\].*", TestFun)
def testInvokeWithWrongElementType(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_2))
c.ClearOpMetadata()
backend = xla_client.get_local_backend()
def TestFun():
return xla_client.execute_with_python_values(
backend.compile(c.Build()), [self.f32_scalar_2])
self.assertRaisesRegex(
RuntimeError, r"Invalid argument: Argument does not match.*"
r"want s32\[\], got f32\[\].*", TestFun)
class ComputationRootTest(ComputationTest):
"""Tests related to setting the root of the computation."""
def testComputationRootDifferentFromLastOp(self):
c = self._NewComputation()
x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(2.0)))
result = ops.Add(x, ops.Constant(c, np.float32(3.14)))
ops.Add(result, ops.Constant(c, np.float32(1.618)))
arg = NumpyArrayF32(1.0)
backend = xla_client.get_local_backend()
compiled_c = backend.compile(c.Build(result))
ans, = xla_client.execute_with_python_values(compiled_c, [arg])
np.testing.assert_allclose(ans, 4.14)
class SetShardingTest(ComputationTest):
"""Tests related to set OpSharding."""
def testSetSharding(self):
c = self._NewComputation()
sharding = xla_client.OpSharding()
sharding.type = sharding.type.REPLICATED
sharding.tile_assignment_dimensions.extend([1])
sharding.tile_assignment_devices.extend([0])
# Set Sharding.
c.SetSharding(sharding)
x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(2.0)))
# Clear Sharding.
c.ClearSharding()
result = ops.Add(x, ops.Constant(c, np.float32(3.14)))
ops.Add(result, ops.Constant(c, np.float32(1.618)))
arg = NumpyArrayF32(1.0)
backend = xla_client.get_local_backend()
compiled_c = backend.compile(c.Build(result))
ans, = xla_client.execute_with_python_values(compiled_c, [arg])
np.testing.assert_allclose(ans, 4.14)
class AliasTest(ComputationTest):
def testSetUpAlias(self):
c = self._NewComputation()
p1 = ops.Parameter(
c, 0,
xla_client.shape_from_pyval(
NumpyArrayF32(1.0)).with_major_to_minor_layout_if_absent())
p2 = ops.Parameter(
c, 1,
xla_client.shape_from_pyval(
NumpyArrayF32(1.0)).with_major_to_minor_layout_if_absent())
out = ops.Add(p1, p2)
c.SetUpAlias([], 0, [])
c = c.Build(out)
backend = xla_client.get_local_backend()
with self.assertRaisesRegex(
RuntimeError, "Buffer aliasing is not supported "
"by XLA for non-TPU backends"):
backend.compile(c)
int_dtypes = [
np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32,
np.uint64
]
float_dtypes = [np.float16, np.float32, np.float64]
complex_dtypes = [np.complex64, np.complex128]
dlpack_dtypes = int_dtypes + float_dtypes + [bfloat16]
standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_]
testcase_shapes = [
(),
(1,),
(2, 3),
(2, 0),
(0, 7),
(4, 1, 2),
(2, 1, 3),
(2, 4, 1),
(3, 1),
(1, 3),
]
def FormatShapeAndDtype(shape, dtype):
return "_{}[{}]".format(np.dtype(dtype).name, ",".join(map(str, shape)))
class DLPackTest(parameterized.TestCase):
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters({
"testcase_name": FormatShapeAndDtype(shape, dtype),
"dtype": dtype,
"shape": shape
} for dtype in dlpack_dtypes for shape in testcase_shapes)
def testRoundTrip(self, dtype, shape):
x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
backend = xla_client.get_local_backend()
buffer = xla_client.Buffer.from_pyval(x, backend=backend)
dlt = xla_client._xla.BufferToDLPackManagedTensor(buffer)
del buffer # Free "buffer" to make sure dlt retains ownership.
self.assertEqual(type(dlt).__name__, "PyCapsule")
y = xla_client._xla.DLPackManagedTensorToBuffer(dlt, backend.client)
np.testing.assert_array_equal(x, y.to_py())
def testTensorsCanBeConsumedOnceOnly(self):
x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
backend = xla_client.get_local_backend()
buffer = xla_client.Buffer.from_pyval(x, backend=backend)
dlt = xla_client._xla.BufferToDLPackManagedTensor(buffer)
def ConsumeDLPackTensor():
_ = xla_client._xla.DLPackManagedTensorToBuffer(dlt, backend.client)
ConsumeDLPackTensor()
self.assertRaisesRegex(RuntimeError,
".*a DLPack tensor may be consumed at most once.*",
ConsumeDLPackTensor)
class BufferProtocolTest(parameterized.TestCase):
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters({
"testcase_name": FormatShapeAndDtype(shape, dtype),
"dtype": dtype,
"shape": shape
} for dtype in standard_dtypes for shape in testcase_shapes)
def testRoundTrip(self, dtype, shape):
x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
x_ptr = x.__array_interface__["data"][0]
backend = xla_client.get_local_backend("cpu")
buffer = xla_client.Buffer.from_pyval(x, backend=backend)
y = np.array(buffer, copy=False)
y_ptr = y.__array_interface__["data"][0]
np.testing.assert_array_equal(x, y)
# If the input was sufficiently aligned, the input and output should alias.
self.assertTrue((x_ptr & 63) != 0 or x_ptr == y_ptr)
self.assertEqual(y_ptr, buffer.unsafe_buffer_pointer())
buffer2 = xla_client.Buffer.from_pyval(x, backend=backend, force_copy=True)
z = np.array(buffer2, copy=False)
self.assertNotEqual(x.__array_interface__["data"][0],
z.__array_interface__["data"][0])
def testDeleteWithActiveView(self):
x = np.random.randn(20, 10)
backend = xla_client.get_local_backend("cpu")
buffer = xla_client.Buffer.from_pyval(x, backend=backend)
buffer_ptr = buffer.unsafe_buffer_pointer()
y = np.array(buffer, copy=False)
buffer.delete()
# It is still legal to access `y`; the array view must keep it alive.
np.testing.assert_array_equal(x, y)
self.assertEqual(y.__array_interface__["data"][0], buffer_ptr)
class ProfilerTest(absltest.TestCase):
def testTraceMe(self):
# TODO(phawkins): These tests just check that the TraceMe context manager
# acts like a context manager and doesn't explode. Ideally we'd check that
# the profiler saw the traceme too.
with xla_client.profiler.TraceMe("test1"):
pass
with xla_client.profiler.TraceMe("test2", foo=123):
pass
with self.assertRaises(ValueError):
with xla_client.profiler.TraceMe("test3"):
raise ValueError("test")
@unittest.skipIf(portpicker is None, "Test requires portpicker")
def testStartServer(self):
port = portpicker.pick_unused_port()
server = xla_client.profiler.start_server(port)
del server
if __name__ == "__main__":
absltest.main()
|
server_UDP_modified.py
|
import socket
import threading
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('127.0.0.1',9999))
print('Bind UDP on 9999...')
def udplink(data, addr):
    # Echo the received datagram back to the sender.
    print('Received from %s:%s.' % addr)
    s.sendto(data, addr)
def thread_func():
    print("thread started")
    while True:
        data, addr = s.recvfrom(1024)
        udplink(data, addr)
num_thread = 8
for i in range(num_thread):
t = threading.Thread(target=thread_func)
t.start()
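# A minimal client sketch (illustrative, not part of the server) to exercise
# the echo loop above from a separate process; payload and buffer size are
# arbitrary choices:
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   c.sendto(b'hello', ('127.0.0.1', 9999))
#   print(c.recvfrom(1024))  # expected: (b'hello', ('127.0.0.1', 9999))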
|
Presentiment.py
|
############ TAGS TO BE USED WITH EXTENSION "BETTER COMMENTS"
# $ TITLE / DONE
# & Subtitle
# ! Warning
# * Demo
# ? Question/clarification
# % To do
# x Deleted
############
import sys
import os
import random
import ctypes
import pandas
import numpy
import requests
import json
import time
import threading
from datetime import datetime, timedelta
from PIL import Image
import glob
from PyQt5 import QtGui
from PyQt5.QtCore import QDateTime, Qt, QTimer, QEventLoop, QDir
from PyQt5.QtWidgets import (QApplication, QComboBox,QDialog, QGridLayout, QGroupBox, QLabel, QLineEdit,
QPushButton, QTextEdit, QVBoxLayout, QHBoxLayout, QWidget, QMessageBox, QSpinBox, QDesktopWidget,
QCheckBox, QFileDialog, QTabWidget, QSizePolicy)
#? README
#? Interval between 0 and 99 seconds
#? Max no. of trials = 99
#? To export, add the ".csv" extension
#? For Neulog, you need to open Neulog API
#$ TODO:
#! Sharing code
#! Dynamic "path_REG_dll"
#! Design
#! Add an image of the experimental design
#! Add Z0 to Phys Data, next to Trial and Instances
#! Add info
#! Add manual
#! Adapt exports
#! Close off error paths
#! Don't close if RNG is not selected
#! Don't close if exporting fails
#! Don't close if the statistical analysis can't be performed
#! Don't close if no library is selected
#! Warn if no image bank has been selected
#! Don't exit if there is no Neulog port
#! Review the other "#!" items
#! Prevent different sample rates across Neulogs (not supported and it breaks)
#! Fix the on-demand procedure; it would break the sample count computed for the Neulog
#! Compile with https://pypi.org/project/auto-py-to-exe/
#! LOW PRIORITY:
#! Still missing "Opened", "GetBits", "GetBytes" and "APIVersion" in the PsyREG class
#! Include FOR loop for each Psyleron connected in "click_refresh_sources"
#! Add analysis dimensions (e.g. Fear[Death-Danger, Animals-Injuries, etc.])
#$ WISHLIST:
#! Separate analyses for men and women
#! Select visual stimuli
#$ Images (DONE)
#! Light flashes
#! Select auditory stimuli
#! Loud bangs
#! Silence
#! White noise
#! Physiological sensors
#! Neulog
#! Emotiv
#! Vernier Go-Direct
#! BIOPAC
################################
class Pseudo_RNG():
def __init__(self):
self.name = "Pseudo-RNG"
def get_bits(self, maxbts):
str_list = []
for x in range(maxbts):
str_list.append(random.randrange(0,2))
str_bits = ''.join(str(x) for x in str_list)
# print(str_bits)
return str_bits
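# Usage sketch (illustrative): Pseudo_RNG().get_bits(8) returns a string of
# eight pseudo-random bits such as "01101001".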
class PsyREG():
def __init__(self):
#? Define path of DLL
        self.path_REG_dll = os.path.join(os.getcwd(), 'Presentimiento', 'PsyREG.dll') # DLL file path
# self.path_REG_dll = r'C:\Users\ramse\Python_ENVS\Presentimiento\PsyREG.dll' # DLL file path
self.REG_dll = ctypes.CDLL(self.path_REG_dll) # load DLL
#? Define variables
        PSYREG_API_VERSION = 1 # Version of the API that this header is intended for. Should be compared to PsyREGAPIVersion()
        INVALID_DATASOURCE = -1 # Constant representing an invalid Datasource
        BSS_GOOD = 0x0000 # no flags set. the device is ok and there are no problems
        BSS_CONNECTING = 0x0001 # device connection is being established (in the process of opening)
        BSS_WAITING = 0x0002 # waiting for device data (buffer empty)
        BSS_BUSY = 0x0004 # device is in use by another application
        BSS_NODEVICE = 0x0008 # there is no device by this name connected anymore
        BSS_READERROR = 0x0010 # was there a read error during the last read
        BSS_BADCFG = 0x0020 # was there a bad configuration for the device (e.g. conflicting values or unset values)
        BSS_CANTPROCESS = 0x0040 # was there a processing error? [set at bitsource level]
        BSS_INITERROR = 0x0080 # was there an initialization error / problem with the data structure [set at bitsource level]
        BSS_TIMEOUT = 0x0100 # did the reader time out since the last device read [set at bitsource level]
        BSS_GENERALERROR = 0x8000 # was there any error at all. set if any other error (busy, nodevice, readerror, cantprocess) [set at bitsource level]
        BSS_INVALID = 0x0200 # is the DataSource invalid. This occurs when a DataSource was not created or has already been destroyed.
def get_name(self):
#? Obtain the Type and ID of the Psyleron, and return in a formatted string
self.invoke_RNG()
source = self.get_source()
# Define all the types of results and arguments in the PsyREG dll functions
self.REG_dll.PsyREGGetDeviceTypeBSTR.restype = ctypes.c_char_p
self.REG_dll.PsyREGGetDeviceTypeBSTR.argtypes = [ctypes.c_int32]
self.REG_dll.PsyREGGetDeviceIdBSTR.restype = ctypes.c_char_p
self.REG_dll.PsyREGGetDeviceIdBSTR.argtypes = [ctypes.c_int32]
PsyREG_ID = self.REG_dll.PsyREGGetDeviceIdBSTR(source)
PsyREG_ID = PsyREG_ID.decode("utf-8") #Decode from byte to string
PsyREG_Type = self.REG_dll.PsyREGGetDeviceTypeBSTR(source)
PsyREG_Type = PsyREG_Type.decode("utf-8") #Decode from byte to string
name_PsyREG = ("Psyleron %s: %s" % (PsyREG_Type, PsyREG_ID)) # Format string of the name
# print(name_PsyREG)
return name_PsyREG
def get_bits(self, maxbts):
#? Obtain 1 bit of random data; 1 or 0
self.invoke_RNG()
source = self.get_source()
self.open_RNG()
# Define all the types of results and arguments in the PsyREG dll functions
self.REG_dll.PsyREGGetBit.restype = ctypes.c_int32
self.REG_dll.PsyREGGetBit.argtypes = [ctypes.c_int32,ctypes.POINTER(ctypes.c_ubyte)]
# For loop for x number of MAXBTS stated
str_list = []
for bit_psyreg in range(maxbts):
bit_psyreg = ctypes.c_ubyte()
self.REG_dll.PsyREGGetBit(source, ctypes.byref(bit_psyreg))
str_list.append(bit_psyreg.value)
str_bits = ''.join(str(x) for x in str_list)
# print(str_bits)
return str_bits
def get_bytes(self, maxbts):
#? Obtain 1 byte (between 0 and 255) of random data
self.invoke_RNG()
source = self.get_source()
self.open_RNG()
# Define all the types of results and arguments in the PsyREG dll functions
self.REG_dll.PsyREGGetByte.restype = ctypes.c_int32
self.REG_dll.PsyREGGetByte.argtypes = [ctypes.c_int32,ctypes.POINTER(ctypes.c_ubyte)]
# For loop for x number of MAXBTS stated
str_list = []
for byte_psyreg in range(maxbts):
byte_psyreg = ctypes.c_ubyte()
self.REG_dll.PsyREGGetByte(source, ctypes.byref(byte_psyreg))
str_list.append(byte_psyreg.value)
str_bytes = ''.join(str(x) for x in str_list)
# print(str_bytes)
return str_bytes
    # def get_bits(self,max_bits): ######! NOT WORKING YET
    #     #? Obtain chunks of bits in a single call
    #     #! If re-enabled, rename it: it would shadow the working get_bits() above.
    #     #! Also: argtypes declares four arguments but the call below passes three.
    #     self.invoke_RNG()
    #     source = self.get_source()
    #     self.open_RNG()
    #     # Define all the types of results and arguments in the PsyREG dll functions
    #     self.REG_dll.PsyREGGetBits.restype = ctypes.c_int32
    #     self.REG_dll.PsyREGGetBits.argtypes = [ctypes.c_int32,ctypes.POINTER(ctypes.c_ubyte),ctypes.c_int32,ctypes.c_int32]
    #     bits_psyreg = ctypes.c_ubyte()
    #     self.REG_dll.PsyREGGetBits(source, ctypes.byref(bits_psyreg), max_bits)
    #     return bits_psyreg.value
def invoke_RNG(self):
        #? Call the Psyleron; if it's not called, the device won't know you're talking to it
# Define all the types of results and arguments in the PsyREG dll function
self.REG_dll.PsyREGEnumerateSources.restype = ctypes.c_int32
self.REG_dll.PsyREGEnumerateSources.argtypes = []
PsyREG_EnumerateSources = self.REG_dll.PsyREGEnumerateSources()
return PsyREG_EnumerateSources
def get_source(self):
        #? Get a source handle from the Psyleron; without one, no data can be read even after enumeration
# Define all the types of results and arguments in the PsyREG dll function
self.REG_dll.PsyREGGetSource.restype = ctypes.c_int32
self.REG_dll.PsyREGGetSource.argtypes = [ctypes.c_uint32]
PsyREG_GetSource = self.REG_dll.PsyREGGetSource(0)
return PsyREG_GetSource
def open_RNG(self):
#? Open the stream of data to obtain bits and bytes
source = self.get_source()
# Define all the types of results and arguments in the PsyREG dll function
self.REG_dll.PsyREGOpen.restype = ctypes.c_int32
self.REG_dll.PsyREGOpen.argtypes = [ctypes.c_int32]
PsyREG_Open = self.REG_dll.PsyREGOpen(source)
return PsyREG_Open
def close_RNG(self):
#? Closes an open DataSource and prevents further interaction
source = self.get_source()
# Define all the types of results and arguments in the PsyREG dll function
self.REG_dll.PsyREGClose.restype = ctypes.c_void_p
self.REG_dll.PsyREGClose.argtypes = [ctypes.c_int32]
PsyREG_Close = self.REG_dll.PsyREGClose(source)
return PsyREG_Close
def release_RNG(self):
#? Releases a given source back to the source manager
source = self.get_source()
# Define all the types of results and arguments in the PsyREG dll function
self.REG_dll.PsyREGReleaseSource.restype = ctypes.c_void_p
self.REG_dll.PsyREGReleaseSource.argtypes = [ctypes.c_int32]
PsyREG_Release = self.REG_dll.PsyREGReleaseSource(source)
return PsyREG_Release
def clear_RNG(self):
#? Clears the entire list of sources built by one or more calls to EnumerateSources (invoke_RNG)
# Define all the types of results and arguments in the PsyREG dll function
self.REG_dll.PsyREGClearSources.restype = ctypes.c_void_p
self.REG_dll.PsyREGClearSources.argtypes = []
PsyREG_Clear = self.REG_dll.PsyREGClearSources()
return PsyREG_Clear
def reset_RNG(self):
#? Signals that the data in the DataSource internal buffer is stale and performs a clear.
source = self.get_source()
# Define all the types of results and arguments in the PsyREG dll function
self.REG_dll.PsyREGReset.restype = ctypes.c_void_p
self.REG_dll.PsyREGReset.argtypes = [ctypes.c_int32]
PsyREG_Reset = self.REG_dll.PsyREGReset(source)
return PsyREG_Reset
def get_status(self):
#? Obtain 0 if status is good, 512 if status is bad
self.invoke_RNG()
source = self.get_source()
# Define all the types of results and arguments in the PsyREG dll functions
self.REG_dll.PsyREGGetStatus.restype = ctypes.c_int32
self.REG_dll.PsyREGGetStatus.argtypes = [ctypes.c_int32]
# Pass functions from PsyREG dll
PsyREG_Status = self.REG_dll.PsyREGGetStatus(source)
return PsyREG_Status
def count_PsyREGs(self):
#? Count number of Psylerons connected
self.invoke_RNG()
# Define all the types of results and arguments in the PsyREG dll function
self.REG_dll.PsyREGGetSourceCount.restype = ctypes.c_uint32
self.REG_dll.PsyREGGetSourceCount.argtypes = []
PsyREG_GetSourceCount = self.REG_dll.PsyREGGetSourceCount(0)
return PsyREG_GetSourceCount
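# Usage sketch (illustrative; assumes a Psyleron device and PsyREG.dll are
# present, and that a zero status means BSS_GOOD):
#   reg = PsyREG()
#   if reg.count_PsyREGs() > 0 and reg.get_status() == 0:
#       print(reg.get_name())
#       print(reg.get_bits(8))  # e.g. "10010110"
#   reg.close_RNG()
#   reg.release_RNG()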
class Neulog():
#! Not implemented: "ResetSensor:[],[]", "SetPositiveDirection:[],[],[]" & "# SetRFID:[]"
#! Can't support more than 20 samples per second for more than 5 minutes
    def __init__(self, host, *additional_sensors):
        self.name = "Neulog"
        self.host = str(host)  # despite the name, this is the local port of the Neulog API server (e.g. '22002')
self.sensor_id = '1'
self.set_sensors_id()
self.parameters = ':'
# Check if there's more than 1 sensor given as argument
for sensors in additional_sensors:
self.parameters += '[' + sensors + '],[' + self.sensor_id + '],'
self.parameters = self.parameters[:-1]
def get_url(self,command):
# Construct url query
url = 'http://localhost:'+self.host+'/NeuLogAPI?'+command
return url
def get_data_dict(self,url):
        # Obtain the response from the url request
data_dict = requests.get(url)
# Convert to json object, so it can be used as dictionary
json_data_dict = json.loads(data_dict.text)
return json_data_dict
def set_sensors_id(self):
# Set the ID of the connected sensors
command = 'SetSensorsID'
parameters = ':['+ self.sensor_id +']'
url = self.get_url(command+parameters)
data_dict = self.get_data_dict(url)
return 'All sensors changed the ID to: ' + data_dict[command]
def set_sensor_range(self, sensor_range):
# Change the range of the sensor (GSR: 1 = Arb, 2 = mS; Pulse: 1 = BPM, 2 = Wave[Arb])
command = 'SetSensorRange'
parameters = self.parameters
sensor_range = ',['+ sensor_range +']'
url = self.get_url(command+parameters+sensor_range)
data_dict = self.get_data_dict(url)
return 'Sensor range changed: ' + data_dict[command]
def get_version(self):
# Obtain the version of the Neulog API
command = 'GetServerVersion'
url = self.get_url(command)
data_dict = self.get_data_dict(url)
return 'Neulog API version: ' + data_dict[command]
def get_status(self):
# Get the status of the server (it's wrongly written as 'sever' in the API)
command = 'GetSeverStatus'
url = self.get_url(command)
data_dict = self.get_data_dict(url)
return 'Neulog API status: ' + data_dict[command]
def get_values(self):
# Obtain values from the sensors
command = 'GetSensorValue'
parameters = self.parameters
url = self.get_url(command+parameters)
data_dict = self.get_data_dict(url)
# Obtains the values from the data_dict
data_list = data_dict[command]
return data_list
# print(data_dict[command])
def exp_start(self,sample_rate,sample_size):
# Start the experiment with the defined parameters; an experiment needs to be stopped before starting a new one
command = 'StartExperiment'
parameters = self.parameters + ',[' + sample_rate + '],[' + sample_size + ']'
url = self.get_url(command+parameters)
data_dict = self.get_data_dict(url)
return 'Start Neulog experiment: ' + data_dict[command] + ' at ' + datetime.now().strftime('%H:%M:%S.%f')[:-3]
def exp_stop(self):
# Stops the experiment; an experiment needs to be stopped before starting a new one
command = 'StopExperiment'
url = self.get_url(command)
data_dict = self.get_data_dict(url)
return 'Stopped Neulog experiment: ' + data_dict[command] + ' at ' + datetime.now().strftime('%H:%M:%S.%f')[:-3]
def get_exp_values(self):
# Obtain values of the current or last ran experiment
command = 'GetExperimentSamples'
parameters = self.parameters
url = self.get_url(command+parameters)
data_dict = self.get_data_dict(url)
num = 0
# for each list of data within the dictionary, delete the first 2 elements (sensor_type and sensor_id)
for lists in data_dict[command]:
del data_dict[command][num][:2]
num += 1
# return list of lists of each sensor_type with only the values recorded
return data_dict[command]
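# Usage sketch (illustrative; assumes the Neulog API is running locally on port
# 22002 with a GSR sensor connected; '8' as a sample-rate code and '100' as a
# sample count are assumptions based on the Neulog API conventions):
#   neu = Neulog('22002', 'GSR')
#   print(neu.get_version())
#   print(neu.exp_start('8', '100'))
#   time.sleep(2)
#   print(neu.exp_stop())
#   samples = neu.get_exp_values()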
class Create_Window(QDialog):
#& INIT
def __init__(self):
super().__init__()
self.title = "Physiological Anticipatory Activity (PAA) 2.0"
        self.path_logo_UPIDE = os.path.join(os.getcwd(), 'Presentimiento', 'Logo_UPIDE.png') # Logo UPIDE path
self.setWindowIcon(QtGui.QIcon(self.path_logo_UPIDE))
self.setWindowTitle(self.title)
self.setFixedSize(1600, 800)
# call functions:
self.create_settings_layout()
self.create_data_layout()
self.create_phys_layout()
self.create_stats_layout()
self.create_buttons()
# Create list of stimuli:
self.image_list_neutral = []
self.image_list_neutral_filenames = []
self.image_list_excitatory = []
self.image_list_excitatory_filenames = []
self.image_list = []
self.image_list_filenames = []
        # Create the layout in grid format for the groups (topleft, topright, etc.)
Tab_Widget = QTabWidget()
main_layout = QVBoxLayout()
Tab_Widget.addTab(self.gb_settings, "Settings")
Tab_Widget.addTab(self.gb_session_data, "Session Data")
Tab_Widget.addTab(self.gb_phys_data, "Physiological Data")
Tab_Widget.addTab(self.gb_stats_data, "Statistical Analysis")
main_layout.addWidget(Tab_Widget)
main_layout.addLayout(self.layout_buttons,2)
self.setLayout(main_layout)
#& LAYOUT
def create_settings_layout(self):
#& GROUP BOXES
#& MAIN
self.gb_settings = QGroupBox("Session settings:")
#& 1. SOURCES & STIMULI
#& 1.1. SOURCES & TESTING
self.gb_sources_n_test = QGroupBox("RNG sources:")
#& 1.2. STIMULI
self.gb_stimuli = QGroupBox("Select stimuli:")
#& 1.3. PHYSIOLOGICAL
self.gb_physiological = QGroupBox("Select physiological data:")
#& 2. EXP DESIGN
self.gb_exp_design = QGroupBox("Experimental design:")
#& 2.1 TRIALS & SESSION
#& 2.1.1. SESSION ID
self.gb_session_id = QGroupBox("Session ID:")
#& 2.1.2. TRIAL TYPE
self.gb_trial_type = QGroupBox("Type of trials:")
#& 2.1.3. TRIALS NUM
self.gb_num_trials = QGroupBox("Number of trials:")
#& 2.2. TRIALS DURATION
self.gb_trial_duration = QGroupBox("Duration of each part of a trial (seconds):")
#& 2.2.1. TRIALS DURATION ELEMENTS
self.gb_pre_screen = QGroupBox("Pre-stimulus screen duration:")
self.gb_stimulus_duration = QGroupBox("Stimulus duration:")
self.gb_post_screen = QGroupBox("Post-stimulus screen duration:")
#& 2.3. DELAYS
self.gb_delays = QGroupBox("Delays with white screen between trials (seconds):")
#& 2.3.1. FIRST SCREEN
self.gb_first_screen = QGroupBox("Only-once before first trial:")
#& 2.3.2. DELAY BEFORE TRIAL
self.gb_before_interval = QGroupBox("Interval before each trial:")
#& 2.3.3. DELAY AFTER TRIAL
self.gb_after_interval = QGroupBox("Interval after each trial:")
#& SPIN BOXES
#& 2.1.1. SESSION ID
self.sb_session_id = QSpinBox()
self.sb_session_id.setValue(1)
#& 2.1.3. TRIALS NUM
self.sb_num_trials = QSpinBox()
self.sb_num_trials.setValue(3) #$ 45
#& 2.2.1. TRIALS DURATION ELEMENTS
self.sb_pre_screen = QSpinBox()
self.sb_pre_screen.setValue(1) #$ 3
self.sb_stim_duration = QSpinBox()
self.sb_stim_duration.setValue(1) #$ 3
self.sb_post_screen = QSpinBox()
self.sb_post_screen.setValue(1) #$ 9
#& 2.3.1. FIRST SCREEN
self.sb_first_screen = QSpinBox()
self.sb_first_screen.setValue(1) #$ 10
#& 2.3.3. DELAY BEFORE TRIAL
self.sb_before_min_interval = QSpinBox()
self.sb_before_min_interval.setValue(0) #$ 0
self.sb_before_max_interval = QSpinBox()
self.sb_before_max_interval.setValue(0) #$ 0
self.sb_before_min_interval.setEnabled(False)
self.sb_before_max_interval.setEnabled(False)
#& 2.3.3. DELAY AFTER TRIAL
self.sb_after_min_interval = QSpinBox()
self.sb_after_min_interval.setValue(0) #$ 0
self.sb_after_max_interval = QSpinBox()
self.sb_after_max_interval.setValue(1) #$ 5
#& COMBO BOXES
#& 1.1. SOURCES
self.combo_rng_sources = QComboBox()
self.combo_rng_sources.addItem("-")
#& 1.3. PHYSIOLOGICAL
self.combo_skin_conductance = QComboBox()
self.combo_skin_conductance.addItem("-")
self.combo_skin_conductance.setDisabled(True)
self.combo_heart_rate = QComboBox()
self.combo_heart_rate.addItem("-")
self.combo_heart_rate.setDisabled(True)
self.combo_brainwaves = QComboBox()
self.combo_brainwaves.addItem("-")
self.combo_brainwaves.setDisabled(True)
self.combo_skin_conductance_sample = QComboBox()
self.combo_heart_rate_sample = QComboBox()
self.combo_brainwaves_sample = QComboBox()
self.combo_skin_conductance_sample.addItems(["20 per second","10 per second","5 per second","2 per second","1 per second"])
self.combo_heart_rate_sample.addItems(["20 per second","10 per second","5 per second","2 per second","1 per second"])
self.combo_brainwaves_sample.addItems(["20 per second","10 per second","5 per second","2 per second","1 per second"])
self.combo_skin_conductance_sample.setDisabled(True)
self.combo_heart_rate_sample.setDisabled(True)
self.combo_brainwaves_sample.setDisabled(True)
#& 2.1.2. TRIAL TYPE
self.combo_trial_type = QComboBox()
self.combo_trial_type.addItem("Free-Running")
self.combo_trial_type.addItem("On-Demand")
self.combo_trial_type.currentIndexChanged.connect(self.click_trial_type)
#& TEXT BOXES
#& 1.1. TESTING & TESTING
self.tb_gen_bits = QLineEdit("") #? Add color to background: gen_bits.setStyleSheet("QLineEdit { background-color: rgb(220,220,220) }")
#& 1.3. PHYSIOLOGICAL
        self.tb_neulog_port = QLineEdit("22002") #$ Localhost Port (e.g. '22002')
self.tb_skin_conductance_test = QLineEdit("Test")
self.tb_heart_rate_test = QLineEdit("Test")
self.tb_brainwaves_test = QLineEdit("Test")
self.tb_skin_conductance_test.setDisabled(True)
self.tb_heart_rate_test.setDisabled(True)
self.tb_brainwaves_test.setDisabled(True)
#& BUTTONS
#& 1.1. SOURCES & TESTING
butt_refresh_sources = QPushButton('Refresh RNG sources')
butt_refresh_sources.clicked.connect(self.click_refresh_sources)
butt_generate_bits = QPushButton('Test: Generate bits')
butt_generate_bits.clicked.connect(self.click_generate_bits)
#& 1.2. STIMULI
butt_neutral_stimuli = QPushButton("Select neutral stimuli library")
butt_neutral_stimuli.clicked.connect(self.click_neutral_stimuli)
butt_excitatory_stimuli = QPushButton('Select excitatory stimuli library')
butt_excitatory_stimuli.clicked.connect(self.click_excitatory_stimuli)
#& 1.3. PHYSIOLOGICAL
butt_refresh_neulog = QPushButton('Refresh Neulog sources')
butt_refresh_neulog.clicked.connect(self.click_refresh_neulog)
butt_refresh_physiological = QPushButton('Refresh physiological sources')
butt_refresh_physiological.clicked.connect(self.click_refresh_physiological)
self.butt_skin_conductance_test = QPushButton('Test: Get values')
self.butt_skin_conductance_test.clicked.connect(self.click_skin_conductance_test)
self.butt_skin_conductance_test.setDisabled(True)
self.butt_heart_rate_test = QPushButton('Test: Get values')
self.butt_heart_rate_test.clicked.connect(self.click_heart_rate_test)
self.butt_heart_rate_test.setDisabled(True)
self.butt_brainwaves_test = QPushButton('Test: Get values')
self.butt_brainwaves_test.clicked.connect(self.click_brainwaves_test)
self.butt_brainwaves_test.setDisabled(True)
#& CHECK BOXES
#& 1.3. PHYSIOLOGICAL
self.cb_skin_conductance = QCheckBox("Skin Conductance")
self.cb_skin_conductance.toggled.connect(self.check_skin_conductance)
self.cb_heart_rate = QCheckBox("Heart Rate")
self.cb_heart_rate.toggled.connect(self.check_heart_rate)
self.cb_brainwaves = QCheckBox("Brain Waves")
self.cb_brainwaves.toggled.connect(self.check_brainwaves)
#& SET LAYOUTS
# declare layouts
layout_main = QHBoxLayout() #MAIN
layout_source_n_stimuli = QVBoxLayout() # 1. SOURCES & STIMULI
layout_sources_n_test = QGridLayout() # 1.1. SOURCES & TESTING
layout_stimuli = QHBoxLayout() # 1.2. STIMULI
layout_physiological = QGridLayout() # 1.3. PHYSIOLOGICAL
layout_exp_design = QHBoxLayout() # 2. EXP DESIGN
layout_trial_and_screen = QVBoxLayout() # 2.1. TRIALS
layout_session_id = QVBoxLayout() # 2.1.1. SESSION ID
layout_trial_type = QVBoxLayout() # 2.1.2. TRIAL TYPE
layout_trial = QVBoxLayout() # 2.1.3. TRIALS NUM
layout_duration = QGridLayout() # 2.2. TRIALS DURATION
layout_dur_pre = QVBoxLayout() # 2.2.1. TRIALS DURATION ELEMENTS
layout_dur_stimulus = QVBoxLayout() # 2.2.1. TRIALS DURATION ELEMENTS
layout_dur_post = QVBoxLayout() # 2.2.1. TRIALS DURATION ELEMENTS
layout_delays = QVBoxLayout() # 2.3. DELAYS
layout_f_screen = QVBoxLayout() #2.3.1. FIRST SCREEN
layout_before_interval = QGridLayout()# 2.3.2. DELAY BEFORE TRIAL
layout_after_interval = QGridLayout()# 2.3.3. DELAY AFTER TRIAL
#& MAIN
layout_main.addLayout(layout_source_n_stimuli,1)
layout_main.addWidget(self.gb_exp_design,1)
self.gb_settings.setLayout(layout_main)
#& 1. SOURCES & STIMULI
layout_source_n_stimuli.addWidget(self.gb_sources_n_test)
layout_source_n_stimuli.addWidget(self.gb_stimuli)
layout_source_n_stimuli.addWidget(self.gb_physiological)
#& 1.1. SOURCES & TESTING
layout_sources_n_test.addWidget(self.combo_rng_sources,0,0,1,3)
layout_sources_n_test.addWidget(butt_refresh_sources,0,3,1,1)
layout_sources_n_test.addWidget(self.tb_gen_bits,0,4,1,3)
layout_sources_n_test.addWidget(butt_generate_bits,0,7,1,1)
self.gb_sources_n_test.setLayout(layout_sources_n_test)
#& 1.2. STIMULI
layout_stimuli.addWidget(butt_neutral_stimuli)
layout_stimuli.addWidget(butt_excitatory_stimuli)
self.gb_stimuli.setLayout(layout_stimuli)
#& 1.3. PHYSIOLOGICAL
layout_physiological.addWidget(self.tb_neulog_port,0,0,1,1)
layout_physiological.addWidget(butt_refresh_neulog,0,1,1,1)
layout_physiological.addWidget(butt_refresh_physiological,0,2,1,5)
layout_physiological.addWidget(self.cb_skin_conductance,1,0,1,1)
layout_physiological.addWidget(self.cb_heart_rate,2,0,1,1)
layout_physiological.addWidget(self.cb_brainwaves,3,0,1,1)
layout_physiological.addWidget(self.combo_skin_conductance,1,1,1,1)
layout_physiological.addWidget(self.combo_heart_rate,2,1,1,1)
layout_physiological.addWidget(self.combo_brainwaves,3,1,1,1)
layout_physiological.addWidget(self.tb_skin_conductance_test,1,2,1,2)
layout_physiological.addWidget(self.tb_heart_rate_test,2,2,1,2)
layout_physiological.addWidget(self.tb_brainwaves_test,3,2,1,2)
layout_physiological.addWidget(self.butt_skin_conductance_test,1,4,1,1)
layout_physiological.addWidget(self.butt_heart_rate_test,2,4,1,1)
layout_physiological.addWidget(self.butt_brainwaves_test,3,4,1,1)
layout_physiological.addWidget(self.combo_skin_conductance_sample,1,5,1,2)
layout_physiological.addWidget(self.combo_heart_rate_sample,2,5,1,2)
layout_physiological.addWidget(self.combo_brainwaves_sample,3,5,1,2)
self.gb_physiological.setLayout(layout_physiological)
#& 2. EXP DESIGN
layout_exp_design.addLayout(layout_trial_and_screen)
layout_exp_design.addWidget(self.gb_trial_duration)
layout_exp_design.addWidget(self.gb_delays,1)
self.gb_exp_design.setLayout(layout_exp_design)
#& 2.1 TRIALS & SESSION
layout_trial_and_screen.addWidget(self.gb_session_id)
layout_trial_and_screen.addWidget(self.gb_trial_type)
layout_trial_and_screen.addWidget(self.gb_num_trials)
#& 2.1.1. SESSION ID
layout_session_id.addWidget(self.sb_session_id)
self.gb_session_id.setLayout(layout_session_id)
#& 2.1.2. TRIAL TYPE
layout_trial_type.addWidget(self.combo_trial_type)
self.gb_trial_type.setLayout(layout_trial_type)
#& 2.1.3. TRIALS NUM
layout_trial.addWidget(self.sb_num_trials)
self.gb_num_trials.setLayout(layout_trial)
#& 2.2. TRIALS DURATION
layout_duration.addWidget(self.gb_pre_screen,0,0)
layout_duration.addWidget(self.gb_stimulus_duration,1,0)
layout_duration.addWidget(self.gb_post_screen,2,0)
self.gb_trial_duration.setLayout(layout_duration)
#& 2.2.1. TRIALS DURATION ELEMENTS
layout_dur_pre.addWidget(self.sb_pre_screen)
self.gb_pre_screen.setLayout(layout_dur_pre)
layout_dur_stimulus.addWidget(self.sb_stim_duration)
self.gb_stimulus_duration.setLayout(layout_dur_stimulus)
layout_dur_post.addWidget(self.sb_post_screen)
self.gb_post_screen.setLayout(layout_dur_post)
#& 2.3. DELAYS
layout_delays.addWidget(self.gb_first_screen)
layout_delays.addWidget(self.gb_before_interval)
layout_delays.addWidget(self.gb_after_interval)
self.gb_delays.setLayout(layout_delays)
#& 2.3.1. FIRST SCREEN
layout_f_screen.addWidget(self.sb_first_screen)
self.gb_first_screen.setLayout(layout_f_screen)
#& 2.3.2. BEFORE TRIAL
layout_before_interval.addWidget(self.sb_before_min_interval,0,0)
layout_before_interval.addWidget(self.sb_before_max_interval,0,1)
self.gb_before_interval.setLayout(layout_before_interval)
#& 2.3.3. AFTER TRIAL
layout_after_interval.addWidget(self.sb_after_min_interval,0,0)
layout_after_interval.addWidget(self.sb_after_max_interval,0,1)
self.gb_after_interval.setLayout(layout_after_interval)
def create_data_layout(self):
#& GROUP BOXES
self.gb_session_data = QGroupBox("Session Data:")
#& TEXT BOX
# Create text boxes
self.tb_start_at = QLineEdit("Session started at:") #? Add color to background: tb_start_at.setStyleSheet("QLineEdit { background-color: rgb(220,220,220) }")
self.tb_finish_at = QLineEdit("Session finished at:")
self.tb_onset_at = QLineEdit("First trial started at:")
self.tb_stimulus_id = QTextEdit("Stimulus ID:")
self.tb_trial_id = QTextEdit("Trial ID:")
self.tb_time_start_trial = QTextEdit("Time at the start of trial:")
self.tb_dur_before_interval = QTextEdit("Interval before each trial (s):")
self.tb_onset_to_trial = QTextEdit("First trial to end of this trial (s):")
self.tb_seconds_end_trial = QTextEdit("Duration of each trial (s):")
self.tb_dur_after_interval = QTextEdit("Interval after each trial (s):")
self.tb_time_end_trial = QTextEdit("Time at the end of trial:")
#& SET LAYOUT
layout = QGridLayout()
#top lane
layout.addWidget(self.tb_start_at,0,0,1,3)
layout.addWidget(self.tb_onset_at,0,3,1,2)
layout.addWidget(self.tb_finish_at,0,5,1,3)
# below lane
layout.addWidget(self.tb_trial_id,1,0,5,1)
layout.addWidget(self.tb_stimulus_id,1,1,5,1)
layout.addWidget(self.tb_time_start_trial,1,2,5,1)
layout.addWidget(self.tb_time_end_trial,1,3,5,1)
layout.addWidget(self.tb_dur_before_interval,1,4,5,1)
layout.addWidget(self.tb_dur_after_interval,1,5,5,1)
layout.addWidget(self.tb_seconds_end_trial,1,6,5,1)
layout.addWidget(self.tb_onset_to_trial,1,7,5,1)
self.gb_session_data.setLayout(layout)
def create_phys_layout(self):
#& GROUP BOXES
self.gb_phys_data = QGroupBox("")
self.gb_phys_time = QGroupBox("Physiological Time Data:")
self.gb_phys_trial_inst = QGroupBox("Trials and Instances:")
self.gb_phys_skin_conductance = QGroupBox("Skin Conductance Data:")
self.gb_phys_heart_rate = QGroupBox("Heart Rate Data:")
self.gb_phys_brainwaves = QGroupBox("Brainwaves Data:")
#& TEXT BOX
# Create text boxes
self.tb_phys_trial_id = QTextEdit("Trial ID [n]:")
self.tb_phys_instance_id = QTextEdit("Instance [i]:")
self.tb_phys_start_at = QLineEdit("Physiological data started at:")
self.tb_phys_finish_at = QLineEdit("Physiological data finished at:")
self.tb_skin_conductance_values = QTextEdit("Skin conductance values [xi]:")
self.tb_skin_conductance_timestamp = QTextEdit("Skin conductance timestamps [t_xi]:")
self.tb_heart_rate_values = QTextEdit("Heart rate values [yi]:")
self.tb_heart_rate_timestamp = QTextEdit("Heart rate timestamps [t_yi]:")
self.tb_brainwaves_values = QTextEdit("Brainwaves values [zi]:")
self.tb_brainwaves_timestamp = QTextEdit("Brainwaves timestamps [t_zi]:")
self.tb_skin_conductance_media = QTextEdit("Skin conductance media [mx_paa]:")
self.tb_skin_conductance_sd = QTextEdit("Skin conductance sd [sx_paa]:")
self.tb_skin_conductance_Z = QTextEdit("Skin conductance Z [Z_xi]:")
self.tb_skin_conductance_f = QTextEdit("Skin conductance f [f_xi]:")
self.tb_heart_rate_media = QTextEdit("Heart rate media [my_paa]:")
self.tb_heart_rate_sd = QTextEdit("Heart rate sd [sy_paa]:")
self.tb_heart_rate_Z = QTextEdit("Heart rate Z [Z_yi]:")
self.tb_heart_rate_f = QTextEdit("Heart rate f [f_yi]:")
self.tb_brainwaves_media = QTextEdit("Brainwaves media [mz_paa]:")
self.tb_brainwaves_sd = QTextEdit("Brainwaves sd [sz_paa]:")
self.tb_brainwaves_Z = QTextEdit("Brainwaves Z [Z_zi]:")
self.tb_brainwaves_f = QTextEdit("Brainwaves f [f_zi]:")
#& SET LAYOUT
main_layout = QGridLayout()
time_layout = QGridLayout()
trial_inst_layout = QGridLayout()
skin_conductance_layout = QGridLayout()
heart_rate_layout = QGridLayout()
brainwaves_layout = QGridLayout()
# time layout
time_layout.addWidget(self.tb_phys_start_at,0,0,1,4)
time_layout.addWidget(self.tb_phys_finish_at,0,4,1,4)
# trial and instances layout
trial_inst_layout.addWidget(self.tb_phys_trial_id,0,0,15,1)
trial_inst_layout.addWidget(self.tb_phys_instance_id,0,1,15,1)
# skin conductance layout
skin_conductance_layout.addWidget(self.tb_skin_conductance_values,0,0,5,1)
skin_conductance_layout.addWidget(self.tb_skin_conductance_timestamp,0,1,5,1)
skin_conductance_layout.addWidget(self.tb_skin_conductance_media,5,0,5,1)
skin_conductance_layout.addWidget(self.tb_skin_conductance_sd,5,1,5,1)
skin_conductance_layout.addWidget(self.tb_skin_conductance_Z,10,0,5,1)
skin_conductance_layout.addWidget(self.tb_skin_conductance_f,10,1,5,1)
# heart rate layout
heart_rate_layout.addWidget(self.tb_heart_rate_values,0,0,5,1)
heart_rate_layout.addWidget(self.tb_heart_rate_timestamp,0,1,5,1)
heart_rate_layout.addWidget(self.tb_heart_rate_media,5,0,5,1)
heart_rate_layout.addWidget(self.tb_heart_rate_sd,5,1,5,1)
heart_rate_layout.addWidget(self.tb_heart_rate_Z,10,0,5,1)
heart_rate_layout.addWidget(self.tb_heart_rate_f,10,1,5,1)
# brainwaves layout
brainwaves_layout.addWidget(self.tb_brainwaves_values,0,0,5,1)
brainwaves_layout.addWidget(self.tb_brainwaves_timestamp,0,1,5,1)
brainwaves_layout.addWidget(self.tb_brainwaves_media,5,0,5,1)
brainwaves_layout.addWidget(self.tb_brainwaves_sd,5,1,5,1)
brainwaves_layout.addWidget(self.tb_brainwaves_Z,10,0,5,1)
brainwaves_layout.addWidget(self.tb_brainwaves_f,10,1,5,1)
# Apply layouts
self.gb_phys_time.setLayout(time_layout)
self.gb_phys_trial_inst.setLayout(trial_inst_layout)
self.gb_phys_skin_conductance.setLayout(skin_conductance_layout)
self.gb_phys_heart_rate.setLayout(heart_rate_layout)
self.gb_phys_brainwaves.setLayout(brainwaves_layout)
# Apply main layout
main_layout.addWidget(self.gb_phys_time,0,0,1,8)
main_layout.addWidget(self.gb_phys_trial_inst,1,0,15,2)
main_layout.addWidget(self.gb_phys_skin_conductance,1,2,15,2)
main_layout.addWidget(self.gb_phys_heart_rate,1,4,15,2)
main_layout.addWidget(self.gb_phys_brainwaves,1,6,15,2)
self.gb_phys_data.setLayout(main_layout)
def create_stats_layout(self):
#& GROUP BOXES
self.gb_stats_data = QGroupBox("")
self.gb_stats_permut = QGroupBox("Randomized Permutation Settings:")
self.gb_stats_analysis = QGroupBox("Statistical Analysis Data:")
self.gb_stats_phys = QGroupBox("Include in analysis?:")
self.gb_stats_phys_D = QGroupBox("Physiological Difference D [D = Σ FnE - Σ FnN]:")
self.gb_stats_results = QGroupBox("Physiological Standard Normal Deviate Z [Z = (D – μD’)/ σD’]:")
#& TEXT BOX
self.tb_stats_ratio_n = QLineEdit("")
self.tb_stats_ratio_e = QLineEdit("")
self.tb_stats_shuffle = QLineEdit("5000")
self.tb_stats_session_id = QTextEdit("Session ID [S]:")
self.tb_stats_trial_id = QTextEdit("Trial ID [n]:")
self.tb_skin_conductance_ZD = QLineEdit("Skin conductance ZD:")
self.tb_skin_conductance_D = QLineEdit("Skin conductance D:")
self.tb_skin_conductance_Fn = QTextEdit("Skin conductance Fn [SUM_fx_paa]:")
self.tb_heart_rate_ZD = QLineEdit("Heart rate ZD:")
self.tb_heart_rate_D = QLineEdit("Heart rate D:")
self.tb_heart_rate_Fn = QTextEdit("Heart rate Fn [SUM_fy_paa]:")
self.tb_brainwaves_ZD = QLineEdit("Brainwaves ZD:")
self.tb_brainwaves_D = QLineEdit("Brainwaves D:")
self.tb_brainwaves_Fn = QTextEdit("Brainwaves Fn [SUM_fz_paa]:")
#& LABELS
self.lb_stats_ratio = QLabel("Ratio (E:N):")
self.lb_stats_dotdot = QLabel(":")
self.lb_stats_shuffle = QLabel('Randomized permutation cycles:')
#& CHECKBOXES
self.cb_stats_skin_conductance = QCheckBox("Skin Conductance")
self.cb_stats_heart_rate = QCheckBox("Heart Rate")
self.cb_stats_brainwaves = QCheckBox("Brainwaves")
#& BUTTONS
butt_shuffle = QPushButton('BEGIN ANALYSIS')
butt_shuffle.clicked.connect(self.click_shuffle)
#& SET LAYOUT
main_layout = QGridLayout()
ratio_layout = QHBoxLayout()
shuffle_layout = QHBoxLayout()
permut_layout = QGridLayout()
analysis_layout = QHBoxLayout()
phys_layout = QHBoxLayout()
phys_D_layout = QHBoxLayout()
results_layout = QHBoxLayout()
# permut layout
ratio_layout.addWidget(self.lb_stats_ratio)
ratio_layout.addWidget(self.tb_stats_ratio_e)
ratio_layout.addWidget(self.lb_stats_dotdot)
ratio_layout.addWidget(self.tb_stats_ratio_n)
shuffle_layout.addWidget(self.lb_stats_shuffle)
shuffle_layout.addWidget(self.tb_stats_shuffle)
phys_layout.addWidget(self.cb_stats_skin_conductance)
phys_layout.addWidget(self.cb_stats_heart_rate)
phys_layout.addWidget(self.cb_stats_brainwaves)
self.gb_stats_phys.setLayout(phys_layout)
phys_D_layout.addWidget(self.tb_skin_conductance_D)
phys_D_layout.addWidget(self.tb_heart_rate_D)
phys_D_layout.addWidget(self.tb_brainwaves_D)
self.gb_stats_phys_D.setLayout(phys_D_layout)
permut_layout.addLayout(ratio_layout,0,0,1,1)
permut_layout.addLayout(shuffle_layout,1,0,1,1)
permut_layout.addWidget(self.gb_stats_phys,0,1,2,2)
permut_layout.addWidget(self.gb_stats_phys_D,0,3,2,2)
# session and trials layout
analysis_layout.addWidget(self.tb_stats_session_id)
analysis_layout.addWidget(self.tb_stats_trial_id)
analysis_layout.addWidget(self.tb_skin_conductance_Fn)
analysis_layout.addWidget(self.tb_heart_rate_Fn)
analysis_layout.addWidget(self.tb_brainwaves_Fn)
# Results layout
results_layout.addWidget(self.tb_skin_conductance_ZD)
results_layout.addWidget(self.tb_heart_rate_ZD)
results_layout.addWidget(self.tb_brainwaves_ZD)
# Apply layouts
self.gb_stats_permut.setLayout(permut_layout)
self.gb_stats_analysis.setLayout(analysis_layout)
self.gb_stats_results.setLayout(results_layout)
# Apply main layout
main_layout.addWidget(self.gb_stats_permut,0,0,4,5)
main_layout.addWidget(butt_shuffle,4,0,1,5)
main_layout.addWidget(self.gb_stats_analysis,5,0,10,5)
main_layout.addWidget(self.gb_stats_results,15,2,1,3)
self.gb_stats_data.setLayout(main_layout)
def create_buttons(self):
#& BUTTONS
self.butt_start_session = QPushButton("START SESSION")
self.butt_start_session.clicked.connect(self.click_start_session)
self.butt_stop = QPushButton("STOP SESSION")
self.butt_stop.clicked.connect(self.click_stop)
self.butt_clear_data = QPushButton("Clear All Data")
self.butt_clear_data.clicked.connect(self.click_clear_data)
self.butt_export_CSV = QPushButton("Export Session Data to CSV")
self.butt_export_CSV.clicked.connect(self.click_export_CSV)
self.butt_export_CSV_phys = QPushButton("Export Physiological Data to CSV")
self.butt_export_CSV_phys.clicked.connect(self.click_export_CSV_phys)
#& SET LAYOUT
self.layout_buttons = QGridLayout()
self.layout_buttons.addWidget(self.butt_start_session,0,0,1,4)
self.layout_buttons.addWidget(self.butt_stop,1,0)
self.layout_buttons.addWidget(self.butt_clear_data,1,1)
self.layout_buttons.addWidget(self.butt_export_CSV,1,2)
self.layout_buttons.addWidget(self.butt_export_CSV_phys,1,3)
#& CLICK BUTTONS
def click_start_session(self):
# Call start_session with stated number of trials
self.start_session(int(self.sb_num_trials.value()))
def click_refresh_physiological(self): #!
pass
def click_refresh_neulog(self):
# Create neulog class
neu = Neulog(self.tb_neulog_port.text())
# Check the status of the Neulog server
status = neu.get_status()
# If server is ready, clear combos, add "neulog" and message "Ready"
if status == 'Neulog API status: Ready':
self.combo_skin_conductance.clear()
self.combo_heart_rate.clear()
self.combo_brainwaves.clear()
self.combo_skin_conductance.addItem(neu.name)
self.combo_heart_rate.addItem(neu.name)
self.combo_brainwaves.addItem(neu.name)
QMessageBox.about(self, "Neulog", "Neulog API status: Ready")
# If server is in experiment, stop experiment and message "stopping experiment"
elif status == 'Neulog API status: Experiment':
QMessageBox.about(self, "Neulog", "Stopping Neulog experiment, try again...")
neu.exp_stop()
else:
QMessageBox.about(self, "Neulog", "Impossible to connect, check port number")
def click_skin_conductance_test(self):
# Create neulog class with GSR sensor
neu = Neulog(self.tb_neulog_port.text(), 'GSR')
# Set GSR sensor range to milliSiemens
neu.set_sensor_range('2')
# if neulog is selected...
if neu.name == self.combo_skin_conductance.currentText():
# Obtain values
self.tb_skin_conductance_test.setText("GSR: " + str(neu.get_values()[0]))
else:
pass
def click_heart_rate_test(self):
# Create neulog class with Pulse sensor
neu = Neulog(self.tb_neulog_port.text(), 'Pulse')
neu.set_sensor_range('1')
# if neulog is selected...
if neu.name == self.combo_heart_rate.currentText():
# Obtain values
self.tb_heart_rate_test.setText("Pulse: " + str(neu.get_values()[0]))
else:
pass
def click_brainwaves_test(self): #!
pass
def check_skin_conductance(self):
if self.cb_skin_conductance.isChecked():
self.combo_skin_conductance.setEnabled(True)
self.tb_skin_conductance_test.setEnabled(True)
self.butt_skin_conductance_test.setEnabled(True)
self.combo_skin_conductance_sample.setEnabled(True)
else:
self.combo_skin_conductance.setEnabled(False)
self.tb_skin_conductance_test.setEnabled(False)
self.butt_skin_conductance_test.setEnabled(False)
self.combo_skin_conductance_sample.setEnabled(False)
def check_heart_rate(self):
if self.cb_heart_rate.isChecked():
self.combo_heart_rate.setEnabled(True)
self.tb_heart_rate_test.setEnabled(True)
self.butt_heart_rate_test.setEnabled(True)
self.combo_heart_rate_sample.setEnabled(True)
else:
self.combo_heart_rate.setEnabled(False)
self.tb_heart_rate_test.setEnabled(False)
self.butt_heart_rate_test.setEnabled(False)
self.combo_heart_rate_sample.setEnabled(False)
def check_brainwaves(self):
if self.cb_brainwaves.isChecked():
self.combo_brainwaves.setEnabled(True)
self.tb_brainwaves_test.setEnabled(True)
self.butt_brainwaves_test.setEnabled(True)
self.combo_brainwaves_sample.setEnabled(True)
else:
self.combo_brainwaves.setEnabled(False)
self.tb_brainwaves_test.setEnabled(False)
self.butt_brainwaves_test.setEnabled(False)
self.combo_brainwaves_sample.setEnabled(False)
def click_refresh_sources(self):
self.combo_rng_sources.clear()
pseudo = Pseudo_RNG()
self.combo_rng_sources.addItem(pseudo.name)
psyleron = PsyREG()
if psyleron.count_PsyREGs() >= 1:
self.combo_rng_sources.addItem(str(psyleron.get_name()))
else:
pass
def click_generate_bits(self):
self.tb_gen_bits.clear()
# self.gen_bits.setText("00101")
psyleron = PsyREG()
pseudo = Pseudo_RNG()
if str(psyleron.get_name()) == self.combo_rng_sources.currentText():
if psyleron.count_PsyREGs() >= 1:
self.tb_gen_bits.setText("Psyleron:" + str(psyleron.get_bits(6)))
psyleron.clear_RNG()
psyleron.release_RNG()
else:
QMessageBox.about(self, "ERROR", "Psyleron didn't send bits")
else:
if pseudo.name == self.combo_rng_sources.currentText():
self.tb_gen_bits.setText("Pseudo-RNG:" + str(pseudo.get_bits(6)))
else:
QMessageBox.about(self, "ERROR", "Pseudo-RNG didn't send bits")
def click_clear_data(self):
# Establish again the normal texts
self.tb_start_at.setText("Session started at:")
self.tb_finish_at.setText("Session finished at:")
self.tb_onset_at.setText("First trial started at:")
self.tb_skin_conductance_D.setText("Skin conductance D:")
self.tb_heart_rate_D.setText("Heart rate D:")
self.tb_brainwaves_D.setText("Brainwaves D:")
self.tb_trial_id.setText("Trial ID:")
self.tb_stimulus_id.setText("Stimulus ID:")
self.tb_time_start_trial.setText("Time at the start of trial:")
self.tb_onset_to_trial.setText("First trial to end of this trial (s):")
self.tb_seconds_end_trial.setText("Duration of each trial (s):")
self.tb_dur_after_interval.setText("Interval after each trial (s):")
self.tb_dur_before_interval.setText("Interval before each trial (s):")
self.tb_time_end_trial.setText("Time at the end of trial:")
self.tb_skin_conductance_Fn.setText("Skin conductance Fn [SUM_fx_paa]:")
self.tb_heart_rate_Fn.setText("Heart rate Fn [SUM_fy_paa]:")
self.tb_brainwaves_Fn.setText("Brainwaves Fn [SUM_fz_paa]:")
self.tb_phys_start_at.setText("Physiological data started at:")
self.tb_phys_finish_at.setText("Physiological data finished at:")
self.tb_phys_trial_id.setText("Trial ID [n]:")
self.tb_phys_instance_id.setText("Instance [i]:")
self.tb_skin_conductance_values.setText("Skin conductance values [xi]:")
self.tb_skin_conductance_timestamp.setText("Skin conductance timestamps [t_xi]:")
self.tb_skin_conductance_media.setText("Skin conductance media [mx_paa]:")
self.tb_skin_conductance_sd.setText("Skin conductance sd [sx_paa]:")
self.tb_skin_conductance_Z.setText("Skin conductance Z [Z_xi]:")
self.tb_skin_conductance_f.setText("Skin conductance f [f_xi]:")
self.tb_heart_rate_values.setText("Heart rate values [yi]:")
self.tb_heart_rate_timestamp.setText("Heart rate timestamps [t_yi]:")
self.tb_heart_rate_media.setText("Heart rate media [my_paa]:")
self.tb_heart_rate_sd.setText("Heart rate sd [sy_paa]:")
self.tb_heart_rate_Z.setText("Heart rate Z [Z_yi]:")
self.tb_heart_rate_f.setText("Heart rate f [f_yi]:")
self.tb_brainwaves_values.setText("Brainwaves values [zi]:")
self.tb_brainwaves_timestamp.setText("Brainwaves timestamps [t_zi]:")
self.tb_brainwaves_media.setText("Brainwaves media [mz_paa]:")
self.tb_brainwaves_sd.setText("Brainwaves sd [sz_paa]:")
self.tb_brainwaves_Z.setText("Brainwaves Z [Z_zi]:")
self.tb_brainwaves_f.setText("Brainwaves f [f_zi]:")
def click_export_CSV_phys(self):
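# Export pattern: read each textbox, drop its header line, split the rest
# into one entry per line, wrap each column in a pandas Series, concatenate
# the Series into a DataFrame, and write it to a user-chosen CSV file.
# click_export_CSV below follows the same pattern for the session-level data.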
# Convert text in textbox to string
str_session_id = "S" + self.sb_session_id.text()
str_phys_trial_id = self.tb_phys_trial_id.toPlainText()
str_phys_instance_id = self.tb_phys_instance_id.toPlainText()
str_skin_conductance_values = self.tb_skin_conductance_values.toPlainText()
str_skin_conductance_timestamp = self.tb_skin_conductance_timestamp.toPlainText()
str_skin_conductance_media = self.tb_skin_conductance_media.toPlainText()
str_skin_conductance_sd = self.tb_skin_conductance_sd.toPlainText()
str_skin_conductance_Z = self.tb_skin_conductance_Z.toPlainText()
str_skin_conductance_f = self.tb_skin_conductance_f.toPlainText()
str_heart_rate_values = self.tb_heart_rate_values.toPlainText()
str_heart_rate_timestamp = self.tb_heart_rate_timestamp.toPlainText()
str_heart_rate_media = self.tb_heart_rate_media.toPlainText()
str_heart_rate_sd = self.tb_heart_rate_sd.toPlainText()
str_heart_rate_Z = self.tb_heart_rate_Z.toPlainText()
str_heart_rate_f = self.tb_heart_rate_f.toPlainText()
str_brainwaves_values = self.tb_brainwaves_values.toPlainText()
str_brainwaves_timestamp = self.tb_brainwaves_timestamp.toPlainText()
str_brainwaves_media = self.tb_brainwaves_media.toPlainText()
str_brainwaves_sd = self.tb_brainwaves_sd.toPlainText()
str_brainwaves_Z = self.tb_brainwaves_Z.toPlainText()
str_brainwaves_f = self.tb_brainwaves_f.toPlainText()
# Convert string to list
list_session_id = str_session_id.split("\n")
list_phys_trial_id = str_phys_trial_id.split("\n")
list_phys_instance_id = str_phys_instance_id.split("\n")
list_skin_conductance_values = str_skin_conductance_values.split("\n")
list_skin_conductance_timestamp = str_skin_conductance_timestamp.split("\n")
list_skin_conductance_media = str_skin_conductance_media.split("\n")
list_skin_conductance_sd = str_skin_conductance_sd.split("\n")
list_skin_conductance_Z = str_skin_conductance_Z.split("\n")
list_skin_conductance_f = str_skin_conductance_f.split("\n")
list_heart_rate_values = str_heart_rate_values.split("\n")
list_heart_rate_timestamp = str_heart_rate_timestamp.split("\n")
list_heart_rate_media = str_heart_rate_media.split("\n")
list_heart_rate_sd = str_heart_rate_sd.split("\n")
list_heart_rate_Z = str_heart_rate_Z.split("\n")
list_heart_rate_f = str_heart_rate_f.split("\n")
list_brainwaves_values = str_brainwaves_values.split("\n")
list_brainwaves_timestamp = str_brainwaves_timestamp.split("\n")
list_brainwaves_media = str_brainwaves_media.split("\n")
list_brainwaves_sd = str_brainwaves_sd.split("\n")
list_brainwaves_Z = str_brainwaves_Z.split("\n")
list_brainwaves_f = str_brainwaves_f.split("\n")
# Remove first line in each of the session data lists
del list_phys_trial_id[0]
del list_phys_instance_id[0]
del list_skin_conductance_values[0]
del list_skin_conductance_timestamp[0]
del list_skin_conductance_media[0]
del list_skin_conductance_sd[0]
del list_skin_conductance_Z[0]
del list_skin_conductance_f[0]
del list_heart_rate_values[0]
del list_heart_rate_timestamp[0]
del list_heart_rate_media[0]
del list_heart_rate_sd[0]
del list_heart_rate_Z[0]
del list_heart_rate_f[0]
del list_brainwaves_values[0]
del list_brainwaves_timestamp[0]
del list_brainwaves_media[0]
del list_brainwaves_sd[0]
del list_brainwaves_Z[0]
del list_brainwaves_f[0]
# Convert list to series
ser_session_id = pandas.Series(list_session_id, name='Session ID [S]:')
ser_phys_trial_id = pandas.Series(list_phys_trial_id, name='Trial ID [n]:')
ser_phys_instance_id = pandas.Series(list_phys_instance_id, name='Instance ID [i]:')
ser_skin_conductance_values = pandas.Series(list_skin_conductance_values, name='Skin Conductance Values[xi]:')
ser_skin_conductance_timestamp = pandas.Series(list_skin_conductance_timestamp, name='Skin Conductance Timestamp[t_xi]:')
ser_skin_conductance_media = pandas.Series(list_skin_conductance_media, name='Skin Conductance Media[mx_paa]:')
ser_skin_conductance_sd = pandas.Series(list_skin_conductance_sd, name='Skin Conductance SD [sx_paa]:')
ser_skin_conductance_Z = pandas.Series(list_skin_conductance_Z, name='Skin Conductance Z [Z_xi]:')
ser_skin_conductance_f = pandas.Series(list_skin_conductance_f, name='Skin Conductance f [f_xi]:')
ser_heart_rate_values = pandas.Series(list_heart_rate_values, name='Heart Rate Values [yi]:')
ser_heart_rate_timestamp = pandas.Series(list_heart_rate_timestamp, name='Heart Rate Timestamp [t_yi]:')
ser_heart_rate_media = pandas.Series(list_heart_rate_media, name='Heart Rate Media [my_paa]:')
ser_heart_rate_sd = pandas.Series(list_heart_rate_sd, name='Heart Rate SD [sy_paa]:')
ser_heart_rate_Z = pandas.Series(list_heart_rate_Z, name='Heart Rate Z [Z_yi]:')
ser_heart_rate_f = pandas.Series(list_heart_rate_f, name='Heart Rate f [f_yi]:')
ser_brainwaves_values = pandas.Series(list_brainwaves_values, name='Brainwaves Values [zi]:')
ser_brainwaves_timestamp = pandas.Series(list_brainwaves_timestamp, name='Brainwaves Timestamp [t_zi]:')
ser_brainwaves_media = pandas.Series(list_brainwaves_media, name='Brainwaves Media [mz_paa]:')
ser_brainwaves_sd = pandas.Series(list_brainwaves_sd, name='Brainwaves SD [sz_paa]:')
ser_brainwaves_Z = pandas.Series(list_brainwaves_Z, name='Brainwaves Z [Z_zi]:')
ser_brainwaves_f = pandas.Series(list_brainwaves_f, name='Brainwaves f [f_zi]:')
# Generate dataframe by concatenating the series
df = pandas.concat([ser_session_id,
ser_phys_trial_id,
ser_phys_instance_id,
ser_skin_conductance_values,
ser_skin_conductance_timestamp,
ser_skin_conductance_media,
ser_skin_conductance_sd,
ser_skin_conductance_Z,
ser_skin_conductance_f,
ser_heart_rate_values,
ser_heart_rate_timestamp,
ser_heart_rate_media,
ser_heart_rate_sd,
ser_heart_rate_Z,
ser_heart_rate_f,
ser_brainwaves_values,
ser_brainwaves_timestamp,
ser_brainwaves_media,
ser_brainwaves_sd,
ser_brainwaves_Z,
ser_brainwaves_f], axis=1)
# Obtain the path for the file to be saved
save_path_name, _ = QFileDialog.getSaveFileName(self, 'Save File')
df.to_csv(save_path_name, index=False, encoding='cp1252') # cp1252 is the Windows "ANSI" code page; pass header=False to omit column headers
# print(df)
print(save_path_name)
def click_export_CSV(self):
# Convert text in textbox to string
str_start_at = self.tb_start_at.text()
str_finish_at = self.tb_finish_at.text()
str_onset_at = self.tb_onset_at.text()
str_skin_conductance_D = self.tb_skin_conductance_D.text()
str_heart_rate_D = self.tb_heart_rate_D.text()
str_brainwaves_D = self.tb_brainwaves_D.text()
str_trial_id = self.tb_trial_id.toPlainText()
str_stimulus_id = self.tb_stimulus_id.toPlainText()
str_time_start_trial = self.tb_time_start_trial.toPlainText()
str_onset_to_trial = self.tb_onset_to_trial.toPlainText()
str_seconds_end_trial = self.tb_seconds_end_trial.toPlainText()
str_dur_after_interval = self.tb_dur_after_interval.toPlainText()
str_dur_before_interval = self.tb_dur_before_interval.toPlainText()
str_time_end_trial = self.tb_time_end_trial.toPlainText()
str_skin_conductance_Fn = self.tb_skin_conductance_Fn.toPlainText()
str_heart_rate_Fn = self.tb_heart_rate_Fn.toPlainText()
str_brainwaves_Fn = self.tb_brainwaves_Fn.toPlainText()
# Remove specific text from strings
str_start_at = str_start_at.replace('Session started at: ', '')
str_finish_at = str_finish_at.replace('Session finished at: ', '')
str_onset_at = str_onset_at.replace('First trial started at: ', '')
# (these labels must match the text written by calculate_D_Z)
str_skin_conductance_D = str_skin_conductance_D.replace('Skin conductance D: ', '')
str_heart_rate_D = str_heart_rate_D.replace('Heart rate D: ', '')
str_brainwaves_D = str_brainwaves_D.replace('Brainwaves D: ', '')
# Convert string to list
list_start_at = str_start_at.split("\n")
list_finish_at = str_finish_at.split("\n")
list_onset_at = str_onset_at.split("\n")
list_skin_conductance_D = str_skin_conductance_D.split("\n")
list_heart_rate_D = str_heart_rate_D.split("\n")
list_brainwaves_D = str_brainwaves_D.split("\n")
list_trial_id = str_trial_id.split("\n")
list_stimulus_id = str_stimulus_id.split("\n")
list_time_start_trial = str_time_start_trial.split("\n")
list_onset_to_trial = str_onset_to_trial.split("\n")
list_seconds_end_trial = str_seconds_end_trial.split("\n")
list_dur_after_interval = str_dur_after_interval.split("\n")
list_dur_before_interval = str_dur_before_interval.split("\n")
list_time_end_trial = str_time_end_trial.split("\n")
list_skin_conductance_Fn = str_skin_conductance_Fn.split("\n")
list_heart_rate_Fn = str_heart_rate_Fn.split("\n")
list_brainwaves_Fn = str_brainwaves_Fn.split("\n")
# Remove first line in each of the session data lists
del list_trial_id[0]
del list_stimulus_id[0]
del list_time_start_trial[0]
del list_onset_to_trial[0]
del list_seconds_end_trial[0]
del list_dur_after_interval[0]
del list_dur_before_interval[0]
del list_time_end_trial[0]
del list_skin_conductance_Fn[0]
del list_heart_rate_Fn[0]
del list_brainwaves_Fn[0]
# Convert list to series
ser_start_at = pandas.Series(list_start_at, name='Session started at:')
ser_finish_at = pandas.Series(list_finish_at, name='Session finished at:')
ser_onset_at = pandas.Series(list_onset_at, name='First trial started at:')
ser_skin_conductance_D = pandas.Series(list_skin_conductance_D, name='Skin conductance D [SUM(FnE)-SUM(FnN)]:')
ser_heart_rate_D = pandas.Series(list_heart_rate_D, name='Heart rate D [SUM(FnE)-SUM(FnN)]:')
ser_brainwaves_D = pandas.Series(list_brainwaves_D, name='Brainwaves D [SUM(FnE)-SUM(FnN)]:')
ser_trial_id = pandas.Series(list_trial_id, name='Trial ID:')
ser_stimulus_id = pandas.Series(list_stimulus_id, name='Stimulus ID:')
ser_time_start_trial = pandas.Series(list_time_start_trial, name='Time at the start of trial:')
ser_onset_to_trial = pandas.Series(list_onset_to_trial, name='First trial to end of this trial (s):')
ser_seconds_end_trial = pandas.Series(list_seconds_end_trial, name='Duration of each trial (s):')
ser_dur_after_interval = pandas.Series(list_dur_after_interval, name='Interval after each trial (s):')
ser_dur_before_interval = pandas.Series(list_dur_before_interval, name='Interval before each trial (s):')
ser_time_end_trial = pandas.Series(list_time_end_trial, name='Time at the end of trial:')
ser_skin_conductance_Fn = pandas.Series(list_skin_conductance_Fn, name='Skin conductance Fn [SUM_fx_paa]:')
ser_heart_rate_Fn = pandas.Series(list_heart_rate_Fn, name='Heart rate Fn [SUM_fy_paa]:')
ser_brainwaves_Fn = pandas.Series(list_brainwaves_Fn, name='Brainwaves Fn [SUM_fz_paa]:')
# Generate dataframe by concatenating the series
df = pandas.concat([ser_start_at,
ser_finish_at,
ser_onset_at,
ser_skin_conductance_D,
ser_heart_rate_D,
ser_brainwaves_D,
ser_trial_id,
ser_stimulus_id,
ser_time_start_trial,
ser_time_end_trial,
ser_dur_before_interval,
ser_dur_after_interval,
ser_seconds_end_trial,
ser_onset_to_trial,
ser_skin_conductance_Fn,
ser_heart_rate_Fn,
ser_brainwaves_Fn], axis=1)
# Obtain the path for the file to be saved
save_path_name, _ = QFileDialog.getSaveFileName(self, 'Save File')
df.to_csv(save_path_name, index=False, encoding='cp1252') # cp1252 is the Windows "ANSI" code page; pass header=False to omit column headers
# print(df)
print(save_path_name)
def click_stop(self):
self.CODE_REBOOT = 1
# Close white screen
self.white_w.close()
# Add the timestamp for the end of the session
t_ff = datetime.now().strftime('%H:%M:%S.%f')[:-3]
self.tb_finish_at.setText("SESSION STOPPED AT: " + t_ff)
# Show message stating the end of the session
QMessageBox.about(self, "STOPPING...", "Wait until TRIAL and SESSION are stopped...")
def click_trial_type(self, index):
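# index is the selected position in combo_trial_type; any nonzero position
# (presumably the "On-Demand" entry) enables the before-trial interval
# spin boxes.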
self.sb_before_min_interval.setEnabled(index)
self.sb_before_max_interval.setEnabled(index)
def click_neutral_stimuli(self):
# Obtain the path of the directory with the stimuli
open_path_name = QFileDialog.getExistingDirectory(self, 'Select the folder that contains the neutral stimuli')
# Convert the obtained path to OS-native separators
open_path_name = QDir.toNativeSeparators(open_path_name)
# Append a wildcard so glob matches every stimulus in the folder
open_path_name = os.path.join(open_path_name, '*')
self.gen_image_list(open_path_name,self.image_list_neutral,self.image_list_neutral_filenames)
def click_excitatory_stimuli(self):
# Obtain the path of the directory with the stimuli
open_path_name = QFileDialog.getExistingDirectory(self, 'Select the folder that contains the excitatory stimuli')
# Convert the obtained path to OS-native separators
open_path_name = QDir.toNativeSeparators(open_path_name)
# Append a wildcard so glob matches every stimulus in the folder
open_path_name = os.path.join(open_path_name, '*')
self.gen_image_list(open_path_name, self.image_list_excitatory,self.image_list_excitatory_filenames)
def click_shuffle(self): #!
pass
#& RNG DO STUFF
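# Both helpers below use simple rejection sampling: draw a fixed-width bit
# string from the RNG, interpret it as an integer, and redraw until it falls
# in the target range, which keeps the accepted values uniform over that
# range. E.g. for a 0-5 s interval, 5 is '101' in binary (3 bits), so each
# draw yields 0-7 and a 6 or 7 is rejected.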
def rng_get_bits(self, rng, bits, min_interval, len_min_interval, max_interval, len_max_interval):
while bits > max_interval or bits < min_interval:
# Draw the specified number of bits from the RNG to produce a candidate interval
str_bits = rng.get_bits(len_max_interval)
bits = int(str_bits,2)
if bits <= max_interval and bits >= min_interval:
interval = bits
# Convert the interval from seconds to milliseconds for use with QTimer
interval1000 = interval * 1000
return interval1000
else:
QMessageBox.about(self, "ERROR", "RNG bits are inconsistent. Report bug with this messsage 'Review rng_get_bits'")
def rng_get_image(self, rng, bits, len_imglist, len_bin_imagelist):
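# Draws len_bin_imagelist bits and rejects indices >= len_imglist, so the
# chosen index is uniform over the combined image list. Indices below the
# neutral-list length are labelled 'N', the rest 'E' (the displayed number
# is the combined-list index + 1).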
while bits >= len_imglist:
# Obtain bits of RNG to generate index for imagelist
str_bits = rng.get_bits(len_bin_imagelist)
bits = int(str_bits,2)
if bits < len_imglist:
# Prefix the stimulus ID with 'N' if neutral or 'E' if excitatory, and show the image
# Indexing starts at 0, so add 1 for the displayed ID
if bits < self.len_image_list_neutral:
self.tb_stimulus_id.append("N-" + str(bits + 1))
else:
self.tb_stimulus_id.append("E-" + str(bits + 1))
# Obtain image number in "image_list_filenames" array
self.image_window(self.image_list_filenames[bits])
else:
QMessageBox.about(self, "ERROR", "RNG bits generated an index number out of the range of the image list. Report bug with this messsage 'Review rng_get_image'")
#& DO STUFF
def thread_neulog(self,neulog_class,num_sensors,total_secs):
# Approximately every 10 seconds, fetch the accumulated values from the Neulog server, store them in self.diction, and print the length of each sensor's list
for x in range(total_secs):
time.sleep(9)
self.diction = neulog_class.get_exp_values()
print(datetime.now().strftime('%H:%M:%S.%f')[:-3])
for y in range(num_sensors):
print(len(self.diction[y]))
def delete_unused_phys_data(self, tb_time_versus_ant, tb_time_versus_post, tb_timestamps, tb_phys_vals):
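# Trim physiological samples recorded outside the session window: timestamps
# earlier than the first trial's onset or later than the session end are
# removed, together with their paired sensor values, and both textboxes are
# rewritten with only the surviving data.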
# Convert text in textboxes to string
str_time_versus_ant = tb_time_versus_ant.text()
str_time_versus_post = tb_time_versus_post.text()
str_timestamps = tb_timestamps.toPlainText()
str_phys_vals = tb_phys_vals.toPlainText()
# Obtain only time from string
str_time_versus_ant = str_time_versus_ant[-12:]
str_time_versus_post = str_time_versus_post[-12:]
# Convert string to list
list_timestamps = str_timestamps.split("\n")
list_phys_vals = str_phys_vals.split("\n")
list_time_versus_ant = str_time_versus_ant.split("\n")
list_time_versus_post = str_time_versus_post.split("\n")
# Store and remove first line in each of the session data lists
list_timestamps_0 = list_timestamps[0]
del list_timestamps[0]
list_phys_vals_0 = list_phys_vals[0]
del list_phys_vals[0]
# Format the string timestamps to time format
list_timestamps_copy = list_timestamps.copy()
t_first = datetime.strptime(list_time_versus_ant[0],'%H:%M:%S.%f')
t_last = datetime.strptime(list_time_versus_post[0],'%H:%M:%S.%f')
count = 0
count_last = 0
# For each timestamp, format to time and compare to first trial, and...
for instance in list_timestamps_copy:
t_instance = datetime.strptime(instance,'%H:%M:%S.%f')
#remove the ones before the first trial
if t_instance < t_first:
list_timestamps.remove(instance)
count += 1
#remove the ones after the session ending
if t_instance > t_last:
list_timestamps.remove(instance)
count_last += 1
# Remove each value in the physiological data corresponding to the deleted timestamps
for value in range(count):
del list_phys_vals[0]
for value in range(count_last):
del list_phys_vals[-1]
# Set the stored text for each textbox
tb_timestamps.setText(str(list_timestamps_0))
tb_phys_vals.setText(str(list_phys_vals_0))
# Append the good timestamps and values, corresponding to the start of the first trial
for instance in list_timestamps:
tb_timestamps.append(str(instance))
for instance in list_phys_vals:
tb_phys_vals.append(str(instance))
def create_phys_ids(self, tb_trials, tb_start_trial, tb_end_trial, tb_timestamps, tb_trialids, tb_instanceids):
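# Label every physiological sample that falls inside a trial window with its
# trial ID and a running instance number within that trial, by comparing each
# sample timestamp against the trial start/end times.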
# Convert text in textboxes to string
str_trials = tb_trials.toPlainText()
str_start_trial = tb_start_trial.toPlainText()
str_end_trial= tb_end_trial.toPlainText()
str_timestamps = tb_timestamps.toPlainText()
# Convert string to list
list_trials = str_trials.split("\n")
list_start_trial = str_start_trial.split("\n")
list_end_trial = str_end_trial.split("\n")
list_timestamps = str_timestamps.split("\n")
# Remove first line in each of the data in lists
del list_timestamps[0]
del list_trials[0]
del list_start_trial[0]
del list_end_trial[0]
# Counts for each trial and instance
count_trial = 0
count_instance = 1
# For each timestamp, format to time and compare to start of trial and end of trial
for instance in list_timestamps:
t_instance = datetime.strptime(instance,'%H:%M:%S.%f')
t_start_instance = datetime.strptime(list_start_trial[count_trial],'%H:%M:%S.%f')
t_end_instance = datetime.strptime(list_end_trial[count_trial],'%H:%M:%S.%f')
# For each instance, add the trial ID and instance ID to the respective textboxes
if t_instance >= t_start_instance:
if t_instance < t_end_instance:
tb_trialids.append(str(list_trials[count_trial]))
tb_instanceids.append(str(count_instance))
count_instance += 1
else:
count_trial += 1
count_instance = 1
tb_trialids.append(str(list_trials[count_trial]))
tb_instanceids.append(str(count_instance))
count_instance += 1
def calculate_media_sd_Z_f_Fn(self,presentiment_instances,tb_no_trials,tb_phys_vals,tb_trialids,tb_instanceids,tb_phys_media,tb_phys_sd, tb_phys_Z, tb_phys_f,tb_Fn):
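# Per trial: the first `presentiment_instances` samples (the pre-stimulus
# window) give a baseline mean ("media") and SD; every sample of the trial is
# standardized as Z = (x - mean) / sd, rebased as f = Z - Z0 (deviation from
# the trial's first Z), and f is summed over the pre-stimulus window to give
# the trial score Fn.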
# Convert text in textboxes to string
str_no_trials = tb_no_trials.toPlainText()
str_phys_vals = tb_phys_vals.toPlainText()
str_trialids = tb_trialids.toPlainText()
str_instanceids = tb_instanceids.toPlainText()
# Convert string to list
list_no_trials = str_no_trials.split("\n")
list_phys_vals = str_phys_vals.split("\n")
list_trialids = str_trialids.split("\n")
list_instanceids = str_instanceids.split("\n")
# Remove first line in each of the data in lists
del list_no_trials[0]
del list_phys_vals[0]
del list_trialids[0]
del list_instanceids[0]
# Declare counts:
count_presentiment_instances = 1
count_trials = 0
# Add the presentiment instances per trial to the corresponding presentiment_list
presentiment_lists = []
presentiment_lists_len = []
for trial in list_no_trials:
presentiment_lists.append([])
count_instances = -1
for value in list_phys_vals:
count_instances += 1
if trial == list_trialids[count_instances]:
if count_presentiment_instances <= presentiment_instances:
presentiment_lists[count_trials].append(float(value))
count_presentiment_instances += 1
# Record the length of each presentiment list and advance the counters
presentiment_lists_len.append(len(presentiment_lists[count_trials]))
count_trials += 1
count_presentiment_instances = 1
# For each trial calculate the mean ("media") and sd of the presentiment timeframe
count_trials = 0
for trial in list_no_trials:
phys_media = numpy.mean(presentiment_lists[count_trials])
phys_sd = numpy.std(presentiment_lists[count_trials], ddof=1)
# Define counts:
count_instances = -1
count_Z = -1
count_Fn = -1
Fn = 0
# For each value calculate Z and f, and append mean, sd, Z and f to the respective textboxes
for value in list_phys_vals:
count_instances += 1
if trial == list_trialids[count_instances]:
phys_Z = (float(value)-phys_media)/phys_sd
count_Z += 1
count_Fn += 1
if count_Z == 0:
Z0 = phys_Z
phys_f = phys_Z - Z0
tb_phys_media.append(str(phys_media))
tb_phys_sd.append(str(phys_sd))
tb_phys_Z.append(str(phys_Z))
tb_phys_f.append(str(phys_f))
# Sum each f in the presentiment timeframe to generate Fn for each trial
if count_Fn < presentiment_lists_len[count_trials]:
Fn += phys_f
tb_Fn.append(str(Fn))
count_trials += 1
def calculate_D_Z(self,stimulus_id,trial_Fn,tb_D,tb_ZD):
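# D = sum(Fn over excitatory trials) - sum(Fn over neutral trials). A null
# distribution D' is built by shuffling the Fn values 5000 times and taking
# the same E/N split of each permutation; the result is reported as
# Z = (D - mean(D')) / sd(D').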
# Store tb_D and tb_ZD text
tb_D_text = tb_D.text()
tb_ZD_text = tb_ZD.text()
# Convert text in textboxes to string
str_stimulus_id = stimulus_id.toPlainText()
str_trial_Fn = trial_Fn.toPlainText()
# Convert string to list
list_stimulus_id = str_stimulus_id.split("\n")
list_trial_Fn = str_trial_Fn.split("\n")
# Remove first line in each of the data in lists
del list_stimulus_id[0]
del list_trial_Fn[0]
# Declare number of stimuli and sums
N_stimuli = 0
E_stimuli = 0
sum_N_stimuli = 0
sum_E_stimuli = 0
count_trials = 0
# Count the number of neutral and excitatory stimuli in the stimulus_id textbox
for stim_id in list_stimulus_id:
if stim_id[:1] == 'N':
N_stimuli += 1
sum_N_stimuli += float(list_trial_Fn[count_trials])
else:
E_stimuli += 1
sum_E_stimuli += float(list_trial_Fn[count_trials])
count_trials += 1
# Calculate D (Σ FE - Σ FN) and append it to tb_D
calc_D = sum_E_stimuli - sum_N_stimuli
tb_D.setText(str(tb_D_text) + " " + str(calc_D))
# Create list of floats from list_trial_Fn
list_trial_Fn_floats = [float(i) for i in list_trial_Fn]
# Shuffle the Fn values 5000 times to build the null distribution of D'
list_D_prime = []
for x in range (5000):
numpy.random.shuffle(list_trial_Fn_floats)
calc_D_prime = sum(list_trial_Fn_floats[:E_stimuli]) - sum(list_trial_Fn_floats[E_stimuli:])
list_D_prime.append(calc_D_prime)
calc_D_prime_media = numpy.mean(list_D_prime)
calc_D_prime_sd = numpy.std(list_D_prime)
calc_z = (calc_D - calc_D_prime_media) / calc_D_prime_sd
tb_ZD.setText(str(tb_ZD_text) + " " + str(calc_z))
#& DISPLAY IMAGES
def gen_image_list(self,open_path,img_list,img_list_fnames):
# Create a list of images from the directory which contains the stimuli
for filename in glob.glob(open_path):
im = Image.open(filename)
img_list.append(im)
img_list_fnames.append(im.filename)
def white_window(self):
# Create a QWidget where the Qlabel containing the image will be stored
self.white_w = QWidget()
label = QLabel(self.white_w)
# Load the white-screen image from its path
path_pixmap = os.path.join(os.getcwd(), 'Presentimiento', 'White.png') # White screen path
pixmap = QtGui.QPixmap(path_pixmap)
# Fix the possible size of the image
pixmap = pixmap.scaled(2048, 1024, Qt.KeepAspectRatio)
label.setPixmap(pixmap)
label.setAlignment(Qt.AlignCenter)
# Create the layout
lay = QVBoxLayout()
lay.addWidget(label)
self.white_w.setLayout(lay)
self.white_w.showFullScreen()
def image_window(self, ruta):
# Create a QWidget where the Qlabel containing the image will be stored
self.image_w = QWidget()
label = QLabel(self.image_w)
# Load the image from the given path ('ruta')
pixmap = QtGui.QPixmap(ruta)
# Fix the possible size of the image
pixmap = pixmap.scaled(2048, 1024, Qt.KeepAspectRatio)
label.setPixmap(pixmap)
label.setAlignment(Qt.AlignCenter)
# Create the layout
lay = QVBoxLayout()
lay.addWidget(label)
self.image_w.setLayout(lay)
self.image_w.showFullScreen()
#& START SESSION
def start_session(self, trials):
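# Session flow: record the start time, optionally start a Neulog recording
# (polled from a background thread), show a blank screen for the first-screen
# delay, then loop over trials: optional On-Demand prompt plus random
# before-interval (from the second trial on), pre-stimulus screen, randomly
# selected image, post-stimulus screen, and a random after-interval between
# trials. After the last trial the physiological data are pulled, trimmed to
# the session window, labelled per trial, and scored (mean/sd/Z/f/Fn, D, ZD).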
#& SET START TIMESTAMP
# Print the timestamp in the "Session started at:" box
t_start = datetime.now().strftime('%H:%M:%S.%f')[:-3]
self.tb_start_at.setText("Session started at: " + t_start)
#& START RECORDING PHYSIOLOGICAL DATA
# define physiological classes
#& NEULOG
# Define neulog class
neu = Neulog(self.tb_neulog_port.text())
neulog_used = False
neulog_phys_params = ""
neulog_num_sensors = 0
# Obtain the maximum total duration of the session in seconds
neulog_seconds = int(self.sb_first_screen.value()) + (int(self.sb_pre_screen.value()
+ self.sb_stim_duration.value() + self.sb_post_screen.value()
+ self.sb_before_max_interval.value() + self.sb_after_max_interval.value())
* int(self.sb_num_trials.value()))
if self.cb_skin_conductance.isChecked():
neulog_num_sensors += 1
if neu.name == self.combo_skin_conductance.currentText():
neulog_used = True
neulog_phys_params += ",GSR"
neulog_samples_mult = int(self.combo_skin_conductance_sample.currentText()[:-11])
# Translate the sample rate in main window to the Neulog API sample rate index
if self.combo_skin_conductance_sample.currentText() == "20 per second":
neulog_rate = '7'
elif self.combo_skin_conductance_sample.currentText() == "10 per second":
neulog_rate = '8'
elif self.combo_skin_conductance_sample.currentText() == "5 per second":
neulog_rate = '9'
elif self.combo_skin_conductance_sample.currentText() == "2 per second":
neulog_rate = '10'
elif self.combo_skin_conductance_sample.currentText() == "1 per second":
neulog_rate = '11'
else:
pass
if self.cb_heart_rate.isChecked():
neulog_num_sensors += 1
if neu.name == self.combo_heart_rate.currentText():
neulog_used = True
neulog_phys_params += ",Pulse"
neulog_samples_mult = int(self.combo_heart_rate_sample.currentText()[:-11])
# Translate the sample rate in main window to the Neulog API sample rate index
if self.combo_heart_rate_sample.currentText() == "20 per second":
neulog_rate = '7'
elif self.combo_heart_rate_sample.currentText() == "10 per second":
neulog_rate = '8'
elif self.combo_heart_rate_sample.currentText() == "5 per second":
neulog_rate = '9'
elif self.combo_heart_rate_sample.currentText() == "2 per second":
neulog_rate = '10'
elif self.combo_heart_rate_sample.currentText() == "1 per second":
neulog_rate = '11'
else:
pass
if neulog_used == True:
# Obtain neulog parameters
exp_params = str(self.tb_neulog_port.text()) + neulog_phys_params
exp_params_list = exp_params.split(',')
neu = Neulog(* exp_params_list)
neu.exp_stop()
neulog_samples = str(neulog_seconds * neulog_samples_mult)
neulog_seconds_threading = int(neulog_seconds / 10)
# Start neulog experiment
neu.exp_start(neulog_rate,neulog_samples)
t_phys_start = datetime.now().strftime('%H:%M:%S.%f')[:-3]
self.tb_phys_start_at.setText("Physiological data started at: " + t_phys_start)
# Start thread to recover samples every 10 seconds
thread1 = threading.Thread(target=self.thread_neulog, args=(neu,neulog_num_sensors,neulog_seconds_threading,))
thread1.start()
else:
pass
#& RESTART CODE
self.CODE_REBOOT = -1234
# If CODE_REBOOT is changed by the "stop session" button, it will break the trials loop
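# CODE_REBOOT acts as a run sentinel: -1234 means "keep running"; click_stop
# sets it to 1, so every `while self.CODE_REBOOT == -1234` guard exits and
# the session winds down.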
#& SHOW INITIAL WHITE SCREEN
while self.CODE_REBOOT == -1234:
# Show white screen for the first 10 seconds, only once
self.white_window()
# Define counter of trials
counter_trial = 0
onset_duration = 0
# Timer of 10 seconds
loop = QEventLoop()
# Obtain the int value of the first_screen spin box and multiply by 1000 to convert seconds to milliseconds
QTimer.singleShot((int(self.sb_first_screen.value())*1000), loop.quit)
loop.exec_()
t_onset = datetime.now().strftime('%H:%M:%S.%f')[:-3]
self.tb_onset_at.setText("First trial started at: " + t_onset)
#& START TRIAL
# The number of trials is stated in the "click_start_function"
for x in range (0,trials,1):
# If CODE_REBOOT is changed by the "stop session" button, it will break the trials loop
while self.CODE_REBOOT == -1234:
#& DEFINE USED VARIABLES
# RNGs
psyleron = PsyREG()
pseudo = Pseudo_RNG()
# intervals and duration at 0
after_interval = 0
before_interval = 0
counter_trial += 1
trial_duration = 0
# obtain the length of the binary representation of the max after-stimulus interval
int_after_max_interval = int(self.sb_after_max_interval.value())
len_bin_after_max_interval = len(str(f'{int_after_max_interval:01b}'))
# obtain the length of the binary representation of the min after-stimulus interval
int_after_min_interval = int(self.sb_after_min_interval.value())
len_bin_after_min_interval = len(str(f'{int_after_min_interval:01b}'))
# obtain the length of the binary representation of the max before-stimulus interval
int_before_max_interval = int(self.sb_before_max_interval.value())
len_bin_before_max_interval = len(str(f'{int_before_max_interval:01b}'))
# obtain the length of the binary representation of the min before-stimulus interval
int_before_min_interval = int(self.sb_before_min_interval.value())
len_bin_before_min_interval = len(str(f'{int_before_min_interval:01b}'))
# Add neutral and excitatory image lists
self.image_list = self.image_list_neutral + self.image_list_excitatory
self.image_list_filenames = self.image_list_neutral_filenames + self.image_list_excitatory_filenames
# obtain the lengths of the neutral, excitatory, and combined image lists, and the binary length of the total
self.len_image_list_neutral = len(self.image_list_neutral_filenames)
self.len_image_list_excitatory = len(self.image_list_excitatory_filenames)
len_image_list = len(self.image_list_filenames)
len_bin_image_list = len(str(f'{len_image_list:01b}'))
# establish variables that will force the first loop to obtain RNG bits from rng_get_image and rng_get_bits
int_after_bits = int_after_max_interval + 1
int_before_bits = int_before_max_interval + 1
image_bits = len_image_list + 1
# establish the constant part of each trial's duration: pre-stimulus screen + stimulus + post-stimulus screen
trial_dur_constant = int(self.sb_pre_screen.value() + self.sb_stim_duration.value() + self.sb_post_screen.value())
t_s = datetime.now().strftime('%H:%M:%S.%f')[:-3]
self.tb_time_start_trial.append(t_s)
self.tb_trial_id.append("n" + str(counter_trial))
#& ADDITIONAL ON DEMAND PROCEDURE
if counter_trial >= 2:
# Validate if session is On-demand or free-running
if self.combo_trial_type.currentText() == "On-Demand":
# Minimize main screen
self.showMinimized()
# Show button for starting the next trial
butt_demand = QMessageBox.question(self, "On-Demand: Trial Ended", "Please click 'Ok' to start the next trial.", QMessageBox.Ok | QMessageBox.Abort)
if butt_demand == QMessageBox.Abort:
self.butt_stop.click()
break
elif butt_demand == QMessageBox.Ok:
pass
else:
pass
#& ADD BEFORE STIMULI INTERVAL
if str(psyleron.get_name()) == self.combo_rng_sources.currentText():
if psyleron.count_PsyREGs() >= 1:
# Gets random interval from RNG giving the input 1) type of RNG, 2) max interval + 1,
# 3)min interval, 4) length of binary min interval,
# 5) max interval, and 6) length of binary max interval
before_interval_1000 = self.rng_get_bits(psyleron, int_before_bits, int_before_min_interval, len_bin_before_min_interval, int_before_max_interval, len_bin_before_max_interval)
before_interval = before_interval_1000 / 1000
# Clear and release Psyleron
psyleron.clear_RNG()
psyleron.release_RNG()
else:
QMessageBox.about(self, "ERROR", "Psyleron didn't send bits")
# Check if Pseudo-RNG is being used
elif pseudo.name == self.combo_rng_sources.currentText():
# Gets random interval from RNG giving the input 1) type of RNG, 2) max interval + 1,
# 3)min interval, 4) length of binary min interval,
# 5) max interval, and 6) length of binary max interval
before_interval_1000 = self.rng_get_bits(pseudo, int_before_bits, int_before_min_interval, len_bin_before_min_interval, int_before_max_interval, len_bin_before_max_interval)
before_interval = before_interval_1000 / 1000
else:
QMessageBox.about(self, "ERROR", "Unexpected RNG error.")
# Timer-wait according to random interval
loop = QEventLoop()
QTimer.singleShot(before_interval_1000, loop.quit)
loop.exec_()
else: #Free-Running
pass
else: # counter_trial <= 1
pass
#& WHITE SCREEN FOR SB_PRE-SCREEN secs (default = 3)
# Nothing new is shown; the white screen from the initial SHOW WHITE SCREEN step is still up
# Timer of 3 seconds
loop = QEventLoop()
# Obtain the int value of the pre_screen spin box and multiply by 1000 to convert seconds to milliseconds
QTimer.singleShot((int(self.sb_pre_screen.value())*1000), loop.quit)
loop.exec_()
#& SHOW IMAGE FOR SB_IMAGE secs (default = 3)
# Randomly select the image to show and display it for the stimulus duration
# Check if Psyleron is being used
if str(psyleron.get_name()) == self.combo_rng_sources.currentText():
if psyleron.count_PsyREGs() >= 1:
# Gets random image from RNG giving the input 1) type of RNG, 2) length of image list + 1,
# 3) length of image list, and 4) length of the binary length of image list
self.rng_get_image(psyleron, image_bits, len_image_list, len_bin_image_list)
# Clear and release Psyleron
psyleron.clear_RNG()
psyleron.release_RNG()
else:
QMessageBox.about(self, "ERROR", "Psyleron didn't send bits")
# Check if Pseudo-RNG is being used
elif pseudo.name == self.combo_rng_sources.currentText():
# Gets random image from RNG giving the input 1) type of RNG, 2) length of image list + 1,
# 3) length of image list, and 4) length of the binary length of image list
self.rng_get_image(pseudo, image_bits, len_image_list, len_bin_image_list)
else:
QMessageBox.about(self, "ERROR", "No RNG selected. Closing app...")
# Timer of 3 seconds
loop = QEventLoop()
# Obtain the int value of the stim_duration spin box and multiply by 1000 to convert seconds to milliseconds
QTimer.singleShot((int(self.sb_stim_duration.value())*1000), loop.quit)
loop.exec_()
# Close image
self.image_w.close()
#& WHITE SCREEN FOR SB_POST-SCREEN secs (default = 9)
# Timer of 9 seconds
loop = QEventLoop()
# Obtain the int value of the post_screen spin box and multiply by 1000 to convert seconds to milliseconds
QTimer.singleShot((int(self.sb_post_screen.value())*1000), loop.quit)
loop.exec_()
#& SESSION FINISHED?
# Check whether all trials have run (session over)
if counter_trial >= trials:
# Close white screen
self.white_w.close()
# Add the before stimulus interval of last trial
self.tb_dur_before_interval.append(str(int(before_interval)))
# Add a "0" string to the intervals, as last trial doesn't have interval
self.tb_dur_after_interval.append("0")
# Add the final onset duration
onset_duration += (trial_dur_constant + before_interval)
self.tb_onset_to_trial.append(str(int(onset_duration)))
# Add the final trial duration
trial_duration += (trial_dur_constant + before_interval)
self.tb_seconds_end_trial.append(str(int(trial_duration)))
# Add the timestamp at the end of that trial
t_f = datetime.now().strftime('%H:%M:%S.%f')[:-3]
self.tb_time_end_trial.append(t_f)
# Add the timestamp for the end of the session
t_ff = datetime.now().strftime('%H:%M:%S.%f')[:-3]
self.tb_finish_at.setText("Session finished at: " + t_ff)
# stop code
self.CODE_REBOOT = 1
#& PHYSIOLOGICAL DATA
# Check whether Neulog is being used
if neulog_used == True:
# Obtain experiment values from Neulog server
sensor_lists = neu.get_exp_values()
sensor_list_index = 0
# Iterate over the per-sensor lists returned by the Neulog server
for sensor_list in sensor_lists:
# Compute the gap in milliseconds between consecutive samples
neulog_samples_mils = (1000/int(neulog_samples_mult))
t_phys_start_next = t_phys_start
# Iterate over the values of each list
for sample in sensor_list:
if sensor_list_index == 0:
# Append the sensor value to the textbox
self.tb_skin_conductance_values.append(str(float(sample)))
# Obtain the timestamp corresponding to each value
t_phys_start_next = datetime.strptime(t_phys_start_next,'%H:%M:%S.%f')
t_phys_start_next = t_phys_start_next + timedelta(milliseconds = neulog_samples_mils)
t_phys_start_next = t_phys_start_next.strftime('%H:%M:%S.%f')[:-3]
self.tb_skin_conductance_timestamp.append(str(t_phys_start_next))
elif sensor_list_index == 1:
# Append the sensor value to the textbox
self.tb_heart_rate_values.append(str(float(sample)))
# Obtain the timestamp corresponding to each value
t_phys_start_next = datetime.strptime(t_phys_start_next,'%H:%M:%S.%f')
t_phys_start_next = t_phys_start_next + timedelta(milliseconds = neulog_samples_mils)
t_phys_start_next = t_phys_start_next.strftime('%H:%M:%S.%f')[:-3]
self.tb_heart_rate_timestamp.append(str(t_phys_start_next))
sensor_list_index += 1
# Stop neulog experiment in server
neu.exp_stop()
# Register the physiological data ending time
t_ff = datetime.now().strftime('%H:%M:%S.%f')[:-3]
self.tb_phys_finish_at.setText("Physiological data finished at: " + t_ff)
# Validate if other physiological hardware is being used: #!
elif neulog_used == False:
pass
#& ERASE DATA RECORDED BEFORE THE FIRST TRIAL
if self.cb_skin_conductance.isChecked():
self.delete_unused_phys_data(self.tb_onset_at,self.tb_finish_at,self.tb_skin_conductance_timestamp,self.tb_skin_conductance_values)
if self.cb_heart_rate.isChecked():
self.delete_unused_phys_data(self.tb_onset_at,self.tb_finish_at,self.tb_heart_rate_timestamp,self.tb_heart_rate_values)
if self.cb_brainwaves.isChecked():
self.delete_unused_phys_data(self.tb_onset_at,self.tb_finish_at,self.tb_brainwaves_timestamp,self.tb_brainwaves_values)
#& ADD TRIAL ID
if self.cb_skin_conductance.isChecked():
self.create_phys_ids(self.tb_trial_id,self.tb_time_start_trial,self.tb_time_end_trial,self.tb_skin_conductance_timestamp,self.tb_phys_trial_id,self.tb_phys_instance_id)
elif self.cb_heart_rate.isChecked():
self.create_phys_ids(self.tb_trial_id,self.tb_time_start_trial,self.tb_time_end_trial,self.tb_heart_rate_timestamp,self.tb_phys_trial_id,self.tb_phys_instance_id)
elif self.cb_brainwaves.isChecked():
self.create_phys_ids(self.tb_trial_id,self.tb_time_start_trial,self.tb_time_end_trial,self.tb_brainwaves_timestamp,self.tb_phys_trial_id,self.tb_phys_instance_id)
#& CALCULATE MEDIA, SD, Z, f and Fn
if neulog_used == True:
phys_sample_rate = neulog_samples_mult
self.presentiment_instances = phys_sample_rate * int(self.sb_pre_screen.value())
elif neulog_used == False: #!
pass
if self.cb_skin_conductance.isChecked():
self.calculate_media_sd_Z_f_Fn(self.presentiment_instances,self.tb_trial_id,self.tb_skin_conductance_values,self.tb_phys_trial_id,self.tb_phys_instance_id,self.tb_skin_conductance_media,self.tb_skin_conductance_sd,self.tb_skin_conductance_Z,self.tb_skin_conductance_f,self.tb_skin_conductance_Fn)
if self.cb_heart_rate.isChecked():
self.calculate_media_sd_Z_f_Fn(self.presentiment_instances,self.tb_trial_id,self.tb_heart_rate_values,self.tb_phys_trial_id,self.tb_phys_instance_id,self.tb_heart_rate_media,self.tb_heart_rate_sd,self.tb_heart_rate_Z,self.tb_heart_rate_f,self.tb_heart_rate_Fn)
if self.cb_brainwaves.isChecked():
self.calculate_media_sd_Z_f_Fn(self.presentiment_instances,self.tb_trial_id,self.tb_brainwaves_values,self.tb_phys_trial_id,self.tb_phys_instance_id,self.tb_brainwaves_media,self.tb_brainwaves_sd,self.tb_brainwaves_Z,self.tb_brainwaves_f,self.tb_brainwaves_Fn)
#& CALCULATE D AND ZD
if self.cb_skin_conductance.isChecked():
self.calculate_D_Z(self.tb_stimulus_id,self.tb_skin_conductance_Fn, self.tb_skin_conductance_D,self.tb_skin_conductance_ZD)
if self.cb_heart_rate.isChecked():
self.calculate_D_Z(self.tb_stimulus_id,self.tb_heart_rate_Fn, self.tb_heart_rate_D,self.tb_heart_rate_ZD)
if self.cb_brainwaves.isChecked():
self.calculate_D_Z(self.tb_stimulus_id,self.tb_brainwaves_Fn, self.tb_brainwaves_D,self.tb_brainwaves_ZD)
#& ENDING MESSAGE
# Show message stating the end of the session
QMessageBox.about(self, "FINAL", "The session has finished. Thanks for your participation.")
#& ADD EXTRA INTERVAL 0-5s
else:
# Check if Psyerlon is being used
if str(psyleron.get_name()) == self.combo_rng_sources.currentText():
if psyleron.count_PsyREGs() >= 1:
# Get a random interval from the RNG, given: 1) the RNG source, 2) the
# number of bits, 3) the min interval, 4) the bit length of the min
# interval, 5) the max interval, and 6) the bit length of the max interval
after_interval_1000 = self.rng_get_bits(psyleron, int_after_bits, int_after_min_interval, len_bin_after_min_interval, int_after_max_interval, len_bin_after_max_interval)
after_interval = after_interval_1000 / 1000
# Clear and release the Psyleron
psyleron.clear_RNG()
psyleron.release_RNG()
else:
QMessageBox.about(self, "ERROR", "Psyleron didn't send bits")
# Check if Pseudo-RNG is being used
elif pseudo.name == self.combo_rng_sources.currentText():
# Get a random interval from the RNG, given: 1) the RNG source, 2) the
# number of bits, 3) the min interval, 4) the bit length of the min
# interval, 5) the max interval, and 6) the bit length of the max interval
after_interval_1000 = self.rng_get_bits(pseudo, int_after_bits, int_after_min_interval, len_bin_after_min_interval, int_after_max_interval, len_bin_after_max_interval)
after_interval = after_interval_1000 / 1000
else:
QMessageBox.about(self, "ERROR", "Unexpected RNG error.")
# Timer-wait according to random interval
loop = QEventLoop()
QTimer.singleShot(after_interval_1000, loop.quit)
loop.exec_()
#& ADD SESSION DATA
# Add the interval, duration, onset time, and end time of the trial
self.tb_dur_before_interval.append(str(int(before_interval)))
self.tb_dur_after_interval.append(str(int(after_interval)))
onset_duration += (trial_dur_constant + before_interval + after_interval)
self.tb_onset_to_trial.append(str(int(onset_duration)))
trial_duration += (trial_dur_constant + before_interval + after_interval)
self.tb_seconds_end_trial.append(str(int(trial_duration)))
t_f = datetime.now().strftime('%H:%M:%S.%f')[:-3]
self.tb_time_end_trial.append(t_f)
else: # Reboot
QMessageBox.about(self, "TRIAL STOPPED", "TRIAL stopped, wait until SESSION has stopped.")
break
else: # Reboot
QMessageBox.about(self, "SESSION STOPPED", "SESSION has stopped, wait for further instructions. Clear Session Data before next session.")
#########################
if __name__ == '__main__':
app = QApplication(sys.argv)
window = Create_Window()
window.show()
sys.exit(app.exec_())
|
server.py
|
from config import *
import re
import os
import cv2
import time
import json
import base64
import shutil
import datetime
import threading
import numpy as np
from bottle import route, run, static_file, request, BaseRequest, response
from ai import *
from tricks import *
BaseRequest.MEMFILE_MAX = 10000 * 1000
def get_request_image(name):
img = request.forms.get(name)
img = re.sub('^data:image/.+;base64,', '', img)
img = base64.urlsafe_b64decode(img)
img = np.frombuffer(img, dtype=np.uint8) # np.fromstring is deprecated for binary data
img = cv2.imdecode(img, -1)
return img
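# Hypothetical client-side sketch of the payload this helper expects; the
# URL, port, and file name below are illustrative assumptions, only the
# form-field format comes from the code above:
#
#   import base64, requests
#   with open('sketch.png', 'rb') as f:
#       b64 = base64.urlsafe_b64encode(f.read()).decode('ascii')
#   requests.post('http://127.0.0.1:8000/upload_sketch',
#                 data={'sketch': 'data:image/png;base64,' + b64,
#                       'room': 'new', 'step': 'new', 'method': 'colorization'})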
@route('/<filename:path>')
def send_static(filename):
return static_file(filename, root='game/')
@route('/')
def send_index():
return static_file("index.html", root='game/')
sketch_upload_pool = []
painting_pool = []
def handle_sketch_upload_pool():
if len(sketch_upload_pool) > 0:
room, sketch, method = sketch_upload_pool[0]
del sketch_upload_pool[0]
room_path = 'game/rooms/' + room
print('processing sketch in ' + room_path)
if os.path.exists(room_path + '/sketch.improved.jpg'):
improved_sketch = cv2.imread(room_path + '/sketch.improved.jpg')
print('lucky to find improved sketch')
else:
improved_sketch = sketch.copy()
improved_sketch = min_resize(improved_sketch, 512)
improved_sketch = cv_denoise(improved_sketch)
improved_sketch = sensitive(improved_sketch, s=5.0)
improved_sketch = go_tail(improved_sketch)
cv2.imwrite(room_path + '/sketch.improved.jpg', improved_sketch)
color_sketch = improved_sketch.copy()
std = cal_std(color_sketch)
print('std = ' + str(std))
need_de_painting = (std > 100.0) and method == 'rendering'
if method=='recolorization' or need_de_painting:
if os.path.exists(room_path + '/sketch.recolorization.jpg') or os.path.exists(room_path + '/sketch.de_painting.jpg'):
print('lucky to find lined sketch')
else:
improved_sketch = go_passline(color_sketch)
improved_sketch = min_k_down_c(improved_sketch, 2)
improved_sketch = cv_denoise(improved_sketch)
improved_sketch = go_tail(improved_sketch)
improved_sketch = sensitive(improved_sketch, s=5.0)
cv2.imwrite(room_path + '/sketch.recolorization.jpg', min_black(improved_sketch))
if need_de_painting:
cv2.imwrite(room_path + '/sketch.de_painting.jpg', min_black(improved_sketch))
print('In rendering mode, the user has uploaded a painting, and I have translated it into a sketch.')
print('sketch lined')
cv2.imwrite(room_path + '/sketch.colorization.jpg', min_black(color_sketch))
cv2.imwrite(room_path + '/sketch.rendering.jpg', eye_black(color_sketch))
print('sketch improved')
return
def handle_painting_pool():
if len(painting_pool) > 0:
room, ID, sketch, alpha, reference, points, method, lineColor, line = painting_pool[0]
del painting_pool[0]
room_path = 'game/rooms/' + room
print('processing painting in ' + room_path)
sketch_1024 = k_resize(sketch, 64)
if os.path.exists(room_path + '/sketch.de_painting.jpg') and method == 'rendering':
vice_sketch_1024 = k_resize(cv2.imread(room_path + '/sketch.de_painting.jpg', cv2.IMREAD_GRAYSCALE), 64)
sketch_256 = mini_norm(k_resize(min_k_down(vice_sketch_1024, 2), 16))
sketch_128 = hard_norm(sk_resize(min_k_down(vice_sketch_1024, 4), 32))
else:
sketch_256 = mini_norm(k_resize(min_k_down(sketch_1024, 2), 16))
sketch_128 = hard_norm(sk_resize(min_k_down(sketch_1024, 4), 32))
print('sketch prepared')
if debugging:
cv2.imwrite(room_path + '/sketch.128.jpg', sketch_128)
cv2.imwrite(room_path + '/sketch.256.jpg', sketch_256)
baby = go_baby(sketch_128, opreate_normal_hint(ini_hint(sketch_128), points, type=0, length=1))
baby = de_line(baby, sketch_128)
for _ in range(16):
baby = blur_line(baby, sketch_128)
baby = go_tail(baby)
baby = clip_15(baby)
if debugging:
cv2.imwrite(room_path + '/baby.' + ID + '.jpg', baby)
print('baby born')
composition = go_gird(sketch=sketch_256, latent=d_resize(baby, sketch_256.shape), hint=ini_hint(sketch_256))
if line:
composition = emph_line(composition, d_resize(min_k_down(sketch_1024, 2), composition.shape), lineColor)
composition = go_tail(composition)
cv2.imwrite(room_path + '/composition.' + ID + '.jpg', composition)
print('composition saved')
painting_function = go_head
if method == 'rendering':
painting_function = go_neck
print('method: ' + method)
result = painting_function(
sketch=sketch_1024,
global_hint=k_resize(composition, 14),
local_hint=opreate_normal_hint(ini_hint(sketch_1024), points, type=2, length=2),
global_hint_x=k_resize(reference, 14) if reference is not None else k_resize(composition, 14),
alpha=(1 - alpha) if reference is not None else 1
)
result = go_tail(result)
cv2.imwrite(room_path + '/result.' + ID + '.jpg', result)
cv2.imwrite('results/' + room + '.' + ID + '.jpg', result)
if debugging:
cv2.imwrite(room_path + '/icon.' + ID + '.jpg', max_resize(result, 128))
return
@route('/upload_sketch', method='POST')
def upload_sketch():
room = request.forms.get("room")
previous_step = request.forms.get("step")
if previous_step == 'sample':
new_room_id = datetime.datetime.now().strftime('%b%dH%HM%MS%S') + 'R' + str(np.random.randint(100, 999))
shutil.copytree('game/samples/' + room, 'game/rooms/' + new_room_id)
print('copy ' + 'game/samples/' + room + ' to ' + 'game/rooms/' + new_room_id)
room = new_room_id
ID = datetime.datetime.now().strftime('H%HM%MS%S')
method = request.forms.get("method")
if room == 'new':
room = datetime.datetime.now().strftime('%b%dH%HM%MS%S') + 'R' + str(np.random.randint(100, 999))
room_path = 'game/rooms/' + room
os.makedirs(room_path, exist_ok=True)
sketch = from_png_to_jpg(get_request_image('sketch'))
cv2.imwrite(room_path + '/sketch.original.jpg', sketch)
print('original_sketch saved')
else:
room_path = 'game/rooms/' + room
sketch = cv2.imread(room_path + '/sketch.original.jpg')
print('sketch upload pool get request: ' + method)
sketch_upload_pool.append((room, sketch, method))
while True:
time.sleep(0.1)
if os.path.exists(room_path + '/sketch.' + method + '.jpg'):
break
time.sleep(1.0)
return room + '_' + ID
@route('/request_result', method='POST')
def request_result():
room = request.forms.get("room")
previous_step = request.forms.get("step")
if previous_step == 'sample':
new_room_id = datetime.datetime.now().strftime('%b%dH%HM%MS%S') + 'R' + str(np.random.randint(100, 999))
shutil.copytree('game/samples/' + room, 'game/rooms/' + new_room_id)
print('copy ' + 'game/samples/' + room + ' to ' + 'game/rooms/' + new_room_id)
room = new_room_id
ID = datetime.datetime.now().strftime('H%HM%MS%S')
room_path = 'game/rooms/' + room
options_str = request.forms.get("options")
if debugging:
with open(room_path + '/options.' + ID + '.json', 'w') as f:
f.write(options_str)
options = json.loads(options_str)
method = options["method"]
sketch = cv2.imread(room_path + '/sketch.' + method + '.jpg', cv2.IMREAD_GRAYSCALE)
alpha = float(options["alpha"])
points = options["points"]
for p in points:
    p[1] = 1 - p[1] # flip the y coordinate
if options["hasReference"]:
reference = from_png_to_jpg(get_request_image('reference'))
cv2.imwrite(room_path + '/reference.' + ID + '.jpg', reference)
reference = s_enhance(reference)
else:
reference = None
print('request result room = ' + str(room) + ', ID = ' + str(ID))
lineColor = np.array(options["lineColor"])
line = options["line"]
painting_pool.append([room, ID, sketch, alpha, reference, points, method, lineColor, line])
while True:
time.sleep(0.1)
if os.path.exists(room_path + '/result.' + ID + '.jpg'):
break
time.sleep(1.0)
return room + '_' + ID
@route('/get_sample_list', method='POST')
def get_sample_list():
all_names = []
for (root, dirs, files) in os.walk("game/samples"):
all_names = dirs
break
all_names.sort()
result = json.dumps(all_names)
return result
@route('/save_as_sample', method='POST')
def save_as_sample():
room = request.forms.get("room")
step = request.forms.get("step")
previous_path = 'game/rooms/' + room
new_path = 'game/samples/' + room
os.makedirs(new_path, exist_ok=True)
def transfer(previous_file_name, new_file_name=None):
if new_file_name is None:
new_file_name = previous_file_name
if os.path.exists(previous_path + '/' + previous_file_name):
shutil.copy(previous_path + '/' + previous_file_name, new_path + '/' + new_file_name)
transfer('sketch.original.jpg')
transfer('sketch.improved.jpg')
transfer('sketch.colorization.jpg')
transfer('sketch.rendering.jpg')
transfer('sketch.recolorization.jpg')
transfer('sketch.de_painting.jpg')
transfer('result.' + step + '.jpg', 'result.sample.jpg')
transfer('reference.' + step + '.jpg', 'reference.sample.jpg')
transfer('icon.' + step + '.jpg', 'icon.sample.jpg')
transfer('composition.' + step + '.jpg', 'composition.sample.jpg')
transfer('options.' + step + '.json', 'options.sample.json')
print('saved')
return 'ok'
def server_loop():
while True:
time.sleep(0.173)
try:
handle_sketch_upload_pool()
handle_painting_pool()
except Exception as e:
print(e)
os.makedirs('game/rooms', exist_ok=True)
os.makedirs('results', exist_ok=True)
threading.Thread(target=server_loop).start()
if multiple_process:
run(host="0.0.0.0", port=8080, server='paste')
else:
run(host="0.0.0.0", port=8000, server='paste')
|
plugin.py
|
#!/usr/bin/env python3
#
# Electron Cash - a lightweight Bitcoin Cash client
# CashFusion - an advanced coin anonymizer
#
# Copyright (C) 2020 Mark B. Lundeberg
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Base plugin (non-GUI)
"""
import math
import threading
import time
import weakref
from typing import Optional, Tuple
from electroncash.address import Address, OpCodes
from electroncash.bitcoin import COINBASE_MATURITY, TYPE_SCRIPT
from electroncash.plugins import BasePlugin, hook, daemon_command
from electroncash.i18n import _, ngettext, pgettext
from electroncash.util import profiler, PrintError, InvalidPassword
from electroncash import Network, networks, Transaction
from .conf import Conf, Global
from .fusion import Fusion, can_fuse_from, can_fuse_to, is_tor_port, MIN_TX_COMPONENTS
from .server import FusionServer
from .covert import limiter
from .protocol import Protocol
from .util import get_coin_name
import random # only used to select random coins
TOR_PORTS = [9050, 9150]
# if more than <N> tor connections have been made recently (see covert.py) then don't start auto-fuses.
AUTOFUSE_RECENT_TOR_LIMIT_LOWER = 60
# if more than <N> tor connections have been made recently (see covert.py) then shut down auto-fuses that aren't yet started
AUTOFUSE_RECENT_TOR_LIMIT_UPPER = 120
# heuristic factor: guess that expected number of coins in wallet in equilibrium is = (this number) / fraction
COIN_FRACTION_FUDGE_FACTOR = 10
# for semi-linked addresses (that share txids in their history), allow linking them with this probability:
KEEP_LINKED_PROBABILITY = 0.1
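# e.g. a candidate bucket whose address history shares 2 txids with already
# selected buckets survives with probability 0.1**2 == 1% (see
# select_random_coins below).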
# how long an auto-fusion may stay in 'waiting' state (without starting-soon) before it cancels itself
AUTOFUSE_INACTIVE_TIMEOUT = 600
# how many random coins to select max in 1 batch -- used by select_random_coins
DEFAULT_MAX_COINS = 20
assert DEFAULT_MAX_COINS > 10
# how many autofusions can be running per-wallet
MAX_AUTOFUSIONS_PER_WALLET = 10
CONSOLIDATE_MAX_OUTPUTS = MIN_TX_COMPONENTS // 3
# Threshold for the amount (sats) for a wallet to be considered fully fused. This is to avoid re-fusing when the wallet gets dusted.
FUSE_DEPTH_THRESHOLD = 0.95
# We don't allow a fuse depth beyond this in the wallet UI
MAX_LIMIT_FUSE_DEPTH = 10
pnp = None
def get_upnp():
""" return an initialized UPnP singleton """
global pnp
if pnp is not None:
return pnp
try:
import miniupnpc
except ImportError:
raise RuntimeError("python miniupnpc module not installed")
u = miniupnpc.UPnP()
if u.discover() < 1:
raise RuntimeError("can't find UPnP server")
try:
u.selectigd()
except Exception as e:
raise RuntimeError("failed to connect to UPnP IGD")
pnp = u
return u
def select_coins(wallet):
""" Sort the wallet's coins into address buckets, returning two lists:
- Eligible addresses and their coins.
- Ineligible addresses and their coins.
An address is eligible if it satisfies all conditions:
- the address is unfrozen
- has 1, 2, or 3 utxo
- all utxo are confirmed (or matured in case of coinbases)
- has no SLP utxo or frozen utxo
"""
# First, select all the coins
eligible = []
ineligible = []
has_unconfirmed = False
has_coinbase = False
sum_value = 0
mincbheight = (wallet.get_local_height() + 1 - COINBASE_MATURITY if Conf(wallet).autofuse_coinbase
else -1) # -1 here causes coinbase coins to always be rejected
for addr in wallet.get_addresses():
acoins = list(wallet.get_addr_utxo(addr).values())
if not acoins:
continue # prevent inserting empty lists into eligible/ineligible
good = True
if addr in wallet.frozen_addresses:
good = False
for i,c in enumerate(acoins):
sum_value += c['value'] # tally up values regardless of eligibility
# If too many coins, any SLP tokens, any frozen coins, or any
# immature coinbase on the address -> flag all address coins as
# ineligible if not already flagged as such.
good = good and (
i < 3 # must not have too many coins on the same address*
and not c['slp_token'] # must not be SLP
and not c['is_frozen_coin'] # must not be frozen
and (not c['coinbase'] or c['height'] <= mincbheight) # if coinbase -> must be mature coinbase
)
# * = We skip addresses with too many coins, since they take up lots
# of 'space' for consolidation. TODO: there is possibility of
# disruption here, if we get dust spammed. Need to deal with
# 'dusty' addresses by ignoring / consolidating dusty coins.
# Next, detect has_unconfirmed & has_coinbase:
if c['height'] <= 0:
# Unconfirmed -> Flag as not eligible and set the has_unconfirmed flag.
good = False
has_unconfirmed = True
# Update has_coinbase flag if not already set
has_coinbase = has_coinbase or c['coinbase']
if good:
eligible.append((addr,acoins))
else:
ineligible.append((addr,acoins))
return eligible, ineligible, int(sum_value), bool(has_unconfirmed), bool(has_coinbase)
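# Example: an unfrozen address holding two confirmed, non-SLP, unfrozen coins
# lands in `eligible`; an address with four coins (i >= 3), or with any
# unconfirmed or immature-coinbase coin, lands in `ineligible` instead.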
def select_random_coins(wallet, fraction, eligible):
"""
Grab wallet coins with a certain probability, while also paying attention
to obvious linkages and possible linkages.
Returns list of list of coins (bucketed by obvious linkage).
"""
# First, we want to bucket coins together when they have obvious linkage.
# Coins that are linked together should be spent together.
# Currently, just look at address.
addr_coins = eligible
random.shuffle(addr_coins)
# While fusing we want to pay attention to semi-correlations among coins.
# When we fuse semi-linked coins, it increases the linkage. So we try to
# avoid doing that (but rarely, we just do it anyway :D).
# Currently, we just look at all txids touched by the address.
# (TODO this is a disruption vector: someone can spam multiple fusions'
# output addrs with massive dust transactions (2900 outputs in 100 kB)
# that make the plugin think that all those addresses are linked.)
result_txids = set()
result = []
num_coins = 0
for addr, acoins in addr_coins:
if num_coins >= DEFAULT_MAX_COINS:
break
elif num_coins + len(acoins) > DEFAULT_MAX_COINS:
continue
# For each bucket, we give a separate chance of joining.
if random.random() > fraction:
continue
# Semi-linkage check:
# We consider all txids involving the address, historical and current.
ctxids = {txid for txid, height in wallet.get_address_history(addr)}
collisions = ctxids.intersection(result_txids)
# Note each collision gives a separate chance of discarding this bucket.
if random.random() > KEEP_LINKED_PROBABILITY**len(collisions):
continue
# OK, no problems: let's include this bucket.
num_coins += len(acoins)
result.append(acoins)
result_txids.update(ctxids)
if not result:
# nothing was selected, just try grabbing first nonempty bucket
try:
res = next(coins for addr,coins in addr_coins if coins)
result = [res]
except StopIteration:
# all eligible buckets were cleared.
pass
return result
def get_target_params_1(wallet, wallet_conf, active_autofusions, eligible):
""" WIP -- TODO: Rename this function. """
wallet_conf = Conf(wallet)
mode = wallet_conf.fusion_mode
# Note each fusion 'consumes' a certain number of coins by freezing them,
# so that the next fusion has less eligible coins to work with. So each
# call to this may see a smaller n_buckets.
n_buckets = len(eligible)
if mode == 'normal':
return max(2, round(n_buckets / DEFAULT_MAX_COINS)), False
elif mode == 'fan-out':
return max(4, math.ceil(n_buckets / (COIN_FRACTION_FUDGE_FACTOR*0.65))), False
elif mode == 'consolidate':
if n_buckets < MIN_TX_COMPONENTS - CONSOLIDATE_MAX_OUTPUTS:
# Too few eligible buckets to make an effective consolidation.
return 0, False
# In the latter stages of consolidation, only do one fusion
# at a time with all-confirmed rule, to make sure each fusion's outputs
# may be consumed by the subsequent one.
# To avoid weird loops, try to calculate the TOTAL number of coins
# that are either 1) eligible or 2) being fused. (Should stay constant
# as fusions are added/cancelled)
n_coins = sum(len(acoins) for addr,acoins in eligible)
n_total = n_coins + sum(len(getattr(f, 'inputs', ())) for f in active_autofusions)
if n_total < DEFAULT_MAX_COINS*3:
return 1, True
# If coins are scarce then don't make more autofusions unless we
# have none.
if n_buckets < DEFAULT_MAX_COINS*2:
return 1, False
# We still have lots of coins left, so request another autofusion.
return MAX_AUTOFUSIONS_PER_WALLET, False
else: # 'custom'
target_num_auto = wallet_conf.queued_autofuse
confirmed_only = wallet_conf.autofuse_confirmed_only
return int(target_num_auto), bool(confirmed_only)
def get_target_params_2(wallet_conf, sum_value):
""" WIP -- TODO: Rename this function. """
mode = wallet_conf.fusion_mode
fraction = 0.1
if mode == 'custom':
# Determine the fraction that should be used
select_type, select_amount = wallet_conf.selector
if select_type == 'size' and int(sum_value) != 0:
# user wants to get a typical output of this size (in sats)
fraction = COIN_FRACTION_FUDGE_FACTOR * select_amount / sum_value
elif select_type == 'count' and int(select_amount) != 0:
# user wants this number of coins
fraction = COIN_FRACTION_FUDGE_FACTOR / select_amount
elif select_type == 'fraction':
# user wants this fraction
fraction = select_amount
# note: fraction at this point could be <0 or >1 but doesn't matter.
elif mode == 'consolidate':
fraction = 1.0
elif mode == 'normal':
fraction = 0.5
elif mode == 'fan-out':
fraction = 0.1
return fraction
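# Worked example: in 'custom' mode with selector ('count', 20) and
# COIN_FRACTION_FUDGE_FACTOR == 10, fraction == 10 / 20 == 0.5, i.e. each
# bucket joins a fusion with roughly 50% probability.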
class FusionPlugin(BasePlugin):
fusion_server = None
active = True
_run_iter = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs) # gives us self.config
# Do an initial check on the tor port
self.tor_port_good = None
t = threading.Thread(name = 'Fusion-scan_torport_initial', target = self.scan_torport)
t.start()
# quick lock for the following two WeakKeyDictionary variables
# Locking order wallet.lock -> plugin.lock.
self.lock = threading.Lock()
self.fusions = weakref.WeakKeyDictionary()
self.autofusing_wallets = weakref.WeakKeyDictionary() # wallet -> password
self.registered_network_callback = False
self.t_last_net_ok = time.monotonic()
self.remote_donation_address: str = '' # optionally announced by the remote server in 'serverhello' message
if tuple(self.config.get('cashfusion_server', ())) == ('cashfusion.electroncash.dk', 8787, False):
# User's config has the old default non-SSL server. If we see this,
# just wipe the config key so that the new default is used.
# But only reset once, after that let them go back if that is what
# they truly desire.
if self.config.get('cashfusion_server_defaultresetted', 0) < 1:
self.config.set_key('cashfusion_server', None)
self.config.set_key('cashfusion_server_defaultresetted', 1)
def on_close(self,):
super().on_close()
self.stop_fusion_server()
if self.registered_network_callback:
self.registered_network_callback = False
network = Network.get_instance()
if network:
network.unregister_callback(self.on_wallet_transaction)
self.active = False
def fullname(self):
return 'CashFusion'
def description(self):
return _("CashFusion Protocol")
def is_available(self):
return True
def set_remote_donation_address(self, address : str):
self.remote_donation_address = ((isinstance(address, str) and address) or '')[:100]
def get_server(self, ):
return Global(self.config).server
def set_server(self, host, port, ssl):
gconf = Global(self.config)
old = gconf.server
gconf.server = (host, port, ssl) # type/sanity checking done in setter
if old != gconf.server:
self.on_server_changed()
def get_torhost(self):
if self.has_auto_torport():
return Global.Defaults.TorHost
else:
return Global(self.config).tor_host
def set_torhost(self, host):
''' host should be a valid hostname '''
if not host: return
Global(self.config).tor_host = host
def has_auto_torport(self, ):
return Global(self.config).tor_port_auto
def get_torport(self, ):
''' Retrieve either the manual port or the autodetected port; may return None
if 'auto' mode and no Tor port has been autodetected. (this is non-blocking) '''
if self.has_auto_torport():
return self.tor_port_good
else:
return Global(self.config).tor_port_manual
def set_torport(self, port):
# port may be 'auto' or 'manual' or an int
gconf = Global(self.config)
if port == 'auto':
gconf.tor_port_auto = True
return
else:
gconf.tor_port_auto = False
if port == 'manual':
return # we're simply going to use whatever manual port was already set
assert isinstance(port, int)
gconf.tor_port_manual = port
def scan_torport(self, ):
''' Scan for Tor proxy on either the manual port or on a series of
automatic ports. This is blocking. Returns port if it's up, or None if
down / can't find. '''
host = self.get_torhost()
if self.has_auto_torport():
portlist = []
network = Network.get_instance()
if network:
tc = network.tor_controller
if tc and tc.is_enabled() and tc.active_socks_port:
portlist.append(tc.active_socks_port)
portlist.extend(TOR_PORTS)
else:
portlist = [ Global(self.config).tor_port_manual ]
for port in portlist:
if is_tor_port(host, port):
self.tor_port_good = port
break
else:
self.tor_port_good = None
return self.tor_port_good
def on_server_changed(self):
""" When the server is changed, we stop all extant fusions that are not
already 'running' in order to allow for the new change to take effect
immediately. """
self.remote_donation_address = ''
self.stop_all_fusions('Server changed', not_if_running=True)
def get_all_fusions(self, ):
""" Return all still-live fusion objects that have been created using .start_fusion(),
including autofusions and any other fusions. """
with self.lock:
fusions_and_times = list(self.fusions.items())
fusions_and_times.sort(key=lambda x:x[1])
return [f for f,t in fusions_and_times]
def stop_all_fusions(self, reason, *, not_if_running=True):
with self.lock:
for f in list(self.fusions):
f.stop(reason, not_if_running = not_if_running)
@staticmethod
def stop_autofusions(wallet, reason, *, not_if_running=True):
with wallet.lock:
try:
fusion_weakset = wallet._fusions_auto
except AttributeError:
return []
running = []
for f in list(fusion_weakset):
if not f.is_alive():
fusion_weakset.discard(f)
continue
f.stop(reason, not_if_running = not_if_running)
if f.status[0] == 'running':
running.append(f)
return running
def disable_autofusing(self, wallet):
with self.lock:
self.autofusing_wallets.pop(wallet, None)
Conf(wallet).autofuse = False
return self.stop_autofusions(wallet, 'Autofusing disabled', not_if_running=True)
def enable_autofusing(self, wallet, password):
if password is None and wallet.has_password():
raise InvalidPassword()
else:
wallet.check_password(password)
with self.lock:
self.autofusing_wallets[wallet] = password
Conf(wallet).autofuse = True
def is_autofusing(self, wallet):
with self.lock:
return (wallet in self.autofusing_wallets)
def add_wallet(self, wallet, password=None):
''' Attach the given wallet to fusion plugin, allowing it to be used in
fusions with clean shutdown. Also start auto-fusions for wallets that want
it (if no password).
'''
with wallet.lock:
# Generate wallet._fusions and wallet._fusions_auto; these must
# only be accessed with wallet.lock held.
# all fusions relating to this wallet, either as source or target
# or both.
wallet._fusions = weakref.WeakSet()
# fusions that were auto-started.
wallet._fusions_auto = weakref.WeakSet()
# cache: stores a map of txid -> fusion_depth (or False if txid is not a fuz tx)
wallet._cashfusion_is_fuz_txid_cache = dict()
# cache: stores a map of address -> fusion_depth if the address has fuz utxos
wallet._cashfusion_address_cache = dict()
# all accesses to the above must be protected by wallet.lock
if Conf(wallet).autofuse:
try:
self.enable_autofusing(wallet, password)
except InvalidPassword:
self.disable_autofusing(wallet)
if not self.registered_network_callback and wallet.network:
wallet.network.register_callback(self.on_wallet_transaction, ['new_transaction'])
self.registered_network_callback = True
def remove_wallet(self, wallet):
''' Detach the provided wallet; returns list of active fusion threads. '''
with self.lock:
self.autofusing_wallets.pop(wallet, None)
fusions = ()
try:
with wallet.lock:
fusions = list(wallet._fusions)
del wallet._fusions
del wallet._fusions_auto
del wallet._cashfusion_is_fuz_txid_cache
del wallet._cashfusion_address_cache
except AttributeError:
pass
return [f for f in fusions if f.is_alive()]
def start_fusion(self, source_wallet, password, coins, target_wallet = None, max_outputs = None, inactive_timeout = None):
""" Create and start a new Fusion object with current server/tor settings.
Both source_wallet.lock and target_wallet.lock must be held.
FIXME: this condition is begging for a deadlock to happen when the two wallets
are different. Need to find a better way if inter-wallet fusing actually happens.
"""
if target_wallet is None:
target_wallet = source_wallet # self-fuse
assert can_fuse_from(source_wallet)
assert can_fuse_to(target_wallet)
host, port, ssl = self.get_server()
if host == 'localhost':
# as a special exemption for the local fusion server, we don't use Tor.
torhost = None
torport = None
else:
torhost = self.get_torhost()
torport = self.get_torport()
if torport is None:
torport = self.scan_torport() # may block for a very short time ...
if torport is None:
self.notify_server_status(False, ("failed", _("Invalid Tor proxy or no Tor proxy found")))
raise RuntimeError("can't find tor port")
fusion = Fusion(self, target_wallet, host, port, ssl, torhost, torport)
fusion.add_coins_from_wallet(source_wallet, password, coins)
fusion.max_outputs = max_outputs
with self.lock:
fusion.start(inactive_timeout = inactive_timeout)
self.fusions[fusion] = time.time()
target_wallet._fusions.add(fusion)
source_wallet._fusions.add(fusion)
return fusion
def thread_jobs(self, ):
return [self]
def run(self, ):
# this gets called roughly every 0.1 s in the Plugins thread; downclock it to 5 s.
run_iter = self._run_iter + 1
if run_iter < 50:
self._run_iter = run_iter
return
else:
self._run_iter = 0
if not self.active:
return
dont_start_fusions = False
network = Network.get_instance()
if network and network.is_connected():
self.t_last_net_ok = time.monotonic()
else:
# Cashfusion needs an accurate picture of the wallet's coin set, so
# that we don't reuse addresses and we don't submit already-spent coins.
# Currently the network is not synced so we won't start new fusions.
dont_start_fusions = True
if time.monotonic() - self.t_last_net_ok > 31:
# If the network is disconnected for an extended period, we also
# shut down all waiting fusions. We can't wait too long because
# one fusion might succeed but then enter the 'time_wait' period
# where it is waiting to see the transaction on the network.
# After 60 seconds it gives up and then will unreserve addresses,
# and currently-waiting fusions would then grab those addresses when
# they begin rounds.
self.stop_all_fusions('Lost connection to Electron Cash server', not_if_running = True)
return
# Snapshot of autofusing list; note that remove_wallet may get
# called on one of the wallets, after lock is released.
with self.lock:
wallets_and_passwords = list(self.autofusing_wallets.items())
torcount = limiter.count
if torcount > AUTOFUSE_RECENT_TOR_LIMIT_UPPER:
# need tor cooldown, stop the waiting autofusions
for wallet, password in wallets_and_passwords:
self.stop_autofusions(wallet, 'Tor cooldown', not_if_running = True)
return
if torcount > AUTOFUSE_RECENT_TOR_LIMIT_LOWER:
# no urgent need to stop fusions, but don't queue up any more.
dont_start_fusions = True
for wallet, password in wallets_and_passwords:
with wallet.lock:
if not hasattr(wallet, '_fusions'):
continue
if not wallet.up_to_date:
# We want a good view of the wallet so we know which coins
# are unspent and confirmed, and we know which addrs are
# used. Note: this `continue` will bypass the potential .stop()
# below.
continue
for f in list(wallet._fusions_auto):
if not f.is_alive():
wallet._fusions_auto.discard(f)
active_autofusions = list(wallet._fusions_auto)
if dont_start_fusions and not active_autofusions:
continue
num_auto = len(active_autofusions)
wallet_conf = Conf(wallet)
eligible, ineligible, sum_value, has_unconfirmed, has_coinbase = select_coins(wallet)
target_num_auto, confirmed_only = get_target_params_1(wallet, wallet_conf, active_autofusions, eligible)
if confirmed_only and has_unconfirmed:
for f in list(wallet._fusions_auto):
f.stop('Wallet has unconfirmed coins... waiting.', not_if_running = True)
continue
fuse_depth = Conf(wallet).fuse_depth
if fuse_depth > 0:
sum_eligible_values = 0
sum_fuz_values = 0
for eaddr, ecoins in eligible:
ecoins_value = sum(ecoin['value'] for ecoin in ecoins)
sum_eligible_values += ecoins_value
if self.is_fuz_address(wallet, eaddr, require_depth=fuse_depth-1):
sum_fuz_values += ecoins_value
if sum_eligible_values != 0 and sum_fuz_values / sum_eligible_values >= FUSE_DEPTH_THRESHOLD:
continue
if not dont_start_fusions and num_auto < min(target_num_auto, MAX_AUTOFUSIONS_PER_WALLET):
# we don't have enough auto-fusions running, so start one
fraction = get_target_params_2(wallet_conf, sum_value)
chosen_buckets = select_random_coins(wallet, fraction, eligible)
coins = [c for l in chosen_buckets for c in l]
if not coins:
self.print_error("auto-fusion skipped due to lack of coins")
continue
if wallet_conf.fusion_mode == 'consolidate':
max_outputs = CONSOLIDATE_MAX_OUTPUTS
if len(chosen_buckets) < (MIN_TX_COMPONENTS - max_outputs):
self.print_error("consolidating auto-fusion skipped due to lack of unrelated coins")
continue
else:
max_outputs = None
try:
f = self.start_fusion(wallet, password, coins, max_outputs = max_outputs, inactive_timeout = AUTOFUSE_INACTIVE_TIMEOUT)
self.print_error("started auto-fusion")
except RuntimeError as e:
self.print_error(f"auto-fusion skipped due to error: {e}")
return
wallet._fusions_auto.add(f)
def start_fusion_server(self, network, bindhost, port, upnp = None, announcehost = None, donation_address = None):
if self.fusion_server:
raise RuntimeError("server already running")
donation_address = (isinstance(donation_address, Address) and donation_address) or None
self.fusion_server = FusionServer(self.config, network, bindhost, port, upnp = upnp, announcehost = announcehost, donation_address = donation_address)
self.fusion_server.start()
return self.fusion_server.host, self.fusion_server.port
def stop_fusion_server(self):
try:
self.fusion_server.stop('server stopped by operator')
self.fusion_server = None
except Exception:
pass
def update_coins_ui(self, wallet):
''' Default implementation does nothing. Qt plugin subclass overrides
this, which sends a signal to the main thread to update the coins tab.
This is called by the Fusion thread (in its thread context) when it
freezes & unfreezes coins. '''
def notify_server_status(self, b, tup : tuple = None):
''' The Qt plugin subclass implements this to tell the GUI about bad
servers. '''
if not b: self.print_error("notify_server_status:", b, str(tup))
@hook
def donation_address(self, window) -> Optional[Tuple[str,Address]]:
''' Plugin API: Returns a tuple of (description, Address) or None. This
is the donation address that we as a client got from the remote server
(as opposed to the donation address we announce if we are a server). '''
if self.remote_donation_address and Address.is_valid(self.remote_donation_address):
return (self.fullname() + " " + _("Server") + ": " + self.get_server()[0], Address.from_string(self.remote_donation_address))
@staticmethod
def wallet_can_fuse(wallet) -> bool:
return can_fuse_from(wallet) and can_fuse_to(wallet)
@staticmethod
def is_fuz_coin(wallet, coin, *, require_depth=0) -> Optional[bool]:
""" Returns True if the coin in question is definitely a CashFusion coin (uses heuristic matching),
or False if the coin in question is not from a CashFusion tx. Returns None if the tx for the coin
is not (yet) known to the wallet (None == inconclusive answer, caller may wish to try again later).
If require_depth is > 0, check recursively; will return True if all ancestors of the coin
up to require_depth are also CashFusion transactions belonging to this wallet.
Precondition: wallet must be a fusion wallet. """
require_depth = min(max(0, require_depth), 900) # paranoia: clamp to [0, 900]
cache = wallet._cashfusion_is_fuz_txid_cache
assert isinstance(cache, dict)
txid = coin['prevout_hash']
# check cache, if cache hit, return answer and avoid the lookup below
cached_val = cache.get(txid, None)
if cached_val is not None:
# cache stores either False, or a depth for which the predicate is true
if cached_val is False:
return False
elif cached_val >= require_depth:
return True
my_addresses_seen = set()
def check_is_fuz_tx():
tx = wallet.transactions.get(txid, None)
if tx is None:
# Not found in wallet.transactions so its fuz status is as yet "unknown". Indicate this.
return None
inputs = tx.inputs()
outputs = tx.outputs()
# We expect: OP_RETURN (4) FUZ\x00
fuz_prefix = bytes((OpCodes.OP_RETURN, len(Protocol.FUSE_ID))) + Protocol.FUSE_ID
# Step 1 - does it have the proper OP_RETURN lokad prefix?
for typ, dest, amt in outputs:
if amt == 0 and typ == TYPE_SCRIPT and dest.script.startswith(fuz_prefix):
break # lokad found, proceed to Step 2 below
else:
# Nope, lokad prefix not found
return False
# Step 2 - are at least 1 of the inputs from me? (DoS prevention measure)
for inp in inputs:
inp_addr = inp.get('address', None)
if inp_addr is not None and (inp_addr in my_addresses_seen or wallet.is_mine(inp_addr)):
my_addresses_seen.add(inp_addr)
if require_depth == 0:
return True # This transaction is a CashFusion tx
# [Optional] Step 3 - Check if all ancestors up to required_depth are also fusions
if not FusionPlugin.is_fuz_coin(wallet, inp, require_depth=require_depth-1):
# require_depth specified and not all required_depth parents were CashFusion
return False
if my_addresses_seen:
# require_depth > 0: This tx + all wallet ancestors were CashFusion transactions up to require_depth
return True
# Failure -- this tx has the lokad but no inputs are "from me".
wallet.print_error(f"CashFusion: txid \"{txid}\" has a CashFusion-style OP_RETURN but none of the "
f"inputs are from this wallet. This is UNEXPECTED!")
return False
# /check_is_fuz_tx
answer = check_is_fuz_tx()
if isinstance(answer, bool):
# maybe cache the answer if it's a definitive answer True/False
if require_depth == 0:
# we got an answer for this coin's tx itself
if not answer:
cache[txid] = False
elif not cached_val:
# only set the cached val if it was missing previously, to avoid overwriting higher values
cache[txid] = 0
elif answer and (cached_val is None or cached_val < require_depth):
# indicate true up to the depth we just checked
cache[txid] = require_depth
elif not answer and isinstance(cached_val, int) and cached_val >= require_depth:
# this should never happen
wallet.print_error(f"CashFusion: WARNING txid \"{txid}\" has inconsistent state in "
f"the _cashfusion_is_fuz_txid_cache")
if answer:
# remember this address as being a "fuzed" address and cache the positive reply
cache2 = wallet._cashfusion_address_cache
assert isinstance(cache2, dict)
addr = coin.get('address', None)
if addr:
my_addresses_seen.add(addr)
for addr in my_addresses_seen:
depth = cache2.get(addr, None)
if depth is None or depth < require_depth:
cache2[addr] = require_depth
return answer
@classmethod
def get_coin_fuz_count(cls, wallet, coin, *, require_depth=0):
""" Will return a fuz count for a coin. Unfused or unknown coins have count 0, coins
that appear in a fuz tx have count 1, coins whose wallet parent txs are all fuz are 2, 3, etc
depending on how far back the fuz predicate is satisfied.
This function only checks up to 10 ancestors deep, so the maximum return value is 10.
Precondition: wallet must be a fusion wallet. """
require_depth = min(max(require_depth, 0), MAX_LIMIT_FUSE_DEPTH - 1)
cached_ct = wallet._cashfusion_is_fuz_txid_cache.get(coin['prevout_hash'])
if isinstance(cached_ct, int) and cached_ct >= require_depth:
return cached_ct + 1
ret = 0
for i in range(cached_ct or 0, require_depth + 1, 1):
ret = i
if not cls.is_fuz_coin(wallet, coin, require_depth=i):
break
return ret
@classmethod
def is_fuz_address(cls, wallet, address, *, require_depth=0):
""" Returns True if address contains any fused UTXOs.
Optionally, specify require_depth, in which case True is returned
if any UTXOs for this address are sufficiently fused to the
specified depth.
If you want thread safety, caller must hold wallet locks.
Precondition: wallet must be a fusion wallet. """
assert isinstance(address, Address)
require_depth = max(require_depth, 0)
cache = wallet._cashfusion_address_cache
assert isinstance(cache, dict)
cached_val = cache.get(address, None)
if cached_val is not None and cached_val >= require_depth:
return True
utxos = wallet.get_addr_utxo(address)
for coin in utxos.values():
if cls.is_fuz_coin(wallet, coin, require_depth=require_depth):
if cached_val is None or cached_val < require_depth:
cache[address] = require_depth
return True
return False
@staticmethod
def on_wallet_transaction(event, *args):
""" Network object callback. Always called in the Network object's thread. """
if event == 'new_transaction':
# if this is a fusion wallet, clear the is_fuz_address() cache when new transactions arrive
# since we may have spent some utxos and so the cache needs to be invalidated
wallet = args[1]
if hasattr(wallet, '_cashfusion_address_cache'):
with wallet.lock:
wallet._cashfusion_address_cache.clear()
@daemon_command
def fusion_server_start(self, daemon, config):
# Usage:
# ./electron-cash daemon fusion_server_start <bindhost>(,<announcehost>) <port>
# ./electron-cash daemon fusion_server_start <bindhost>(,<announcehost>) <port> upnp
# ./electron-cash daemon fusion_server_start <bindhost>(,<announcehost>) <port> <donation_addr>
# ./electron-cash daemon fusion_server_start <bindhost>(,<announcehost>) <port> upnp <donation_addr>
# e.g.:
# ./electron-cash daemon fusion_server_start 0.0.0.0,myfusionserver.com 8787 upnp bitcoincash:qpxiweuqoiweweqeweqw
#
# The main server port will be bound on <bindhost>:<port>.
# Covert submissions will be bound on <bindhost>:<ephemeral_port> (the port is chosen by the OS)
# The main server will tell clients to connect to <announcehost>:<ephemeral_port> .
# The default announcehost is based on an autodetection system, which may not work for some server networking setups.
network = daemon.network
if not network:
return "error: cannot run fusion server without an SPV server connection"
def invoke(firstarg = '0.0.0.0', sport='8787', upnp_str = None, addr_str = None):
bindhost, *extrahosts = firstarg.split(',')
if len(extrahosts) > 1:
raise Exception("too many hosts")
elif len(extrahosts) == 1:
[announcehost,] = extrahosts
else:
announcehost = None
port = int(sport)
pnp = get_upnp() if upnp_str == 'upnp' else None
if not pnp and not addr_str:
# third arg may be addr_str, so swap the args
addr_str = upnp_str
upnp_str = None
addr = None
if addr_str:
assert Address.is_valid(addr_str), "Invalid donation address specified"
addr = Address.from_string(addr_str)
return self.start_fusion_server(network, bindhost, port, upnp = pnp, announcehost = announcehost, donation_address = addr)
try:
host, port = invoke(*config.get('subargs', ()))
except Exception as e:
import traceback, sys; traceback.print_exc(file=sys.stderr)
return f'error: {str(e)}'
return (host, port)
@daemon_command
def fusion_server_stop(self, daemon, config):
self.stop_fusion_server()
return 'ok'
@daemon_command
def fusion_server_status(self, daemon, config):
if not self.fusion_server:
return "fusion server not running"
return dict(poolsizes = {t: len(pool.pool) for t,pool in self.fusion_server.waiting_pools.items()})
@daemon_command
def fusion_server_fuse(self, daemon, config):
if self.fusion_server is None:
return
subargs = config.get('subargs', ())
if len(subargs) != 1:
return "expecting tier"
tier = int(subargs[0])
num_clients = self.fusion_server.start_fuse(tier)
return num_clients
|
data_utils.py
|
import threading
import traceback
from torch.multiprocessing import Process, Queue, Pool
import numpy as np
import os
import torch
def get_input(data, render):
real_image = data['image']
input_semantics, rotated_mesh, orig_landmarks, rotate_landmarks, \
rendered_images_erode, original_angles, Rd_a, rendered_images_rotate_artifacts\
= render.rotate_render(data['param_path'], real_image, data['M'])
output = {}
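# The tensors below are rescaled from [0, 1] to [-1, 1] via x * 2 - 1, the
# usual input range for tanh-output generators.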
real_image = real_image * 2 - 1
input_semantics = input_semantics * 2 - 1
rotated_mesh = rotated_mesh * 2 - 1
rendered_images_erode = rendered_images_erode * 2 - 1
Rd_a = Rd_a * 2 - 1
rendered_images_rotate_artifacts = rendered_images_rotate_artifacts * 2 - 1
output['image'] = real_image.cpu()
output['rendered_images_erode'] = rendered_images_erode.cpu()
output['mesh'] = input_semantics.cpu()
output['rotated_mesh'] = rotated_mesh.cpu()
output['Rd_a'] = Rd_a.cpu()
output['orig_landmarks'] = orig_landmarks.cpu()
output['rotated_landmarks'] = rotate_landmarks.cpu()
output['original_angles'] = original_angles.cpu()
output['rendered_images_rotate_artifacts'] = rendered_images_rotate_artifacts.cpu()
output['path'] = data['path']
return output
def get_test_input(data, render):
real_image = data['image']
rotated_mesh, rotate_landmarks, original_angles\
= render.rotate_render(data['param_path'], real_image, data['M']) # all of the key work happens in this call
output = {}
real_image = real_image * 2 - 1
rotated_mesh = rotated_mesh * 2 - 1
output['image'] = real_image.cpu()
output['rotated_mesh'] = rotated_mesh.cpu()
output['rotated_landmarks'] = rotate_landmarks.cpu()
output['original_angles'] = original_angles.cpu()
output['path'] = data['path']
return output
def get_multipose_test_input(data, render, yaw_poses, pitch_poses):
real_image = data['image']
num_poses = len(yaw_poses) + len(pitch_poses)
rotated_meshs = []
rotated_landmarks_list = []
original_angles_list = []
rotated_landmarks_list_106 = []
paths = []
real_images = []
pose_list = []
for i in range(2):
prefix = 'yaw' if i == 0 else 'pitch'
poses = yaw_poses if i == 0 else pitch_poses
for pose in poses:
if i == 0:
rotated_mesh, rotate_landmarks, original_angles, rotate_landmarks_106\
= render.rotate_render(data['param_path'], real_image, data['M'], yaw_pose=pose) # inputs: 66 3D float parameters and a 3*256*256 image
else:
rotated_mesh, rotate_landmarks, original_angles, rotate_landmarks_106\
= render.rotate_render(data['param_path'], real_image, data['M'], pitch_pose=pose)
rotated_meshs.append(rotated_mesh)
rotated_landmarks_list.append(rotate_landmarks)
rotated_landmarks_list_106.append(rotate_landmarks_106)
original_angles_list.append(original_angles)
paths += data['path']
pose_list += ['{}_{}'.format(prefix, pose) for i in range(len(data['path']))]
real_images.append(real_image)
rotated_meshs = torch.cat(rotated_meshs, 0)
rotated_landmarks_list = torch.cat(rotated_landmarks_list, 0)
rotated_landmarks_list_106 = torch.cat(rotated_landmarks_list_106, 0)
original_angles_list = torch.cat(original_angles_list, 0)
output = {}
real_image = real_image * 2 - 1
rotated_meshs = rotated_meshs * 2 - 1
output['image'] = real_image.cpu()
output['rotated_mesh'] = rotated_meshs.cpu().permute(0,3,1,2).contiguous() # 3*256*256
output['rotated_landmarks'] = rotated_landmarks_list.cpu() # 68*3
output['rotated_landmarks_106'] = rotated_landmarks_list_106.cpu() # 106*3
output['original_angles'] = original_angles_list.cpu() # a single float
output['path'] = paths
output['pose_list'] = pose_list # target poses, e.g. 9 face angles rotated about the y-axis
return output
class data_prefetcher():
def __init__(self, loader, opt, render_layer):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.opt = opt
self.render_layer = render_layer
self.preload()
def preload(self):
try:
data = next(self.loader)
except StopIteration:
self.next_input = None
return
if self.opt.isTrain:
self.next_input = get_input(data, self.render_layer)
elif self.opt.yaw_poses is None and self.opt.pitch_poses is None:
self.next_input = get_test_input(data, self.render_layer)
else: # multi-pose branch, taken when yaw/pitch poses are given
if self.opt.yaw_poses is not None:
if self.opt.posesrandom:
self.opt.yaw_poses = [round(np.random.uniform(-0.5, 0.5, 1)[0], 2) for k in range(len(self.opt.yaw_poses))]
else:
self.opt.yaw_poses = []
if self.opt.pitch_poses is not None:
if self.opt.posesrandom:
self.opt.pitch_poses = [round(np.random.uniform(-0.5, 0.5, 1)[0], 2) for k in range(len(self.opt.pitch_poses))]
else:
self.opt.pitch_poses = []
self.next_input = get_multipose_test_input(data, self.render_layer, self.opt.yaw_poses, self.opt.pitch_poses) # key step
with torch.cuda.stream(self.stream):
for k, v in self.next_input.items():
if type(v) == torch.Tensor:
self.next_input[k] = v.cuda(non_blocking=True)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
if input is not None:
for k in input.keys():
if type(input[k]) == torch.Tensor: # CUDA tensors are handed off to the downstream code here
input[k].record_stream(torch.cuda.current_stream())
self.preload()
return input
def prefetch_data(queue, dataloader, iter_counter, opt, render_layer): # grabs data; runs inside a child process
print("start prefetching data...")
np.random.seed(os.getpid())
for epoch in iter_counter.training_epochs():
prefetcher = data_prefetcher(dataloader, opt, render_layer) # build a prefetching iterator
input = prefetcher.next()
while input is not None:
try:
queue.put(input)
except Exception as e:
traceback.print_exc()
raise e
input = prefetcher.next()
def pin_memory(data_queue, pinned_data_queue, sema):
while True:
data = data_queue.get()
for k, v in data.items():
data[k] = v.pin_memory()
pinned_data_queue.put(data)
if sema.acquire(blocking=False):
return
def init_parallel_jobs(queue, dataloader, iter_counter, opt, render_layer):
if isinstance(dataloader, list):
tasks = [Process(target=prefetch_data, args=(queue, dataloader[i], iter_counter, opt, render_layer[i])) for i in range(opt.render_thread)]
else:
tasks = [Process(target=prefetch_data, args=(queue, dataloader, iter_counter, opt, render_layer)) for i in range(opt.render_thread)]
# task.daemon = True
for task in tasks:
task.start()
return tasks
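# Hypothetical usage sketch (queue size and variable names are assumptions,
# not part of this module):
#   queue = Queue(50)
#   tasks = init_parallel_jobs(queue, dataloader, iter_counter, opt, render_layer)
#   batch = queue.get() # consume prefetched batches in the training loop
#   for task in tasks:
#       task.terminate()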
|
train.py
|
"""
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import os
os.environ['OMP_NUM_THREADS'] = '1'
import argparse
import torch
from src.env import MultipleEnvironments
from src.model import PPO
from src.process import eval
import torch.multiprocessing as _mp
from torch.distributions import Categorical
import torch.nn.functional as F
import numpy as np
import shutil
def get_args():
parser = argparse.ArgumentParser(
"""Implementation of model described in the paper: Proximal Policy Optimization Algorithms for Super Mario Bros""")
parser.add_argument("--world", type=int, default=1)
parser.add_argument("--stage", type=int, default=1)
parser.add_argument("--action_type", type=str, default="simple")
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--gamma', type=float, default=0.9, help='discount factor for rewards')
parser.add_argument('--tau', type=float, default=1.0, help='parameter for GAE')
parser.add_argument('--beta', type=float, default=0.01, help='entropy coefficient')
parser.add_argument('--epsilon', type=float, default=0.2, help='parameter for Clipped Surrogate Objective')
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--num_epochs', type=int, default=10)
parser.add_argument("--num_local_steps", type=int, default=512)
parser.add_argument("--num_global_steps", type=int, default=5e6)
parser.add_argument("--num_processes", type=int, default=8)
parser.add_argument("--save_interval", type=int, default=10, help="Number of steps between savings")
parser.add_argument("--max_actions", type=int, default=200, help="Maximum repetition steps in test phase")
parser.add_argument("--log_path", type=str, default="tensorboard/ppo_super_mario_bros")
parser.add_argument("--saved_path", type=str, default="trained_models")
args = parser.parse_args()
return args
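# Example invocation (assuming this file is run as train.py):
#   python train.py --world 1 --stage 1 --num_processes 8 --lr 1e-4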
def train(opt):
if torch.cuda.is_available():
torch.cuda.manual_seed(123)
else:
torch.manual_seed(123)
if os.path.isdir(opt.log_path):
shutil.rmtree(opt.log_path)
os.makedirs(opt.log_path)
if not os.path.isdir(opt.saved_path):
os.makedirs(opt.saved_path)
mp = _mp.get_context("spawn")
envs = MultipleEnvironments(opt.world, opt.stage, opt.action_type, opt.num_processes)
model = PPO(envs.num_states, envs.num_actions)
torch_path = f"{opt.saved_path}/ppo_super_mario_bros_{opt.world}_{opt.stage}"
if os.path.isfile(torch_path):
model.load_state_dict(torch.load(torch_path))
print(f'loaded : {torch_path}')
if torch.cuda.is_available():
model.cuda()
model.share_memory()
process = mp.Process(target=eval, args=(opt, model, envs.num_states, envs.num_actions))
process.start()
optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
[agent_conn.send(("reset", None)) for agent_conn in envs.agent_conns]
curr_states = [agent_conn.recv() for agent_conn in envs.agent_conns]
curr_states = torch.from_numpy(np.concatenate(curr_states, 0))
if torch.cuda.is_available():
curr_states = curr_states.cuda()
curr_episode = 0
while True:
if curr_episode % opt.save_interval == 0 and curr_episode > 0:
torch.save(model.state_dict(), torch_path)
# torch.save(model.state_dict(), f"{torch_path}_{curr_episode}")
curr_episode += 1
old_log_policies = []
actions = []
values = []
states = []
rewards = []
dones = []
for _ in range(opt.num_local_steps):
states.append(curr_states)
logits, value = model(curr_states)
values.append(value.squeeze())
policy = F.softmax(logits, dim=1)
old_m = Categorical(policy)
action = old_m.sample()
actions.append(action)
old_log_policy = old_m.log_prob(action)
old_log_policies.append(old_log_policy)
if torch.cuda.is_available():
[agent_conn.send(("step", act)) for agent_conn, act in zip(envs.agent_conns, action.cpu())]
else:
[agent_conn.send(("step", act)) for agent_conn, act in zip(envs.agent_conns, action)]
state, reward, done, info = zip(*[agent_conn.recv() for agent_conn in envs.agent_conns])
state = torch.from_numpy(np.concatenate(state, 0))
if torch.cuda.is_available():
state = state.cuda()
reward = torch.cuda.FloatTensor(reward)
done = torch.cuda.FloatTensor(done)
else:
reward = torch.FloatTensor(reward)
done = torch.FloatTensor(done)
rewards.append(reward)
dones.append(done)
curr_states = state
_, next_value = model(curr_states)
next_value = next_value.squeeze()
old_log_policies = torch.cat(old_log_policies).detach()
actions = torch.cat(actions)
values = torch.cat(values).detach()
states = torch.cat(states)
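# Generalized Advantage Estimation (GAE), computed backwards in time:
#   delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_t) - V(s_t)
#   gae_t   = delta_t + gamma * tau * gae_{t+1}
# R collects gae_t + V(s_t), i.e. the return targets for the critic.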
gae = 0
R = []
for value, reward, done in list(zip(values, rewards, dones))[::-1]:
gae = gae * opt.gamma * opt.tau
gae = gae + reward + opt.gamma * next_value.detach() * (1 - done) - value.detach()
next_value = value
R.append(gae + value)
R = R[::-1]
R = torch.cat(R).detach()
advantages = R - values
for i in range(opt.num_epochs):
indice = torch.randperm(opt.num_local_steps * opt.num_processes)
for j in range(opt.batch_size):
batch_indices = indice[
int(j * (opt.num_local_steps * opt.num_processes / opt.batch_size)): int((j + 1) * (
opt.num_local_steps * opt.num_processes / opt.batch_size))]
logits, value = model(states[batch_indices])
new_policy = F.softmax(logits, dim=1)
new_m = Categorical(new_policy)
new_log_policy = new_m.log_prob(actions[batch_indices])
ratio = torch.exp(new_log_policy - old_log_policies[batch_indices])
actor_loss = -torch.mean(torch.min(ratio * advantages[batch_indices],
torch.clamp(ratio, 1.0 - opt.epsilon, 1.0 + opt.epsilon) *
advantages[
batch_indices]))
# critic_loss = torch.mean((R[batch_indices] - value) ** 2) / 2
critic_loss = F.smooth_l1_loss(R[batch_indices], value.squeeze())
entropy_loss = torch.mean(new_m.entropy())
total_loss = actor_loss + critic_loss - opt.beta * entropy_loss
optimizer.zero_grad()
total_loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optimizer.step()
print("Episode: {}. Total loss: {}".format(curr_episode, total_loss))
if __name__ == "__main__":
opt = get_args()
train(opt)
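# Hypothetical invocation, assuming get_args() exposes command-line flags
# matching the opt.* attributes used above (world, stage, lr, saved_path, ...):
#   python train.py --world 1 --stage 1 --lr 1e-4 --saved_path trained_models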
|
tracker.py
|
"""
Tracker script for DMLC
Implements the tracker control protocol
- start dmlc jobs
- start ps scheduler and rabit tracker
- help nodes to establish links with each other
Tianqi Chen
"""
# pylint: disable=invalid-name, missing-docstring, too-many-arguments, too-many-locals
# pylint: disable=too-many-branches, too-many-statements
from __future__ import absolute_import
import os
import sys
import socket
import struct
import subprocess
import argparse
import time
import logging
from threading import Thread
class ExSocket(object):
"""
Extension of socket to handle recv and send of special data
"""
def __init__(self, sock):
self.sock = sock
def recvall(self, nbytes):
res = []
nread = 0
while nread < nbytes:
chunk = self.sock.recv(min(nbytes - nread, 1024))
nread += len(chunk)
res.append(chunk)
return ''.join(res)
def recvint(self):
return struct.unpack('@i', self.recvall(4))[0]
def sendint(self, n):
self.sock.sendall(struct.pack('@i', n))
def sendstr(self, s):
self.sendint(len(s))
self.sock.sendall(s)
def recvstr(self):
slen = self.recvint()
return self.recvall(slen)
# magic number used to verify existence of data
kMagic = 0xff99
def get_some_ip(host):
return socket.getaddrinfo(host, None)[0][4][0]
def get_family(addr):
return socket.getaddrinfo(addr, None)[0][0]
class SlaveEntry(object):
def __init__(self, sock, s_addr):
slave = ExSocket(sock)
self.sock = slave
self.host = get_some_ip(s_addr[0])
magic = slave.recvint()
assert magic == kMagic, 'invalid magic number=%d from %s' % (magic, self.host)
slave.sendint(kMagic)
self.rank = slave.recvint()
self.world_size = slave.recvint()
self.jobid = slave.recvstr()
self.cmd = slave.recvstr()
self.wait_accept = 0
self.port = None
def decide_rank(self, job_map):
if self.rank >= 0:
return self.rank
if self.jobid != 'NULL' and self.jobid in job_map:
return job_map[self.jobid]
return -1
def assign_rank(self, rank, wait_conn, tree_map, parent_map, ring_map):
self.rank = rank
nnset = set(tree_map[rank])
rprev, rnext = ring_map[rank]
self.sock.sendint(rank)
# send parent rank
self.sock.sendint(parent_map[rank])
# send world size
self.sock.sendint(len(tree_map))
self.sock.sendint(len(nnset))
# send the rprev and next link
for r in nnset:
self.sock.sendint(r)
# send prev link
if rprev != -1 and rprev != rank:
nnset.add(rprev)
self.sock.sendint(rprev)
else:
self.sock.sendint(-1)
# send next link
if rnext != -1 and rnext != rank:
nnset.add(rnext)
self.sock.sendint(rnext)
else:
self.sock.sendint(-1)
while True:
ngood = self.sock.recvint()
goodset = set([])
            for _ in range(ngood):
goodset.add(self.sock.recvint())
assert goodset.issubset(nnset)
badset = nnset - goodset
conset = []
for r in badset:
if r in wait_conn:
conset.append(r)
self.sock.sendint(len(conset))
self.sock.sendint(len(badset) - len(conset))
for r in conset:
self.sock.sendstr(wait_conn[r].host)
self.sock.sendint(wait_conn[r].port)
self.sock.sendint(r)
nerr = self.sock.recvint()
if nerr != 0:
continue
self.port = self.sock.recvint()
rmset = []
            # all connections were successfully set up
for r in conset:
wait_conn[r].wait_accept -= 1
if wait_conn[r].wait_accept == 0:
rmset.append(r)
for r in rmset:
wait_conn.pop(r, None)
self.wait_accept = len(badset) - len(conset)
return rmset
class RabitTracker(object):
"""
tracker for rabit
"""
def __init__(self, hostIP, nslave, port=9091, port_end=9999):
sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
for port in range(port, port_end):
try:
sock.bind((hostIP, port))
self.port = port
break
except socket.error as e:
                if e.errno == 98:  # EADDRINUSE: try the next port
continue
else:
raise
sock.listen(256)
self.sock = sock
self.hostIP = hostIP
self.thread = None
self.start_time = None
self.end_time = None
self.nslave = nslave
logging.info('start listen on %s:%d', hostIP, self.port)
def __del__(self):
self.sock.close()
@staticmethod
    def get_neighbor(rank, nslave):
        rank = rank + 1
        ret = []
        if rank > 1:
            ret.append(rank // 2 - 1)
        if rank * 2 - 1 < nslave:
            ret.append(rank * 2 - 1)
        if rank * 2 < nslave:
            ret.append(rank * 2)
        return ret
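    # Example: with nslave = 7 the ranks form a binary heap:
    #   0 -> children 1, 2; 1 -> children 3, 4; 2 -> children 5, 6
    # so get_neighbor(1, 7) == [0, 3, 4] (parent 0, children 3 and 4).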
def slave_envs(self):
"""
        get environment variables for slaves
can be passed in as args or envs
"""
return {'DMLC_TRACKER_URI': self.hostIP,
'DMLC_TRACKER_PORT': self.port}
def get_tree(self, nslave):
tree_map = {}
parent_map = {}
for r in range(nslave):
tree_map[r] = self.get_neighbor(r, nslave)
            parent_map[r] = (r + 1) // 2 - 1
return tree_map, parent_map
def find_share_ring(self, tree_map, parent_map, r):
"""
get a ring structure that tends to share nodes with the tree
return a list starting from r
"""
nset = set(tree_map[r])
cset = nset - set([parent_map[r]])
if len(cset) == 0:
return [r]
rlst = [r]
cnt = 0
for v in cset:
vlst = self.find_share_ring(tree_map, parent_map, v)
cnt += 1
if cnt == len(cset):
vlst.reverse()
rlst += vlst
return rlst
def get_ring(self, tree_map, parent_map):
"""
get a ring connection used to recover local data
"""
assert parent_map[0] == -1
rlst = self.find_share_ring(tree_map, parent_map, 0)
assert len(rlst) == len(tree_map)
ring_map = {}
nslave = len(tree_map)
for r in range(nslave):
rprev = (r + nslave - 1) % nslave
rnext = (r + 1) % nslave
ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
return ring_map
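    # Example: for nslave = 3 the tree is 0 -> {1, 2}; find_share_ring then
    # yields a ring visiting all three ranks (e.g. 0 -> 1 -> 2 -> 0), and each
    # ring_map entry stores that cycle's (previous, next) ranks.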
def get_link_map(self, nslave):
"""
        get the link map; this is a bit hacky and calls for a better algorithm
        to place similar nodes together
"""
tree_map, parent_map = self.get_tree(nslave)
ring_map = self.get_ring(tree_map, parent_map)
rmap = {0 : 0}
k = 0
for i in range(nslave - 1):
k = ring_map[k][1]
rmap[k] = i + 1
ring_map_ = {}
tree_map_ = {}
parent_map_ = {}
for k, v in ring_map.items():
ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
for k, v in tree_map.items():
tree_map_[rmap[k]] = [rmap[x] for x in v]
for k, v in parent_map.items():
if k != 0:
parent_map_[rmap[k]] = rmap[v]
else:
parent_map_[rmap[k]] = -1
return tree_map_, parent_map_, ring_map_
def accept_slaves(self, nslave):
        # set of nodes that have finished the job
        shutdown = {}
        # set of nodes that are waiting for connections
        wait_conn = {}
        # maps job id to rank
        job_map = {}
        # list of workers pending rank assignment
pending = []
# lazy initialize tree_map
tree_map = None
while len(shutdown) != nslave:
fd, s_addr = self.sock.accept()
s = SlaveEntry(fd, s_addr)
if s.cmd == 'print':
msg = s.sock.recvstr()
logging.info(msg.strip())
continue
if s.cmd == 'shutdown':
assert s.rank >= 0 and s.rank not in shutdown
assert s.rank not in wait_conn
shutdown[s.rank] = s
                logging.debug('Receive %s signal from %d', s.cmd, s.rank)
continue
assert s.cmd == 'start' or s.cmd == 'recover'
# lazily initialize the slaves
if tree_map is None:
assert s.cmd == 'start'
if s.world_size > 0:
nslave = s.world_size
tree_map, parent_map, ring_map = self.get_link_map(nslave)
                # set of nodes still waiting to be assigned and started
                todo_nodes = list(range(nslave))
else:
assert s.world_size == -1 or s.world_size == nslave
if s.cmd == 'recover':
assert s.rank >= 0
rank = s.decide_rank(job_map)
# batch assignment of ranks
if rank == -1:
assert len(todo_nodes) != 0
pending.append(s)
if len(pending) == len(todo_nodes):
pending.sort(key=lambda x: x.host)
for s in pending:
rank = todo_nodes.pop(0)
if s.jobid != 'NULL':
job_map[s.jobid] = rank
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
if s.wait_accept > 0:
wait_conn[rank] = s
                        logging.debug('Receive %s signal from %s; assign rank %d',
s.cmd, s.host, s.rank)
if len(todo_nodes) == 0:
logging.info('@tracker All of %d nodes getting started', nslave)
self.start_time = time.time()
else:
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
                logging.debug('Receive %s signal from %d', s.cmd, s.rank)
if s.wait_accept > 0:
wait_conn[rank] = s
        logging.info('@tracker All nodes finished their jobs')
self.end_time = time.time()
logging.info('@tracker %s secs between node start and job finish',
str(self.end_time - self.start_time))
def start(self, nslave):
def run():
self.accept_slaves(nslave)
        self.thread = Thread(target=run)
        self.thread.daemon = True
self.thread.start()
def join(self):
        while self.thread.is_alive():
self.thread.join(100)
class PSTracker(object):
"""
Tracker module for PS
"""
def __init__(self, hostIP, cmd, port=9091, port_end=9999, envs=None):
"""
Starts the PS scheduler
"""
self.cmd = cmd
if cmd is None:
return
envs = {} if envs is None else envs
self.hostIP = hostIP
sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
for port in range(port, port_end):
try:
sock.bind(('', port))
self.port = port
sock.close()
break
except socket.error:
continue
env = os.environ.copy()
env['DMLC_ROLE'] = 'scheduler'
env['DMLC_PS_ROOT_URI'] = str(self.hostIP)
env['DMLC_PS_ROOT_PORT'] = str(self.port)
for k, v in envs.items():
env[k] = str(v)
        self.thread = Thread(
            target=lambda: subprocess.check_call(self.cmd, env=env, shell=True))
        self.thread.daemon = True
self.thread.start()
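        # Note: the scheduler port was probed above by binding and immediately
        # closing a throwaway socket; the chosen port is only handed to the
        # scheduler via DMLC_PS_ROOT_PORT, so another process could in
        # principle grab it in between.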
def join(self):
if self.cmd is not None:
            while self.thread.is_alive():
self.thread.join(100)
def slave_envs(self):
if self.cmd is None:
return {}
else:
return {'DMLC_PS_ROOT_URI': self.hostIP,
'DMLC_PS_ROOT_PORT': self.port}
def get_host_ip(hostIP=None):
if hostIP is None or hostIP == 'auto':
hostIP = 'ip'
if hostIP == 'dns':
hostIP = socket.getfqdn()
elif hostIP == 'ip':
from socket import gaierror
try:
hostIP = socket.gethostbyname(socket.getfqdn())
except gaierror:
            logging.warning('gethostbyname(socket.getfqdn()) failed... trying on hostname()')
hostIP = socket.gethostbyname(socket.gethostname())
if hostIP.startswith("127."):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# doesn't have to be reachable
s.connect(('10.255.255.255', 0))
hostIP = s.getsockname()[0]
return hostIP
def submit(nworker, nserver, fun_submit, hostIP='auto', pscmd=None):
if nserver == 0:
pscmd = None
envs = {'DMLC_NUM_WORKER' : nworker,
'DMLC_NUM_SERVER' : nserver}
hostIP = get_host_ip(hostIP)
rabit = RabitTracker(hostIP=hostIP, nslave=nworker)
pserver = PSTracker(hostIP=hostIP, cmd=pscmd, envs=envs)
envs.update(rabit.slave_envs())
envs.update(pserver.slave_envs())
rabit.start(nworker)
fun_submit(nworker, nserver, envs)
pserver.join()
    # the rabit tracker runs in a background thread; wait for it only when no PS scheduler is used
if nserver == 0:
rabit.join()
def start_rabit_tracker(args):
"""Standalone function to start rabit tracker.
Parameters
----------
args: arguments to start the rabit tracker.
"""
envs = {'DMLC_NUM_WORKER' : args.num_workers,
'DMLC_NUM_SERVER' : args.num_servers}
rabit = RabitTracker(hostIP=get_host_ip(args.host_ip), nslave=args.num_workers)
envs.update(rabit.slave_envs())
rabit.start(args.num_workers)
sys.stdout.write('DMLC_TRACKER_ENV_START\n')
# simply write configuration to stdout
for k, v in envs.items():
sys.stdout.write('%s=%s\n' % (k, str(v)))
sys.stdout.write('DMLC_TRACKER_ENV_END\n')
sys.stdout.flush()
rabit.join()
def main():
"""Main function if tracker is executed in standalone mode."""
parser = argparse.ArgumentParser(description='Rabit Tracker start.')
    parser.add_argument('--num-workers', required=True, type=int,
                        help='Number of worker processes to be launched.')
    parser.add_argument('--num-servers', default=0, type=int,
                        help='Number of server processes to be launched. Only used in PS jobs.')
    parser.add_argument('--host-ip', default=None, type=str,
                        help=('Host IP address; this is only needed '
                              'if the host IP cannot be automatically guessed.'))
parser.add_argument('--log-level', default='INFO', type=str,
choices=['INFO', 'DEBUG'],
help='Logging level of the logger.')
args = parser.parse_args()
fmt = '%(asctime)s %(levelname)s %(message)s'
if args.log_level == 'INFO':
level = logging.INFO
elif args.log_level == 'DEBUG':
level = logging.DEBUG
else:
raise RuntimeError("Unknown logging level %s" % args.log_level)
logging.basicConfig(format=fmt, level=level)
if args.num_servers == 0:
start_rabit_tracker(args)
else:
raise RuntimeError("Do not yet support start ps tracker in standalone mode.")
if __name__ == "__main__":
main()
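# Standalone usage, e.g.:
#   python tracker.py --num-workers 4 --log-level DEBUG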
|
sockets.py
|
from __future__ import absolute_import
import socket
import sys
import threading
import numpy as np
# FIXME close sockets when simulator is closed, remove SO_REUSEPORT
# Currently Nengo does not provide a mechanism for this, so for now we allow
# ports to be reused to avoid problems with addresses already in use (which
# would especially occur in the GUI).
# TODO better handling of shuffled packets
# If packets get shuffled during transmission, we only keep the first packet
# with a future timestamp and drop all packets with an earlier timestamp if
# they arrive after that packet. Those might still be usable if the current
# simulation time does not exceed the timestamp of those packets. This could
# probably be solved with a priority queue (Python module heapq) to insert
# future packets.
# TODO IPv6 support?
class ConnectionTimeout(RuntimeError):
pass
class _UDPSocket(object):
def __init__(self, addr, dims, byte_order, timeout=None):
self.addr = addr
self.dims = dims
if byte_order == "little":
byte_order = "<"
elif byte_order == "big":
byte_order = ">"
        if byte_order not in "<>=":
            raise ValueError(
                "byte_order must be one of '<', '>', '=', 'little', 'big'.")
self.byte_order = byte_order
if np.isscalar(timeout):
self.timeout = (timeout, timeout)
else:
self.timeout = timeout
self._buffer = np.empty(dims + 1, dtype="%sf8" % byte_order)
self._buffer[0] = np.nan
self._socket = None
@property
def t(self):
return self._buffer[0]
@property
def x(self):
return self._buffer[1:]
@property
def closed(self):
return self._socket is None
def open(self):
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if sys.platform.startswith('bsd') or sys.platform.startswith('darwin'):
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
else:
# Linux >= 3.9 has SO_REUSEPORT, but does load balancing for it.
# We want all data to go the last opened socket.
# More details:
# https://stackoverflow.com/questions/14388706/socket-options-so-reuseaddr-and-so-reuseport-how-do-they-differ-do-they-mean-t?rq=1
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def bind(self):
self._socket.bind(self.addr)
def recv(self, timeout):
self._socket.settimeout(timeout)
self._socket.recv_into(self._buffer.data)
print("socket recv data = "+str(self._buffer))
def send(self, t, x):
self._buffer[0] = t
self._buffer[1:] = x
print("send data = "+str(self._buffer))
self._socket.sendto(self._buffer.tobytes(), self.addr)
def close(self):
if not self.closed:
self._socket.close()
self._socket = None
class SocketStep(object):
r"""Handles the step for socket processes.
    One critical thing here is to align local and remote timesteps if the
    timestep width (dt) differs. To figure out the right thing to do, we have
    to consider two cases:
    1. Remote dt is smaller than or equal to local dt.
    2. Remote dt is larger than local dt.
    But first, some notation: the local timestamp of the current step is
    denoted with :math:`t`, the local timestep width with :math:`dt`. For the
    corresponding remote variables :math:`t'` and :math:`dt'` are used;
    :math:`t'` is the timestamp read from a received remote packet.
It is also helpful to visualize the timeline with a little diagram:
.. code-block:: none
1 2 3 4 5 6 7 8 timestep indices
[ | ][ | ][ | ][ | ][ | ][ | ][ | ][ | ]
[ | ][ | ][ | ][
1 2 3 timestep indices
Each timestep can be represented as an interval ``[ | ]``. The ``|``
denotes the middle of the interval and corresponds to :math:`t` and
:math:`t'` respectively. The width of each ``[ | ]`` corresponds to
:math:`dt` (or :math:`dt'`).
Let us consider the first case. The bottom row in the diagram denotes the
local end in this case. Sending packets is simple: we can just send
a packet in each timestep because the remote end would be able to process
even more data.
    For receiving packets, we have multiple options as we are potentially
    getting more packets than timesteps. We could average over multiple
    packets, use the packet closest to the timestep interval mean, or use the
    first packet that falls into the local timestep interval. In the code here,
    we are using that last option because it does not require knowledge of the
    timestep between sent packets on the remote end (depending on the
    implementation it might just send a packet every timestep or adjust the
    sending frequency to the local :math:`dt`).
    The logic described in text here can be expressed as an inequality for
when to use a packet: :math:`t - dt/2 <= t' < t + dt/2`. In the `recv`
method this inequality is split up into two parts. The left part is handled
by the while loop (because it is a while and not an if condition, the logic
of the condition gets inverted). The right inequality is handled by the
    following if condition. Note that we set :math:`dt' = dt` if
:math:`dt' <= dt` and thus we can use :math:`dt'` instead of :math:`dt`
which allows us to use exactly the same code for the second case discussed
next.
In the second case the top row in the diagram corresponds to the remote
end. When receiving data, each local timestep should use the remote value
from the remote timestep with the largest overlap in the interval (because
that value will be most representative for the given local timestep). So
given the picture above, local timesteps 1, 2, 3 should use the remote
value for timestep 1; 4, 5 the value for 2; 6, 7, 8 the value for 3. Thus,
the first local timestep that overlaps more than 50% with the next remote
timestep interval should receive a new packet. Expressed as an equation, if
:math:`t' + dt'/2 < t` (where :math:`t'` is the last received timestamp),
a new packet should be received. Note that this is equivalent to the left
inequality obtained in the first case, so we don't need special handling
for this case. Also, the right inequality applies. If the received value
does not fulfil :math:`t' - dt'/2 < t` (where :math:`t'` is now the
timestep of the newly received packet), it is a value that corresponds to
timesteps that are still in the future and should not be used yet.
When sending data, we could send a packet every timestep, but this would
flood the remote end with packets that it does not use (at least currently
where only a single value is used and no averaging is done). So, we want
to send the next packet at the :math:`t` closest to :math:`t' + dt'` (where
:math:`t'` is the timestamp of the last sent packet). Expressed as an
    equation, a packet should be sent when :math:`t' + dt' <= t + dt/2`.
"""
def __init__(self, dt, send=None, recv=None,
remote_dt=None, connection_timeout=None):
        self.send_socket = send
        self.recv_socket = recv
        self.connection_timeout = connection_timeout
        self.dt = dt
        if remote_dt is None:
            remote_dt = dt
        # Cannot run faster than the local dt.
        self.remote_dt = max(remote_dt, dt)
# State used by the step function
self.value = np.zeros(0 if self.recv_socket is None
else self.recv_socket.dims)
def __call__(self, t, x=None):
"""The step function run on each timestep.
When both sending and receiving, the sending frequency is
regulated by comparing the local and remote time steps. Information
is sent when the current local timestep is closer to the remote
time step than the next local timestep.
"""
if t <= 0.: # Nengo calling this function to figure out output size
return self.value
# Send must happen before receive to avoid deadlock situations, i.e.
# if both ends tried to receive first, both would block. Even with
# a timeout, the timestamps would not align to the expected timestamps
# anymore.
if self.send_socket is not None:
assert x is not None, "A sender must receive input"
self.send(t, x)
if self.recv_socket is not None:
self.recv(t)
return self.value
def __del__(self):
self.close()
def close(self):
if self.send_socket is not None:
self.send_socket.close()
if self.recv_socket is not None:
self.recv_socket.close()
def recv(self, t):
# Receive initial packet
if np.isnan(self.recv_socket.t):
try:
self.recv_socket.recv(self.connection_timeout)
except socket.timeout:
raise ConnectionTimeout(
"Did not receive initial packet within connection "
"timeout.")
self._update_value()
# Wait for packet that is not timestamped in the past
# (also skips receiving if we do not expect a new remote package yet)
while self.recv_socket.t < t - self.remote_dt / 2.:
self.recv_socket.recv(self.recv_socket.timeout)
# Use value if not in the future
if self.recv_socket.t < t + self.remote_dt / 2.:
self._update_value()
def _update_value(self):
# Value needs to be copied, otherwise it might be overwritten
# prematurely by a packet for a future timestep.
self.value = np.array(self.recv_socket.x)
def send(self, t, x):
# Calculate if it is time to send the next packet.
# Ideal time to send is the last sent time + remote_dt, and we
# want to find out if current or next local time step is closest.
if (np.isnan(self.send_socket.t) or
(t + self.dt / 2.) >= (self.send_socket.t + self.remote_dt)):
self.send_socket.send(t, x)
class UDPSendReceiveSocket(object):
"""A process for UDP communication to and from a Nengo model.
The *size_in* and *size_out* attributes determines the dimensions of the
sent and received data.
The incoming UDP packets are expected to start with the timestep followed
by the values for that timestep. Each value should be encoded as 8-byte
floating point number. The outgoing packets follow the same format.
    A received packet will be used if its timestep is within a window
with the width of *remote_dt* centered around the current time.
Parameters
----------
listen_addr : tuple
A tuple *(listen_interface, port)* denoting the local address to listen
on for incoming data.
remote_addr : tuple
A tuple *(host, port)* denoting the remote address to send data to
remote_dt : float, optional (Default: None)
The timestep of the remote simulation. Attempts to send and receive
data will be throttled to match this value if it exceeds the local
*dt*. If not given, it is assumed that the remote *dt* matches the
local *dt* (which is determined automatically).
connection_timeout : float, optional (Default: 300.)
        Initial timeout when waiting to receive the initial packet
        establishing the connection.
    recv_timeout : 2-tuple or float or None, optional (Default: 0.1)
        Timeout for socket receive operations in each timestep. If *None*, there
        is no timeout (block until a packet is received). A float denotes a
fixed timeout. A 2-tuple gives a minimum and maximum timeout and the
timeout will be adjusted adaptively between these two values.
byte_order : str, optional (Default: '=')
Specify 'big' or 'little' endian data format.
Possible values: 'big', '>', 'little', '<', '='.
'=' uses the system default.
"""
def __init__(
self, listen_addr, remote_addr, remote_dt=None,
connection_timeout=300., recv_timeout=0.1, byte_order='='):
super(UDPSendReceiveSocket, self).__init__()
self.listen_addr = listen_addr
self.remote_addr = remote_addr
self.remote_dt = remote_dt
self.connection_timeout = connection_timeout
self.recv_timeout = recv_timeout
self.byte_order = byte_order
print("udpsendrecv init")
def make_step(self, input_dimensions, output_dimensions, dt):
recv = _UDPSocket(
self.listen_addr, output_dimensions, self.byte_order,
timeout=self.recv_timeout)
recv.open()
recv.bind()
send = _UDPSocket(self.remote_addr, input_dimensions, self.byte_order)
send.open()
print("make_Step")
return SocketStep(
dt=dt,
send=send, recv=recv,
remote_dt=self.remote_dt,
connection_timeout=self.connection_timeout)
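# A minimal usage sketch (addresses and dimensions are made-up placeholders),
# assuming two simulations exchanging 2-dimensional vectors at dt = 0.001:
#   proc = UDPSendReceiveSocket(listen_addr=("0.0.0.0", 5005),
#                               remote_addr=("127.0.0.1", 5006))
#   step = proc.make_step(input_dimensions=2, output_dimensions=2, dt=0.001)
#   value = step(t, x)   # called once per timestep by the simulator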
class TCPcommandSocket(object):
    def __init__(self, local_addr, remote_addr, remote_port, connection_timeout=300.):
        self.local_addr = local_addr
        self.remote_addr = remote_addr
        self.remote_port = remote_port
        self.connection_timeout = connection_timeout
        self.list_status = ['start', 'pause', 'restart', 'stop', 'None']
        self.sim_status = self.list_status[4]
    def connect_host(self):
        print("tcp connect")
        connect_thread = threading.Thread(target=self.connect_thread_function)
        connect_thread.start()
    def connect_thread_function(self):
        self.send_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.send_sock.connect((self.remote_addr, self.remote_port))
        self.send_sock.send(
            ("<" + self.local_addr + "> tcpconnection").encode("utf-8"))
        self.sim_status = self.list_status[0]
        while True:
            data = self.send_sock.recv(1024)
            if not data:
                # An empty read means the remote side closed the connection;
                # stop instead of busy-looping on it.
                break
            data = data.decode('utf-8')
            if data in self.list_status:
                self.sim_status = data
                print("sim status " + self.sim_status)
            else:
                print("Invalid command")
    def CleanUP(self):
        self.send_sock.close()
|
handler.py
|
# -*- coding: utf-8 -*-
import logging
import random
import threading
from copy import copy
from datetime import timedelta
from random import randint
from time import sleep
from typing import Dict, List, Optional
from urllib.parse import parse_qs, unquote, urlparse
import pytz
import requests
from django.conf import settings
from django.db import transaction
from django.db.transaction import get_connection
from django.utils import timezone, translation
from django.utils.translation import activate
from django.utils.translation import gettext as _
from online.models import (
TournamentGame,
TournamentGamePlayer,
TournamentNotification,
TournamentPlayers,
TournamentStatus,
)
from online.parser import TenhouParser
from player.models import Player
from tournament.models import OnlineTournamentRegistration
from utils.general import make_random_letters_and_digit_string
from utils.pantheon import add_tenhou_game_to_pantheon, add_user_to_pantheon, get_pantheon_swiss_sortition
from utils.tenhou.helper import parse_names_from_tenhou_chat_message
logger = logging.getLogger("tournament_bot")
class TournamentHandler:
# in minutes
TOURNAMENT_BREAKS_TIME = [5, 5, 5, 5, 5, 5, 30, 5, 5, 5, 5, 5, 5]
# TOURNAMENT_BREAKS_TIME = [5, 5, 30, 5, 5, 5]
TELEGRAM_DESTINATION = "tg"
DISCORD_DESTINATION = "ds"
tournament = None
lobby = None
game_type = None
total_games = None
destination = None
def init(self, tournament, lobby, game_type, destination):
self.tournament = tournament
self.lobby = lobby
self.game_type = game_type
self.destination = destination
TournamentStatus.objects.get_or_create(tournament=self.tournament)
def get_status(self):
return TournamentStatus.objects.get(tournament_id=self.tournament.id)
def get_tournament_status(self):
status = self.get_status()
if not status.current_round:
confirmed_players = TournamentPlayers.objects.filter(tournament=self.tournament).count()
if status.registration_closed:
return _("Games will start at 7-30 AM UTC. Confirmed players: %(confirmed_players)s.") % {
"confirmed_players": confirmed_players
}
else:
return _("Confirmation phase is in progress. Confirmed players: %(confirmed_players)s.") % {
"confirmed_players": confirmed_players
}
# if status.current_round == self.tournament.number_of_sessions:
# return _("The tournament is over. Thank you for participating!")
if status.end_break_time:
now = timezone.now()
if now > status.end_break_time:
return _("Games will start soon.")
minutes_dict = {
1: "минуту",
2: "минуты",
3: "минуты",
4: "минуты",
21: "минуту",
22: "минуты",
23: "минуты",
24: "минуты",
}
seconds_dict = {
1: "секунду",
2: "секунды",
3: "секунды",
4: "секунды",
21: "секунду",
22: "секунды",
23: "секунды",
24: "секунды",
31: "секунду",
32: "секунды",
33: "секунды",
34: "секунды",
41: "секунду",
42: "секунды",
43: "секунды",
44: "секунды",
51: "секунду",
52: "секунды",
53: "секунды",
54: "секунды",
}
delta = status.end_break_time - now
language = translation.get_language()
if delta.seconds > 60:
                minutes = delta.seconds // 60 % 60
                seconds = delta.seconds - minutes * 60
else:
minutes = None
seconds = delta.seconds
if language == "en":
if minutes:
date = "{} minutes {} seconds".format(minutes, seconds)
else:
date = "{} seconds".format(seconds)
else:
if minutes:
date = "{} {} и {} {}".format(
minutes, minutes_dict.get(minutes, "минут"), seconds, seconds_dict.get(seconds, "секунд")
)
else:
date = "{} {}".format(delta.seconds, seconds_dict.get(delta.seconds, "секунд"))
return _("Break. The next tour will start in %(date)s.") % {"date": date}
finished_games_count = (
TournamentGame.objects.filter(tournament=self.tournament)
.filter(tournament_round=status.current_round)
.filter(status=TournamentGame.FINISHED)
.count()
)
failed_games_count = (
TournamentGame.objects.filter(tournament=self.tournament)
.filter(tournament_round=status.current_round)
.filter(status=TournamentGame.FAILED_TO_START)
.count()
)
total_games = (
TournamentGame.objects.filter(tournament=self.tournament)
.filter(tournament_round=status.current_round)
.count()
)
message = _("Stage %(current_round)s of %(total_rounds)s.") % {
"current_round": status.current_round,
"total_rounds": self.tournament.number_of_sessions,
}
message += " "
message += _("Finished games: %(finished)s/%(total)s.") % {
"finished": finished_games_count,
"total": total_games,
}
if failed_games_count:
message += " "
message += _("Several games could not start. The administrator will fix this soon.")
return message
def open_registration(self):
status = self.get_status()
status.registration_closed = False
status.save()
self.create_notification(
TournamentNotification.CONFIRMATION_STARTED,
kwargs={"lobby_link": self.get_lobby_link(), "rating_link": self.get_rating_link()},
)
def close_registration(self):
status = self.get_status()
status.registration_closed = True
status.save()
confirmed_players = TournamentPlayers.objects.filter(tournament=self.tournament).count()
self.create_notification(
TournamentNotification.CONFIRMATION_ENDED,
{"lobby_link": self.get_lobby_link(), "confirmed_players": confirmed_players},
)
def send_team_names_to_pantheon(self):
registrations = TournamentPlayers.objects.filter(tournament=self.tournament)
team_names = {}
for registration in registrations:
team_names[registration.pantheon_id] = registration.team_name
data = {
"jsonrpc": "2.0",
"method": "updatePlayersTeams",
"params": {"eventId": settings.PANTHEON_EVENT_ID, "teamNameMap": team_names},
"id": make_random_letters_and_digit_string(),
}
headers = {"X-Auth-Token": settings.PANTHEON_ADMIN_TOKEN}
response = requests.post(settings.PANTHEON_URL, json=data, headers=headers)
if response.status_code == 500:
logger.error("Log add. Pantheon 500.")
return "Pantheon 500 error"
content = response.json()
if content.get("error"):
logger.error("Log add. Pantheon error. {}".format(content.get("error")))
return "Pantheon {} error".format(content.get("error"))
return "Готово"
def add_game_log(self, log_link):
status = self.get_status()
error_message = _("This is not looks like a link to the game log.")
log_link = log_link.replace("https://", "http://")
log_link = log_link.strip()
if not log_link.startswith("http://tenhou.net/"):
return error_message, False
attributes = parse_qs(urlparse(log_link).query)
if "log" not in attributes:
return error_message, False
log_id = attributes["log"][0]
if TournamentGame.objects.filter(log_id=log_id).exists():
return _("The game has been added. Thank you."), True
try:
parser = TenhouParser()
players = parser.get_player_names(log_id)
        except Exception:
            return (
                _("Failed to add this game. Try again in a moment, or check that you copied the entire link of the game log."),
                False,
            )
with transaction.atomic():
cursor = get_connection().cursor()
cursor.execute(f"LOCK TABLE {TournamentGame._meta.db_table}")
try:
games = (
TournamentGame.objects.filter(tournament=self.tournament)
.filter(game_players__player__tenhou_username__in=players)
.filter(tournament_round=status.current_round)
.distinct()
)
error_message = _("Fail to add game log. Contact the administrator %(admin_username)s.") % {
"admin_username": self.get_admin_username()
}
                if games.count() >= 2:
                    logger.error("Log add. Too many games matched.")
                    return error_message, False
                game = games.first()
                if game is None:
                    logger.error("Log add. No game found for players {}.".format(players))
                    return error_message, False
                game.log_id = log_id
                game.save()
finally:
cursor.close()
response = add_tenhou_game_to_pantheon(log_link)
if response.status_code == 500:
logger.error("Log add. Pantheon 500.")
return error_message, False
content = response.json()
if content.get("error"):
logger.error("Log add. Pantheon error. {}".format(content.get("error")))
return error_message, False
game_info = content["result"]["games"][0]
pantheon_url = f"https://gui.mjtop.net/eid{settings.PANTHEON_EVENT_ID}/game/{game_info['hash']}"
pantheon_players = {}
for pantheon_player_id in content["result"]["players"].keys():
pantheon_players[pantheon_player_id] = {
"id": pantheon_player_id,
"tenhou_nickname": content["result"]["players"][pantheon_player_id]["tenhou_id"],
"pantheon_name": content["result"]["players"][pantheon_player_id]["display_name"],
"score": 0,
"place": 0,
"rating_delta": 0,
}
for pantheon_player_id in game_info["final_results"].keys():
pantheon_players[pantheon_player_id]["score"] = game_info["final_results"][pantheon_player_id]["score"]
pantheon_players[pantheon_player_id]["place"] = game_info["final_results"][pantheon_player_id]["place"]
pantheon_players[pantheon_player_id]["rating_delta"] = game_info["final_results"][pantheon_player_id][
"rating_delta"
]
formatted_players = []
players_info = sorted(pantheon_players.values(), key=lambda x: x["place"])
# align strings and format scores
max_nick_length = max([len(x["tenhou_nickname"]) for x in players_info])
max_scores_length = max([len(str(x["score"])) for x in players_info])
for player_info in players_info:
try:
player_record = Player.objects.get(pantheon_id=player_info["id"])
display_name = f"{player_record.last_name_ru} ({player_record.last_name_en})"
except Player.DoesNotExist:
display_name = player_info["pantheon_name"]
tenhou_nickname = player_info["tenhou_nickname"].ljust(max_nick_length, " ")
scores = str(player_info["score"]).rjust(max_scores_length, " ")
rating_delta = player_info["rating_delta"]
if rating_delta > 0:
rating_delta = f"+{rating_delta}"
formatted_players.append(
f"{player_info['place']}. {display_name}\n{tenhou_nickname} {scores} ({rating_delta})"
)
game.status = TournamentGame.FINISHED
game.save()
finished_games = (
TournamentGame.objects.filter(tournament=self.tournament)
.filter(status=TournamentGame.FINISHED)
.filter(tournament_round=status.current_round)
)
total_games = (
TournamentGame.objects.filter(tournament=self.tournament)
.filter(tournament_round=status.current_round)
.count()
)
self.create_notification(
TournamentNotification.GAME_ENDED,
kwargs={
"finished": finished_games.count(),
"total": total_games,
"pantheon_link": pantheon_url,
"tenhou_link": f"http://tenhou.net/0/?log={log_id}",
"player_one": formatted_players[0],
"player_two": formatted_players[1],
"player_three": formatted_players[2],
"player_four": formatted_players[3],
},
)
self.check_round_was_finished()
return _("The game has been added. Thank you."), True
def confirm_participation_in_tournament(self, tenhou_nickname, telegram_username=None, discord_username=None):
status = self.get_status()
if status.registration_closed:
return _("The confirmation phase has already ended. Visit our next tournaments.")
if len(tenhou_nickname) > 8:
return _("The tenhou.net nickname must not be longer than eight characters.")
try:
registration = OnlineTournamentRegistration.objects.get(
tenhou_nickname__iexact=tenhou_nickname, tournament=self.tournament
)
except OnlineTournamentRegistration.DoesNotExist:
return _("You need to register for the tournament on mahjong.click first.")
if TournamentPlayers.objects.filter(
tenhou_username__iexact=tenhou_nickname, tournament=self.tournament
).exists():
return _('Nickname "%(tenhou_nickname)s" was already confirmed for this tournament.') % {
"tenhou_nickname": tenhou_nickname
}
pantheon_id = registration.player and registration.player.pantheon_id or None
team_name = registration.notes
record = TournamentPlayers.objects.create(
telegram_username=telegram_username,
discord_username=discord_username,
tenhou_username=tenhou_nickname,
tournament=self.tournament,
pantheon_id=pantheon_id,
team_name=team_name,
)
try:
add_user_to_pantheon(record)
except Exception as e:
logger.error(e, exc_info=e)
return _("Your participation in the tournament has been confirmed!")
def prepare_next_round(self):
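        # The plain-string status messages returned below are intentionally in
        # Russian, since they are shown to the (Russian-speaking) tournament
        # admins; each one explains why new games cannot be started yet.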
status = self.get_status()
if not status.current_round:
status.current_round = 0
if status.current_round >= self.tournament.number_of_sessions:
return "Невозможно запустить новые игры. У турнира закончились туры."
current_games = TournamentGame.objects.filter(tournament=self.tournament).exclude(
status=TournamentGame.FINISHED
)
if current_games.exists():
return "Невозможно запустить новые игры. Старые игры ещё не завершились."
confirmed_players = TournamentPlayers.objects.filter(tournament=self.tournament)
missed_id = confirmed_players.filter(pantheon_id=None)
if missed_id.exists():
return "Невозможно запустить новые игры. Не у всех игроков стоит pantheon id."
with transaction.atomic():
status.current_round += 1
status.end_break_time = None
pantheon_ids = {}
for confirmed_player in confirmed_players:
pantheon_ids[confirmed_player.pantheon_id] = confirmed_player
sortition = self.make_sortition(list(pantheon_ids.keys()), status.current_round)
# from online.team_seating import TeamSeating
# sortition = TeamSeating.get_seating_for_round(status.current_round)
games = []
for game_index, item in enumerate(sortition):
logger.info(f"Preparing table with player_ids={item}")
# shuffle player winds
random.shuffle(item)
try:
game = TournamentGame.objects.create(
status=TournamentGame.NEW,
tournament=self.tournament,
tournament_round=status.current_round,
game_index=game_index + 1,
)
for wind in range(0, len(item)):
player_id = pantheon_ids[item[wind]].id
TournamentGamePlayer.objects.create(game=game, player_id=player_id, wind=wind)
games.append(game)
except Exception as e:
logger.error("Failed to prepare a game. Pantheon ids={}".format(item), exc_info=e)
            # we were able to generate games
if games:
status.save()
# for users
self.create_notification(
TournamentNotification.GAMES_PREPARED,
{"current_round": status.current_round, "total_rounds": self.tournament.number_of_sessions},
)
# for admin
message = "Тур {}. Игры сформированы.".format(status.current_round)
else:
message = "Игры не запустились. Требуется вмешательство администратора."
return message
def make_sortition(self, pantheon_ids, current_round):
if current_round == 1:
return self._random_sortition(pantheon_ids)
else:
return get_pantheon_swiss_sortition()
def start_games(self):
status = self.get_status()
games = TournamentGame.objects.filter(tournament=self.tournament).filter(tournament_round=status.current_round)
for game in games:
self.start_game(game)
def start_game(self, game):
"""
Send request to tenhou.net to start a new game in the tournament lobby
"""
players = game.game_players.all().order_by("wind")
player_names = [x.player.tenhou_username for x in players]
escaped_player_names = [f"`{x.player.tenhou_username}`" for x in players]
url = "https://tenhou.net/cs/edit/cmd_start.cgi"
data = {
"L": self.lobby,
"R2": self.game_type,
"RND": "default",
"WG": 1,
"M": "\r\n".join([x for x in player_names]),
}
headers = {
"Origin": "http://tenhou.net",
"Content-Type": "application/x-www-form-urlencoded",
"Referer": "http://tenhou.net/cs/edit/?{}".format(self.lobby),
}
try:
response = requests.post(url, data=data, headers=headers, allow_redirects=False)
result = unquote(response.content.decode("utf-8"))
if result.startswith("FAILED"):
logger.error(result)
game.status = TournamentGame.FAILED_TO_START
self.create_notification(
TournamentNotification.GAME_FAILED,
kwargs={"players": ", ".join(escaped_player_names), "game_index": game.game_index},
)
elif result.startswith("MEMBER NOT FOUND"):
missed_player_ids = [x for x in result.split("\r\n")[1:] if x]
missed_player_objects = TournamentPlayers.objects.filter(tenhou_username__in=missed_player_ids).filter(
tournament=self.tournament
)
missed_players_str = self.get_players_message_string(missed_player_objects)
game.status = TournamentGame.FAILED_TO_START
self.create_notification(
TournamentNotification.GAME_FAILED_NO_MEMBERS,
kwargs={
"players": ", ".join(escaped_player_names),
"missed_players": missed_players_str,
"lobby_link": self.get_lobby_link(),
"game_index": game.game_index,
},
)
else:
game.status = TournamentGame.STARTED
self.create_notification(
TournamentNotification.GAME_STARTED,
kwargs={"players": ", ".join(escaped_player_names), "game_index": game.game_index},
)
except Exception as e:
logger.error(e, exc_info=e)
game.status = TournamentGame.FAILED_TO_START
self.create_notification(
TournamentNotification.GAME_FAILED, kwargs={"players": ", ".join(escaped_player_names)}
)
game.save()
def check_round_was_finished(self):
status = self.get_status()
finished_games = (
TournamentGame.objects.filter(tournament=self.tournament)
.filter(status=TournamentGame.FINISHED)
.filter(tournament_round=status.current_round)
)
games = TournamentGame.objects.filter(tournament=self.tournament).filter(tournament_round=status.current_round)
if finished_games.count() == games.count() and not status.end_break_time:
if status.current_round == self.tournament.number_of_sessions:
self.create_notification(TournamentNotification.TOURNAMENT_FINISHED)
else:
index = status.current_round - 1
break_minutes = self.TOURNAMENT_BREAKS_TIME[index]
status.end_break_time = timezone.now() + timedelta(minutes=break_minutes)
status.save()
self.create_notification(
TournamentNotification.ROUND_FINISHED,
{
"break_minutes": break_minutes,
"lobby_link": self.get_lobby_link(),
"current_round": status.current_round + 1,
"total_rounds": self.tournament.number_of_sessions,
},
)
else:
return None
def new_tg_chat_member(self, username: str):
status = self.get_status()
if not status.current_round:
message = "Добро пожаловать в чат онлайн турнира! \n"
if not username:
message += (
"Для начала установите username в настройках телеграма (Settings -> Username). "
"Инструкция: http://telegramzy.ru/nik-v-telegramm/ \n"
)
message += 'После этого отправьте команду "`/me ваш ник на тенхе`" для подтверждения участия.'
else:
message += 'Для подтверждения участия отправьте команду "`/me ваш ник на тенхе`" (регистр важен!)'
return message
else:
message = "Добро пожаловать в чат онлайн турнира! \n\n"
message += "Статистику турнира можно посмотреть вот тут: {} \n".format(self.get_rating_link())
return message
def create_notification(self, notification_type: int, kwargs: Optional[Dict] = None):
if not kwargs:
kwargs = {}
with transaction.atomic():
TournamentNotification.objects.create(
tournament=self.tournament,
notification_type=notification_type,
message_kwargs=kwargs,
destination=TournamentNotification.DISCORD,
)
TournamentNotification.objects.create(
tournament=self.tournament,
notification_type=notification_type,
message_kwargs=kwargs,
destination=TournamentNotification.TELEGRAM,
)
def get_notification_text(
self, lang: str, notification: TournamentNotification, extra_kwargs: Optional[dict] = None
):
activate(lang)
status = self.get_status()
kwargs = copy(notification.message_kwargs)
if extra_kwargs:
kwargs.update(extra_kwargs)
if self.destination == self.DISCORD_DESTINATION:
            # this will disable link previews for discord messages
            for key, value in kwargs.items():
                if isinstance(value, str) and value.startswith("http"):
                    kwargs[key] = f"<{value}>"
if notification.notification_type == TournamentNotification.CONFIRMATION_STARTED:
if self.destination == self.TELEGRAM_DESTINATION:
return (
"Начался этап подтверждения участия! "
'Для подтверждения своего участия отправьте команду "`/me ваш ник на тенхе`" (регистр важен!). '
"Этап завершится в 10-20 (МСК).\n\n"
"Полезные ссылки:\n"
"- турнирное лобби: %(lobby_link)s\n"
"- турнирный рейтинг: %(rating_link)s\n"
) % kwargs
if self.destination == self.DISCORD_DESTINATION:
return (
_(
"Confirmation stage has begun! "
"To confirm your tournament participation go to %(confirmation_channel)s "
"and send your tenhou.net nickname. "
"Confirmation stage will be ended at 7-20 UTC time.\n\n"
"Useful links:\n"
"- tournament lobby: %(lobby_link)s\n"
"- tournament rating table: %(rating_link)s"
)
% kwargs
)
if notification.notification_type == TournamentNotification.ROUND_FINISHED:
if self.destination == TournamentHandler.DISCORD_DESTINATION:
kwargs["break_end"] = status.end_break_time.astimezone(pytz.UTC).strftime("%H-%M")
if self.destination == TournamentHandler.TELEGRAM_DESTINATION:
kwargs["break_end"] = status.end_break_time.astimezone(pytz.timezone("Europe/Moscow")).strftime("%H-%M")
return (
_(
"All games finished. Next round %(current_round)s (of %(total_rounds)s) "
"starts in %(break_minutes)s minutes at %(break_end)s UTC.\n\n"
"Tournament lobby: %(lobby_link)s"
)
% kwargs
)
messages = {
TournamentNotification.GAME_ENDED: _(
"New game was added.\n\n"
"Results:\n"
"```\n"
"%(player_one)s\n"
"%(player_two)s\n"
"%(player_three)s\n"
"%(player_four)s\n"
"```\n"
"Game link: %(pantheon_link)s\n\n"
"Tenhou link: %(tenhou_link)s\n\n"
"Finished games: %(finished)s/%(total)s."
),
            TournamentNotification.CONFIRMATION_ENDED: _(
                "Confirmation stage has ended, there are %(confirmed_players)s players. "
                "Games start in 10 minutes at 7-30 AM UTC. "
                "Please follow this link %(lobby_link)s to enter the tournament lobby. "
                "Games will start automatically."
            ),
TournamentNotification.GAMES_PREPARED: _(
"Round %(current_round)s of %(total_rounds)s starts. "
"Tournament seating is ready.\n\n"
"Starting games...\n\n"
"After the game please send the game log link to the #game_logs channel. "
"The game log should be sent before the new round starts. "
"If there is no log when next round start, all players from this game "
"will get -30000 scores as a round result (their real scores will not be counted)."
),
            TournamentNotification.GAME_FAILED: _(
                "Game №%(game_index)s: %(players)s. It did not start. The table was moved to the end of the queue."
            ),
            TournamentNotification.GAME_FAILED_NO_MEMBERS: _(
                "Game №%(game_index)s: %(players)s. It did not start. Missed players: %(missed_players)s. "
                "The table was moved to the end of the queue. \n\n"
                "Missed players, please enter the tournament lobby: %(lobby_link)s."
            ),
TournamentNotification.GAME_STARTED: _("Game №%(game_index)s: %(players)s. Started."),
TournamentNotification.TOURNAMENT_FINISHED: _("The tournament is over. Thank you for participating!"),
TournamentNotification.GAME_PRE_ENDED: _("%(message)s\n\n"),
            TournamentNotification.GAME_LOG_REMINDER: _(
                "Players: %(player_names)s please send a link to the game log.\n\n"
                "If there is no log when the next round starts, all players from this game "
                "will get -30000 scores as a round result (their real scores will not be counted)."
            ),
            TournamentNotification.GAME_PENALTY: _(
                "Players: %(player_names)s you got a -30000 penalty because the game log link was not sent."
            ),
}
return messages.get(notification.notification_type) % kwargs
def get_lobby_link(self):
return f"http://tenhou.net/0/?{settings.TOURNAMENT_PUBLIC_LOBBY}"
def get_rating_link(self):
return f"https://gui.mjtop.net/eid{settings.PANTHEON_EVENT_ID}/stat"
def get_admin_username(self):
if self.destination == self.TELEGRAM_DESTINATION:
return settings.TELEGRAM_ADMIN_USERNAME
if self.destination == self.DISCORD_DESTINATION:
return f"<@{settings.DISCORD_ADMIN_ID}>"
def game_pre_end(self, end_game_message: str):
status = self.get_status()
tenhou_nicknames = parse_names_from_tenhou_chat_message(end_game_message)
game = (
TournamentGame.objects.filter(tournament=self.tournament)
.filter(game_players__player__tenhou_username__in=tenhou_nicknames)
.filter(tournament_round=status.current_round)
.distinct()
.first()
)
if not game:
logger.error(f"Can't find game to finish. {tenhou_nicknames}")
return
game.status = TournamentGame.FINISHED
game.save()
self.create_notification(
TournamentNotification.GAME_PRE_ENDED,
kwargs={"message": unquote(end_game_message)},
)
# postpone reminder
thread = threading.Thread(target=self.send_log_reminder_message, args=(end_game_message,))
thread.daemon = True
thread.start()
self.check_round_was_finished()
def send_log_reminder_message(self, end_game_message: str):
        # let's give players some time before spamming them with messages
sleep(120)
tenhou_nicknames = parse_names_from_tenhou_chat_message(end_game_message)
players = TournamentPlayers.objects.filter(tenhou_username__in=tenhou_nicknames).filter(
tournament=self.tournament
)
status = self.get_status()
game = (
TournamentGame.objects.filter(tournament=self.tournament)
.filter(game_players__player__tenhou_username__in=tenhou_nicknames)
.filter(tournament_round=status.current_round)
.filter(log_id__isnull=True)
.first()
)
# players already submitted game log
if not game:
return
self.create_notification(
TournamentNotification.GAME_LOG_REMINDER,
kwargs={"player_names": self.get_players_message_string(players)},
)
def get_players_message_string(self, players: List[TournamentPlayers]):
player_names = []
for player in players:
if player.telegram_username:
player_names.append(f"@{player.telegram_username} ({TournamentHandler.TELEGRAM_DESTINATION})")
if player.discord_username:
player_names.append(f"@{player.discord_username} ({TournamentHandler.DISCORD_DESTINATION})")
return ", ".join(player_names)
    def _random_sortition(self, pantheon_ids):
        # default random.shuffle function doesn't produce good results
        # so, let's use our own shuffle implementation
        for i in range(len(pantheon_ids)):
            swap = randint(0, len(pantheon_ids) - 1)
            pantheon_ids[i], pantheon_ids[swap] = pantheon_ids[swap], pantheon_ids[i]
        return list(self._split_to_chunks(pantheon_ids))
    def _split_to_chunks(self, items):
        n = 4  # players per table
        for i in range(0, len(items), n):
            yield items[i:i + n]
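    # Example: _random_sortition(list(range(8))) shuffles the eight pantheon
    # ids in place and returns two tables of four players each, e.g.
    # [[5, 2, 7, 0], [3, 6, 1, 4]].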
|
face_train_controller_node.py
|
#!/usr/bin/env python
# Copyright (c) 2018, The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
from absl import flags
from absl import logging
import grpc
import Queue
import signal
import sys
import time
import threading
import rospy
import std_msgs.msg as std_msgs
import cogrob_face_msgs.msg as face_msgs
import cogrob_face_msgs.srv as face_srvs
from cogrob_face_protos.proto.facedb_proto import facedb_service_pb2
from cogrob_face_protos.proto.facedb_proto import facedb_service_pb2_grpc
from cogrob_face_protos.proto.humandb_proto import humandb_record_pb2
from cogrob_face_protos.proto.humandb_proto import humandb_service_pb2
from cogrob_face_protos.proto.humandb_proto import humandb_service_pb2_grpc
from cogrob_face_protos.proto.util import uuid_pb2
logging.set_verbosity(logging.INFO)
FLAGS = flags.FLAGS
flags.DEFINE_string("facedb_server", "localhost:7009",
"FaceDB Server host:port")
flags.DEFINE_string("humandb_server", "localhost:7010",
"HumanDB Server host:port")
flags.DEFINE_integer("embedding_alive_secs", 60, "Embeeding alive lifespan.")
flags.DEFINE_integer("embedding_sufficient_threshold", 15,
"Sufficent embeeding count threshold.")
class WaitToTerminate:
def __init__(self):
self._kill_now = False
signal.signal(signal.SIGINT, self.ExitGracefully)
signal.signal(signal.SIGTERM, self.ExitGracefully)
while not self._kill_now:
signal.pause()
def ExitGracefully(self, signum, frame):
self._kill_now = True
def UuidPbToUuidMsg(uuid_pb):
uuid_msg = face_msgs.Uuid()
uuid_msg.most_significant_bits = uuid_pb.most_significant_bits
uuid_msg.least_significant_bits = uuid_pb.least_significant_bits
return uuid_msg
def UuidPbIsZero(uuid_pb):
return (uuid_pb.most_significant_bits == 0
and uuid_pb.least_significant_bits == 0)
def PopulateHumanInformationMsgWithProto(dest, humandb_record_pb):
dest.human_uuid = UuidPbToUuidMsg(humandb_record_pb.human_uuid)
dest.human_uuid_aliases = map(UuidPbToUuidMsg,
humandb_record_pb.human_uuid_aliases)
dest.human_labels = list(humandb_record_pb.human_labels)
dest.created_timestamp = rospy.Time.from_sec(
humandb_record_pb.created_timestamp)
dest.modified_timestamp = map(rospy.Time.from_sec,
humandb_record_pb.modified_timestamp)
dest.nicknames = list(humandb_record_pb.nicknames)
dest.facedb_uuids = map(UuidPbToUuidMsg, humandb_record_pb.facedb_uuids)
class OpenFaceFaceDbHumanDbTrainingController(object):
def __init__(self):
facedb_channel = grpc.insecure_channel(FLAGS.facedb_server)
self._facedb_stub = facedb_service_pb2_grpc.FaceDbServiceStub(
facedb_channel)
humandb_channel = grpc.insecure_channel(FLAGS.humandb_server)
self._humandb_stub = humandb_service_pb2_grpc.HumanDbServiceStub(
humandb_channel)
self._embedding_msg_queue = Queue.PriorityQueue()
self._openface_embedding_sub = rospy.Subscriber(
"/cogrob/detected_openface_embedding",
face_msgs.DetectedOpenFaceEmbedding, self._EmbeddingTopicCallback)
self._label_seen_person_sub = rospy.Subscriber(
"/cogrob/label_seen_person",
std_msgs.String, self._LabelSeenPersonCallback)
self._add_new_human_with_recent_face_srv = rospy.Service(
"/cogrob/add_new_human_with_recent_face",
face_srvs.AddNewHumanWithRecentFace,
self._AddNewHumanWithRecentFaceSrvCallback)
self._clear_embedding_cache_srv = rospy.Service(
"/cogrob/clear_face_embedding_cache",
face_srvs.ClearEmbeddingCache,
self._ClearEmbeddingCacheCallback)
self._get_available_embedding_count_srv = rospy.Service(
"/cogrob/get_available_face_embedding_count",
face_srvs.GetAvailableEmbeddingCount,
self._GetAvailableEmbeddingCountCallback)
def _LabelSeenPersonCallback(self, msg):
self._Train(labels=[msg.data])
def _AddNewHumanWithRecentFaceSrvCallback(self, req):
humandb_uuid = self._Train(req.human_labels, req.nicknames)
result = face_srvs.AddNewHumanWithRecentFaceResponse()
if humandb_uuid is None:
result.error = True
else:
result.human_uuid = UuidPbToUuidMsg(humandb_uuid)
return result
def _Train(self, labels=None, nicknames=None):
if labels is None:
labels = []
if nicknames is None:
nicknames = []
    # For now, we always create a new person in HumanDB and tag him/her with a
    # label or nickname.
    print("Got a training request, labels are {}, nicknames are {}".format(
        labels, nicknames))
    self._ClearExpiredEmbeddings()
    # Takes a snapshot of the current embedding msg queue.
    # This is not listed in the API page, but it works. There could be some
    # concurrency issues, but rospy is single-threaded. For simplicity use
    # this for now.
    training_samples = map(lambda x: x[1], self._embedding_msg_queue.queue)
    if len(training_samples) == 0:
      print("Training rejected, no samples")
      return None
    else:
      print("Training on {} samples.".format(len(training_samples)))
    # First, ask FaceDB to generate a face UUID for this human.
    facedb_request = facedb_service_pb2.RegisterRequest()
    for embedding_msg in training_samples:
      facedb_request.embeddings.add().embedding.extend(embedding_msg.embedding)
# Contact the FaceDB and register the face.
# TODO(shengye): Set a deadline for FaceDB and HumanDB. We can't block
# forever on these requests.
print("Register with FaceDB, request:{}".format(str(facedb_request)))
try:
print("Register with FaceDB, request:{}".format(str(facedb_request)))
facedb_response = self._facedb_stub.Register(facedb_request)
print("Registered with FaceDB, response: {}".format(str(facedb_response)))
except Exception as e:
print(e)
logging.error(e)
return None
# Now contact HumanDB and create a new human entry.
humandb_request = humandb_service_pb2.CreateOrAppendRequest()
humandb_record = humandb_request.record
humandb_record.human_labels.extend(labels)
humandb_record.nicknames.extend(nicknames)
humandb_record.facedb_uuids.add().CopyFrom(facedb_response.facedb_uuid)
try:
humandb_response = self._humandb_stub.CreateOrAppend(humandb_request)
except Exception as e:
print(e)
logging.error(e)
return None
return humandb_response.result.human_uuid
def _ClearExpiredEmbeddings(self):
try:
oldest_img = self._embedding_msg_queue.get_nowait()
while (rospy.Time.now() - oldest_img[1].src_image_header.stamp >
rospy.Duration(FLAGS.embedding_alive_secs)):
oldest_img = self._embedding_msg_queue.get_nowait()
# get() removes the item from the queue, so we need to put it back.
self._embedding_msg_queue.put(oldest_img)
    except Queue.Empty:
      # The queue is empty, which is fine.
      return
def _ClearAllEmbeddings(self):
while True:
try:
# Drain the queue
self._embedding_msg_queue.get_nowait()
      except Queue.Empty:
break
return True
def _GetAvailableEmbeddingCount(self):
return self._embedding_msg_queue.qsize()
def _GetAvailableEmbeddingCountCallback(self, req):
del req
result = face_srvs.GetAvailableEmbeddingCountResponse()
result.count = self._GetAvailableEmbeddingCount()
if result.count >= FLAGS.embedding_sufficient_threshold:
result.is_sufficient = True
else:
result.is_sufficient = False
return result
def _ClearEmbeddingCacheCallback(self, req):
del req
result = face_srvs.ClearEmbeddingCacheResponse()
result.is_success = self._ClearAllEmbeddings()
return result
def _EmbeddingTopicCallback(self, data):
self._embedding_msg_queue.put((data.src_image_header.stamp, data))
self._ClearExpiredEmbeddings()
def main(argv):
FLAGS(argv)
logging.info("face_train_controller_node started.")
rospy.init_node('face_train_controller_node')
controller = OpenFaceFaceDbHumanDbTrainningController()
rospy_spin_thread = threading.Thread(target=rospy.spin)
rospy_spin_thread.daemon = True
rospy_spin_thread.start()
WaitToTerminate()
if __name__ == '__main__':
main(sys.argv)
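# Example interaction (editor's sketch; topic/service names come from the
# registrations above, message payloads are assumptions):
#   rostopic pub -1 /cogrob/label_seen_person std_msgs/String "data: 'alice'"
#   rosservice call /cogrob/get_available_face_embedding_count "{}"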
|
app_cn.py
|
import os
import re
import math
import sys
import shutil
import json
import traceback
import PIL.Image as PilImage
import threading
import tkinter as tk
from tkinter import messagebox
from tkinter import ttk
from tkinter import filedialog
from constants import *
from config import ModelConfig, OUTPUT_SHAPE1_MAP, NETWORK_MAP, DataAugmentationEntity, PretreatmentEntity, get_version
from make_dataset import DataSets
from predict_testing import Predict
from trains import Trains
from category import category_extract, SIMPLE_CATEGORY_MODEL
from utils.category_frequency_statistics import fetch_category_list
from gui.utils import LayoutGUI
from gui.data_augmentation import DataAugmentationDialog
from gui.pretreatment import PretreatmentDialog
NOT_EDITABLE_MSG = "只支持从文件中修改"
class Wizard:
job: threading.Thread
current_task: Trains = None
is_task_running: bool = False
data_augmentation_entity = DataAugmentationEntity()
pretreatment_entity = PretreatmentEntity()
extract_regex = ".*?(?=_)"
label_split = ""
model_conf: ModelConfig = None
def __init__(self, parent: tk.Tk):
self.layout = {
'global': {
'start': {'x': 15, 'y': 20},
'space': {'x': 15, 'y': 25},
'tiny_space': {'x': 5, 'y': 10}
}
}
self.parent = parent
self.parent.iconbitmap(Wizard.resource_path("resource/icon.ico"))
self.current_project: str = ""
self.project_root_path = "./projects"
if not os.path.exists(self.project_root_path):
os.makedirs(self.project_root_path)
self.parent.title('Eve-深度训练框架 v1({})'.format(get_version()))
self.parent.resizable(width=False, height=False)
self.window_width = 815
self.window_height = 700
self.layout_utils = LayoutGUI(self.layout, self.window_width)
screenwidth = self.parent.winfo_screenwidth()
screenheight = self.parent.winfo_screenheight()
size = '%dx%d+%d+%d' % (
self.window_width,
self.window_height,
(screenwidth - self.window_width) / 2,
(screenheight - self.window_height) / 2
)
self.parent.bind('<Button-1>', lambda x: self.blank_click(x))
s = ttk.Style()
s.configure('my.TButton', font=('simsun', 10))
# ============================= Menu 1 =====================================
self.menubar = tk.Menu(self.parent, font=("simsun", 10))
self.data_menu = tk.Menu(self.menubar, tearoff=False, font=("simsun", 10))
self.help_menu = tk.Menu(self.menubar, tearoff=False, font=("simsun", 10))
self.system_menu = tk.Menu(self.menubar, tearoff=False, font=("simsun", 10))
self.edit_var = tk.DoubleVar()
self.label_from_var = tk.StringVar()
self.memory_usage_menu = tk.Menu(self.menubar, tearoff=False)
self.memory_usage_menu.add_radiobutton(label="50%", variable=self.edit_var, value=0.5)
self.memory_usage_menu.add_radiobutton(label="60%", variable=self.edit_var, value=0.6)
self.memory_usage_menu.add_radiobutton(label="70%", variable=self.edit_var, value=0.7)
self.memory_usage_menu.add_radiobutton(label="80%", variable=self.edit_var, value=0.8)
self.label_from_menu = tk.Menu(self.menubar, tearoff=False, font=("simsun", 10))
self.label_from_menu.add_radiobutton(label="文件名", variable=self.label_from_var, value='FileName')
self.label_from_menu.add_radiobutton(label="文本", variable=self.label_from_var, value='TXT')
self.menubar.add_cascade(label="系统", menu=self.system_menu)
self.system_menu.add_cascade(label="显存占用率", menu=self.memory_usage_menu)
self.data_menu.add_command(label="数据增强", command=lambda: self.popup_data_augmentation())
self.data_menu.add_command(label="预处理", command=lambda: self.popup_pretreatment())
self.data_menu.add_separator()
self.data_menu.add_command(label="重置打包数据集", command=lambda: self.clear_dataset())
self.data_menu.add_separator()
self.data_menu.add_cascade(label="标注源", menu=self.label_from_menu)
self.data_menu.add_command(label="一键获取分类", command=lambda: self.fetch_category())
self.menubar.add_cascade(label="数据", menu=self.data_menu)
self.help_menu.add_command(label="关于", command=lambda: self.popup_about())
self.menubar.add_cascade(label="帮助", menu=self.help_menu)
self.parent.config(menu=self.menubar)
# ============================= Group 1 =====================================
self.label_frame_source = ttk.Labelframe(self.parent, text='样本源')
self.label_frame_source.place(
x=self.layout['global']['start']['x'],
y=self.layout['global']['start']['y'],
width=790,
height=150
)
        # Training set source path - label
self.dataset_train_path_text = ttk.Label(self.parent, font=("simsun", 10), text='训练集路径', anchor=tk.W)
self.layout_utils.inside_widget(
src=self.dataset_train_path_text,
target=self.label_frame_source,
width=90,
height=20
)
        # Training set source path - input widget
self.source_train_path_listbox = tk.Listbox(self.parent, font=('微软雅黑', 9))
self.layout_utils.next_to_widget(
src=self.source_train_path_listbox,
target=self.dataset_train_path_text,
width=600,
height=50,
tiny_space=True
)
self.source_train_path_listbox.bind(
sequence="<Delete>",
func=lambda x: self.listbox_delete_item_callback(x, self.source_train_path_listbox)
)
self.listbox_scrollbar(self.source_train_path_listbox)
        # Training set source path - browse button
self.btn_browse_train = ttk.Button(
self.parent, style='my.TButton', text='浏览',
command=lambda: self.browse_dataset(DatasetType.Directory, RunMode.Trains)
)
self.layout_utils.next_to_widget(
src=self.btn_browse_train,
target=self.source_train_path_listbox,
width=60,
height=24,
tiny_space=True
)
        # Validation set source path - label
label_edge = self.layout_utils.object_edge_info(self.dataset_train_path_text)
widget_edge = self.layout_utils.object_edge_info(self.source_train_path_listbox)
self.dataset_validation_path_text = ttk.Label(self.parent, font=("simsun", 10), text='验证集路径', anchor=tk.W)
self.dataset_validation_path_text.place(
x=label_edge['x'],
y=widget_edge['edge_y'] + self.layout['global']['space']['y'] / 2,
width=90,
height=20
)
        # Validation set source path - input widget
self.source_validation_path_listbox = tk.Listbox(self.parent, font=('微软雅黑', 9))
self.layout_utils.next_to_widget(
src=self.source_validation_path_listbox,
target=self.dataset_validation_path_text,
width=600,
height=50,
tiny_space=True
)
self.source_validation_path_listbox.bind(
sequence="<Delete>",
func=lambda x: self.listbox_delete_item_callback(x, self.source_validation_path_listbox)
)
self.listbox_scrollbar(self.source_validation_path_listbox)
        # Validation set source path - browse button
self.btn_browse_validation = ttk.Button(
self.parent, style='my.TButton', text='浏览',
command=lambda: self.browse_dataset(DatasetType.Directory, RunMode.Validation)
)
self.layout_utils.next_to_widget(
src=self.btn_browse_validation,
target=self.source_validation_path_listbox,
width=60,
height=24,
tiny_space=True
)
# ============================= Group 2 =====================================
self.label_frame_neu = ttk.Labelframe(self.parent, text='神经网络')
self.layout_utils.below_widget(
src=self.label_frame_neu,
target=self.label_frame_source,
width=790,
height=120,
tiny_space=False
)
        # Maximum label count - label
self.label_num_text = ttk.Label(self.parent, font=("simsun", 10), text='标签数', anchor=tk.W)
self.layout_utils.inside_widget(
src=self.label_num_text,
target=self.label_frame_neu,
width=65,
height=20,
)
        # Maximum label count - spinbox
self.label_num_spin = ttk.Spinbox(self.parent, from_=1, to=12)
self.label_num_spin.set(1)
self.layout_utils.next_to_widget(
src=self.label_num_spin,
target=self.label_num_text,
width=50,
height=20,
tiny_space=True
)
        # Image channels - label
self.channel_text = ttk.Label(self.parent, font=("simsun", 10), text='通道', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.channel_text,
target=self.label_num_spin,
width=50,
height=20,
tiny_space=False
)
        # Image channels - combobox
self.comb_channel = ttk.Combobox(self.parent, values=(3, 1), state='readonly')
self.comb_channel.current(1)
self.layout_utils.next_to_widget(
src=self.comb_channel,
target=self.channel_text,
width=38,
height=20,
tiny_space=True
)
        # Convolutional layer - label
self.neu_cnn_text = ttk.Label(self.parent, font=("simsun", 10), text='卷积层', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.neu_cnn_text,
target=self.comb_channel,
width=65,
height=20,
tiny_space=False
)
        # Convolutional layer - combobox
self.comb_neu_cnn = ttk.Combobox(self.parent, values=[_.name for _ in CNNNetwork], state='readonly')
self.comb_neu_cnn.current(0)
self.layout_utils.next_to_widget(
src=self.comb_neu_cnn,
target=self.neu_cnn_text,
width=80,
height=20,
tiny_space=True
)
        # Recurrent layer - label
self.neu_recurrent_text = ttk.Label(self.parent, font=("simsun", 10), text='循环层', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.neu_recurrent_text,
target=self.comb_neu_cnn,
width=95,
height=20,
tiny_space=False
)
        # Recurrent layer - combobox
self.comb_recurrent = ttk.Combobox(self.parent, values=[_.name for _ in RecurrentNetwork], state='readonly')
self.comb_recurrent.current(1)
self.layout_utils.next_to_widget(
src=self.comb_recurrent,
target=self.neu_recurrent_text,
width=112,
height=20,
tiny_space=True
)
self.comb_recurrent.bind("<<ComboboxSelected>>", lambda x: self.auto_loss(x))
        # Recurrent units - label
self.units_num_text = ttk.Label(self.parent, font=("simsun", 10), text='单元数', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.units_num_text,
target=self.comb_recurrent,
width=60,
height=20,
tiny_space=False
)
        # Recurrent units - spinbox
self.units_num_spin = ttk.Spinbox(self.parent, from_=16, to=512, increment=16, wrap=True)
self.units_num_spin.set(64)
self.layout_utils.next_to_widget(
src=self.units_num_spin,
target=self.units_num_text,
width=55,
height=20,
tiny_space=True
)
        # Loss function - label
self.loss_func_text = ttk.Label(self.parent, font=("simsun", 10), text='损失函数', anchor=tk.W)
self.layout_utils.below_widget(
src=self.loss_func_text,
target=self.label_num_text,
width=65,
height=20,
tiny_space=True
)
        # Loss function - combobox
self.comb_loss = ttk.Combobox(self.parent, values=[_.name for _ in LossFunction], state='readonly')
self.comb_loss.current(1)
self.layout_utils.next_to_widget(
src=self.comb_loss,
target=self.loss_func_text,
width=101,
height=20,
tiny_space=True
)
        # Optimizer - label
self.optimizer_text = ttk.Label(self.parent, font=("simsun", 10), text='优化器', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.optimizer_text,
target=self.comb_loss,
width=60,
height=20,
tiny_space=False
)
        # Optimizer - combobox
self.comb_optimizer = ttk.Combobox(self.parent, values=[_.name for _ in Optimizer], state='readonly')
self.comb_optimizer.current(0)
self.layout_utils.next_to_widget(
src=self.comb_optimizer,
target=self.optimizer_text,
width=88,
height=20,
tiny_space=True
)
        # Learning rate - label
self.learning_rate_text = ttk.Label(self.parent, font=("simsun", 10), text='学习率', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.learning_rate_text,
target=self.comb_optimizer,
width=50,
height=20,
tiny_space=False
)
        # Learning rate - spinbox
self.learning_rate_spin = ttk.Spinbox(self.parent, from_=0.00001, to=0.1, increment='0.0001')
self.learning_rate_spin.set(0.001)
self.layout_utils.next_to_widget(
src=self.learning_rate_spin,
target=self.learning_rate_text,
width=67,
height=20,
tiny_space=True
)
        # Resize - label
self.resize_text = ttk.Label(self.parent, font=("simsun", 10), text='重置尺寸', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.resize_text,
target=self.learning_rate_spin,
width=60,
height=20,
tiny_space=False
)
        # Resize - entry
self.resize_val = tk.StringVar()
self.resize_val.set('[150, 50]')
self.resize_entry = ttk.Entry(self.parent, textvariable=self.resize_val, justify=tk.LEFT)
self.layout_utils.next_to_widget(
src=self.resize_entry,
target=self.resize_text,
width=60,
height=20,
tiny_space=True
)
        # Size - label
self.size_text = ttk.Label(self.parent, font=("simsun", 10), text='图片尺寸', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.size_text,
target=self.resize_entry,
width=60,
height=20,
tiny_space=False
)
        # Size - entry
self.size_val = tk.StringVar()
self.size_val.set('[-1, -1]')
self.size_entry = ttk.Entry(self.parent, textvariable=self.size_val, justify=tk.LEFT)
self.layout_utils.next_to_widget(
src=self.size_entry,
target=self.size_text,
width=60,
height=20,
tiny_space=True
)
        # Category - label
self.category_text = ttk.Label(self.parent, font=("simsun", 10), text='分类', anchor=tk.W)
self.layout_utils.below_widget(
src=self.category_text,
target=self.loss_func_text,
width=65,
height=20,
tiny_space=True
)
        # Category - combobox
self.comb_category = ttk.Combobox(self.parent, values=(
'CUSTOMIZED',
'NUMERIC',
'ALPHANUMERIC',
'ALPHANUMERIC_LOWER',
'ALPHANUMERIC_UPPER',
'ALPHABET_LOWER',
'ALPHABET_UPPER',
'ALPHABET',
'ARITHMETIC',
'FLOAT',
'CHS_3500',
'ALPHANUMERIC_CHS_3500_LOWER',
'DOCUMENT_OCR'
), state='readonly')
self.comb_category.current(1)
self.comb_category.bind("<<ComboboxSelected>>", lambda x: self.comb_category_callback(x))
self.layout_utils.next_to_widget(
src=self.comb_category,
target=self.category_text,
width=225,
height=20,
tiny_space=True
)
        # Category - custom entry
self.category_val = tk.StringVar()
self.category_val.set('')
self.category_entry = ttk.Entry(self.parent, textvariable=self.category_val, justify=tk.LEFT, state=tk.DISABLED)
self.layout_utils.next_to_widget(
src=self.category_entry,
target=self.comb_category,
width=440,
height=20,
tiny_space=False
)
# ============================= Group 3 =====================================
self.label_frame_train = ttk.Labelframe(self.parent, text='训练配置')
self.layout_utils.below_widget(
src=self.label_frame_train,
target=self.label_frame_neu,
width=790,
height=60,
tiny_space=True
)
        # Completion criterion (accuracy) - label
self.end_acc_text = ttk.Label(self.parent, font=("simsun", 10), text='结束准确率', anchor=tk.W)
self.layout_utils.inside_widget(
src=self.end_acc_text,
target=self.label_frame_train,
width=85,
height=20,
)
        # Completion criterion (accuracy) - entry
self.end_acc_val = tk.DoubleVar()
self.end_acc_val.set(0.95)
self.end_acc_entry = ttk.Entry(self.parent, textvariable=self.end_acc_val, justify=tk.LEFT)
self.layout_utils.next_to_widget(
src=self.end_acc_entry,
target=self.end_acc_text,
width=56,
height=20,
tiny_space=True
)
        # Completion criterion (average cost) - label
self.end_cost_text = ttk.Label(self.parent, font=("simsun", 10), text='结束Cost', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.end_cost_text,
target=self.end_acc_entry,
width=60,
height=20,
tiny_space=False
)
        # Completion criterion (average cost) - entry
self.end_cost_val = tk.DoubleVar()
self.end_cost_val.set(0.5)
self.end_cost_entry = ttk.Entry(self.parent, textvariable=self.end_cost_val, justify=tk.LEFT)
self.layout_utils.next_to_widget(
src=self.end_cost_entry,
target=self.end_cost_text,
width=58,
height=20,
tiny_space=True
)
        # Completion criterion (epochs) - label
self.end_epochs_text = ttk.Label(self.parent, font=("simsun", 10), text='结束轮次', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.end_epochs_text,
target=self.end_cost_entry,
width=72,
height=20,
tiny_space=False
)
        # Completion criterion (epochs) - spinbox
self.end_epochs_spin = ttk.Spinbox(self.parent, from_=0, to=10000)
self.end_epochs_spin.set(2)
self.layout_utils.next_to_widget(
src=self.end_epochs_spin,
target=self.end_epochs_text,
width=50,
height=20,
tiny_space=True
)
        # Training batch size - label
self.batch_size_text = ttk.Label(self.parent, font=("simsun", 10), text='训练批次大小', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.batch_size_text,
target=self.end_epochs_spin,
width=90,
height=20,
tiny_space=False
)
        # Training batch size - entry
self.batch_size_val = tk.IntVar()
self.batch_size_val.set(64)
self.batch_size_entry = ttk.Entry(self.parent, textvariable=self.batch_size_val, justify=tk.LEFT)
self.layout_utils.next_to_widget(
src=self.batch_size_entry,
target=self.batch_size_text,
width=40,
height=20,
tiny_space=True
)
        # Validation batch size - label
self.validation_batch_size_text = ttk.Label(self.parent, font=("simsun", 10), text='验证批次大小', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.validation_batch_size_text,
target=self.batch_size_entry,
width=120,
height=20,
tiny_space=False
)
        # Validation batch size - entry
self.validation_batch_size_val = tk.IntVar()
self.validation_batch_size_val.set(300)
self.validation_batch_size_entry = ttk.Entry(self.parent, textvariable=self.validation_batch_size_val,
justify=tk.LEFT)
self.layout_utils.next_to_widget(
src=self.validation_batch_size_entry,
target=self.validation_batch_size_text,
width=40,
height=20,
tiny_space=True
)
# ============================= Group 5 =====================================
self.label_frame_project = ttk.Labelframe(self.parent, text='项目配置')
self.layout_utils.below_widget(
src=self.label_frame_project,
target=self.label_frame_train,
width=790,
height=60,
tiny_space=True
)
        # Project name - label
self.project_name_text = ttk.Label(self.parent, font=("simsun", 10), text='项目名', anchor=tk.W)
self.layout_utils.inside_widget(
src=self.project_name_text,
target=self.label_frame_project,
width=90,
height=20
)
        # Project name - editable combobox
self.comb_project_name = ttk.Combobox(self.parent)
self.layout_utils.next_to_widget(
src=self.comb_project_name,
target=self.project_name_text,
width=430,
height=20,
tiny_space=True
)
self.comb_project_name.bind(
sequence="<Return>",
func=lambda x: self.project_name_fill_callback(x)
)
self.comb_project_name.bind(
sequence="<Button-1>",
func=lambda x: self.fetch_projects()
)
def read_conf(event):
threading.Thread(target=self.read_conf).start()
self.comb_project_name.bind("<<ComboboxSelected>>", read_conf)
        # Save configuration - button
self.btn_save_conf = ttk.Button(
self.parent, style='my.TButton', text='保存配置', command=lambda: self.save_conf()
)
self.layout_utils.next_to_widget(
src=self.btn_save_conf,
target=self.comb_project_name,
width=130,
height=24,
tiny_space=False,
offset_y=-2
)
        # Delete project - button
self.btn_delete = ttk.Button(
self.parent, style='my.TButton', text='删除', command=lambda: self.delete_project()
)
self.layout_utils.next_to_widget(
src=self.btn_delete,
target=self.btn_save_conf,
width=80,
height=24,
tiny_space=False,
)
# ============================= Group 6 =====================================
self.label_frame_dataset = ttk.Labelframe(
self.parent, text='样本数据集',
)
self.layout_utils.below_widget(
src=self.label_frame_dataset,
target=self.label_frame_project,
width=790,
height=170,
tiny_space=True
)
        # Attach dataset - button
self.btn_attach_dataset = ttk.Button(
self.parent,
style='my.TButton',
text='附加数据',
command=lambda: self.attach_dataset()
)
self.layout_utils.inside_widget(
src=self.btn_attach_dataset,
target=self.label_frame_dataset,
width=120,
height=24,
)
        # Attach dataset - display entry
self.attach_dataset_val = tk.StringVar()
self.attach_dataset_val.set('')
self.attach_dataset_entry = ttk.Entry(
self.parent, textvariable=self.attach_dataset_val, justify=tk.LEFT, state=tk.DISABLED
)
self.layout_utils.next_to_widget(
src=self.attach_dataset_entry,
target=self.btn_attach_dataset,
width=420,
height=24,
tiny_space=True
)
        # Validation set size - label
self.validation_num_text = ttk.Label(self.parent, font=("simsun", 10), text='验证集数目', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.validation_num_text,
target=self.attach_dataset_entry,
width=120,
height=20,
tiny_space=False,
offset_y=2
)
        # Validation set size - entry
self.validation_num_val = tk.IntVar()
self.validation_num_val.set(300)
self.validation_num_entry = ttk.Entry(self.parent, textvariable=self.validation_num_val, justify=tk.LEFT)
self.layout_utils.next_to_widget(
src=self.validation_num_entry,
target=self.validation_num_text,
width=71,
height=20,
tiny_space=True
)
        # Training dataset path - label
self.dataset_train_path_text = ttk.Label(self.parent, font=("simsun", 10), text='训练集数据集', anchor=tk.W)
self.layout_utils.below_widget(
src=self.dataset_train_path_text,
target=self.btn_attach_dataset,
width=100,
height=20,
tiny_space=False
)
        # Training dataset path - listbox
self.dataset_train_listbox = tk.Listbox(self.parent, font=('微软雅黑', 9))
self.layout_utils.next_to_widget(
src=self.dataset_train_listbox,
target=self.dataset_train_path_text,
width=640,
height=36,
tiny_space=False
)
self.dataset_train_listbox.bind(
sequence="<Delete>",
func=lambda x: self.listbox_delete_item_callback(x, self.dataset_train_listbox)
)
self.listbox_scrollbar(self.dataset_train_listbox)
        # Validation dataset path - label
label_edge = self.layout_utils.object_edge_info(self.dataset_train_path_text)
widget_edge = self.layout_utils.object_edge_info(self.dataset_train_listbox)
self.dataset_validation_path_text = ttk.Label(self.parent, font=("simsun", 10), text='验证集数据集', anchor=tk.W)
self.dataset_validation_path_text.place(
x=label_edge['x'],
y=widget_edge['edge_y'] + self.layout['global']['space']['y'] / 2,
width=100,
height=20
)
        # Validation dataset path - listbox
self.dataset_validation_listbox = tk.Listbox(self.parent, font=('微软雅黑', 9))
self.layout_utils.next_to_widget(
src=self.dataset_validation_listbox,
target=self.dataset_validation_path_text,
width=640,
height=36,
tiny_space=False
)
self.dataset_validation_listbox.bind(
sequence="<Delete>",
func=lambda x: self.listbox_delete_item_callback(x, self.dataset_validation_listbox)
)
self.listbox_scrollbar(self.dataset_validation_listbox)
self.sample_map = {
DatasetType.Directory: {
RunMode.Trains: self.source_train_path_listbox,
RunMode.Validation: self.source_validation_path_listbox
},
DatasetType.TFRecords: {
RunMode.Trains: self.dataset_train_listbox,
RunMode.Validation: self.dataset_validation_listbox
}
}
        # Start training - button
self.btn_training = ttk.Button(self.parent, style='my.TButton', text='开始训练',
command=lambda: self.start_training())
self.layout_utils.widget_from_right(
src=self.btn_training,
target=self.label_frame_dataset,
width=120,
height=24,
tiny_space=True
)
        # Stop training - button
self.btn_stop = ttk.Button(self.parent, style='my.TButton', text='停止', command=lambda: self.stop_training())
self.button_state(self.btn_stop, tk.DISABLED)
self.layout_utils.before_widget(
src=self.btn_stop,
target=self.btn_training,
width=60,
height=24,
tiny_space=True
)
        # Compile model - button
self.btn_compile = ttk.Button(self.parent, style='my.TButton', text='编译', command=lambda: self.compile())
self.layout_utils.before_widget(
src=self.btn_compile,
target=self.btn_stop,
width=80,
height=24,
tiny_space=True
)
        # Make dataset - button
self.btn_make_dataset = ttk.Button(self.parent, style='my.TButton', text='打包数据集',
command=lambda: self.make_dataset())
self.layout_utils.before_widget(
src=self.btn_make_dataset,
target=self.btn_compile,
width=120,
height=24,
tiny_space=True
)
        # Reset training history - button
self.btn_reset_history = ttk.Button(
self.parent, style='my.TButton', text='清空训练记录', command=lambda: self.reset_history()
)
self.layout_utils.before_widget(
src=self.btn_reset_history,
target=self.btn_make_dataset,
width=120,
height=24,
tiny_space=True
)
        # Testing - button
self.btn_testing = ttk.Button(
self.parent, style='my.TButton', text='测试', command=lambda: self.testing_model()
)
self.layout_utils.before_widget(
src=self.btn_testing,
target=self.btn_reset_history,
width=80,
height=24,
tiny_space=True
)
self.parent.geometry(size)
@staticmethod
def threading_exec(func, *args) -> threading.Thread:
th = threading.Thread(target=func, args=args)
        th.daemon = True
th.start()
return th
def popup_data_augmentation(self):
if not self.current_project:
messagebox.showerror(
"Error!", "请先配置项目名."
)
return
data_augmentation = DataAugmentationDialog()
data_augmentation.read_conf(self.data_augmentation_entity)
def popup_pretreatment(self):
if not self.current_project:
messagebox.showerror(
"Error!", "请先配置项目名."
)
return
pretreatment = PretreatmentDialog()
pretreatment.read_conf(self.pretreatment_entity)
@staticmethod
def listbox_scrollbar(listbox: tk.Listbox):
y_scrollbar = tk.Scrollbar(
listbox, command=listbox.yview
)
y_scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
listbox.config(yscrollcommand=y_scrollbar.set)
def blank_click(self, event):
if self.current_project != self.comb_project_name.get():
self.project_name_fill_callback(event)
def project_name_fill_callback(self, event):
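        # Auto-append a "-<CNN>-<RNN>-H<units>-<loss>-C<channels>" suffix so
        # the project name encodes the network configuration.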
suffix = '-{}-{}-H{}-{}-C{}'.format(
self.comb_neu_cnn.get(),
self.comb_recurrent.get(),
self.units_num_spin.get(),
self.comb_loss.get(),
self.comb_channel.get(),
)
current_project_name = self.comb_project_name.get()
if len(current_project_name) > 0 and current_project_name not in self.project_names:
self.extract_regex = ".*?(?=_)"
self.label_from_var.set('FileName')
self.sample_map[DatasetType.Directory][RunMode.Trains].delete(0, tk.END)
self.sample_map[DatasetType.Directory][RunMode.Validation].delete(0, tk.END)
self.category_val.set("")
if not current_project_name.endswith(suffix):
self.comb_project_name.insert(tk.END, suffix)
self.current_project = self.comb_project_name.get()
self.update_dataset_files_path(mode=RunMode.Trains)
self.update_dataset_files_path(mode=RunMode.Validation)
self.data_augmentation_entity = DataAugmentationEntity()
self.pretreatment_entity = PretreatmentEntity()
@property
def project_path(self):
if not self.current_project:
return None
project_path = "{}/{}".format(self.project_root_path, self.current_project)
if not os.path.exists(project_path):
os.makedirs(project_path)
return project_path
def update_dataset_files_path(self, mode: RunMode):
dataset_name = "dataset/{}.0.tfrecords".format(mode.value)
dataset_path = os.path.join(self.project_path, dataset_name)
dataset_path = dataset_path.replace("\\", '/')
self.sample_map[DatasetType.TFRecords][mode].delete(0, tk.END)
self.sample_map[DatasetType.TFRecords][mode].insert(tk.END, dataset_path)
self.save_conf()
def attach_dataset(self):
if self.is_task_running:
messagebox.showerror(
"Error!", "请先结束当前训练或者等待训练完成."
)
return
if not self.current_project:
messagebox.showerror(
"Error!", "请先配置项目名."
)
return
filename = filedialog.askdirectory()
if not filename:
return
model_conf = ModelConfig(self.current_project)
if not self.check_dataset(model_conf):
return
self.attach_dataset_val.set(filename)
self.sample_map[DatasetType.Directory][RunMode.Trains].insert(tk.END, filename)
self.button_state(self.btn_attach_dataset, tk.DISABLED)
for mode in [RunMode.Trains, RunMode.Validation]:
attached_dataset_name = model_conf.dataset_increasing_name(mode)
attached_dataset_name = "dataset/{}".format(attached_dataset_name)
attached_dataset_path = os.path.join(self.project_path, attached_dataset_name)
attached_dataset_path = attached_dataset_path.replace("\\", '/')
if mode == RunMode.Validation and self.validation_num_val.get() == 0:
continue
self.sample_map[DatasetType.TFRecords][mode].insert(tk.END, attached_dataset_path)
self.save_conf()
model_conf = ModelConfig(self.current_project)
self.threading_exec(
lambda: DataSets(model_conf).make_dataset(
trains_path=filename,
is_add=True,
callback=lambda: self.button_state(self.btn_attach_dataset, tk.NORMAL),
msg=lambda x: tk.messagebox.showinfo('附加数据状态', x)
)
)
@staticmethod
def button_state(btn: ttk.Button, state: str):
btn['state'] = state
def delete_project(self):
if not self.current_project:
messagebox.showerror(
"Error!", "请选择一个项目删除."
)
return
if self.is_task_running:
messagebox.showerror(
"Error!", "请先结束当前训练或者等待训练完成."
)
return
project_path = "./projects/{}".format(self.current_project)
try:
shutil.rmtree(project_path)
        except Exception as e:
            messagebox.showerror(
                "Error!", json.dumps(e.args, ensure_ascii=False)
            )
            return
        messagebox.showinfo(
            "消息", "删除成功!"
        )
self.comb_project_name.delete(0, tk.END)
def reset_history(self):
if not self.current_project:
messagebox.showerror(
"Error!", "请先选择一个项目."
)
return
if self.is_task_running:
messagebox.showerror(
"Error!", "请先结束当前训练或者等待训练完成."
)
return
project_history_path = "./projects/{}/model".format(self.current_project)
try:
shutil.rmtree(project_history_path)
        except Exception as e:
            messagebox.showerror(
                "Error!", json.dumps(e.args, ensure_ascii=False)
            )
            return
        messagebox.showinfo(
            "消息", "清空训练历史成功!"
        )
def testing_model(self):
filename = filedialog.askdirectory()
if not filename:
return
filename = filename.replace("\\", "/")
predict = Predict(project_name=self.current_project)
predict.testing(image_dir=filename, limit=self.validation_batch_size)
def clear_dataset(self):
if not self.current_project:
messagebox.showerror(
"Error!", "请先选择一个项目."
)
return
if self.is_task_running:
messagebox.showerror(
"Error!", "请先结束当前训练或者等待训练完成."
)
return
project_history_path = "./projects/{}/dataset".format(self.current_project)
try:
shutil.rmtree(project_history_path)
self.dataset_train_listbox.delete(1, tk.END)
self.dataset_validation_listbox.delete(1, tk.END)
        except Exception as e:
            messagebox.showerror(
                "Error!", json.dumps(e.args, ensure_ascii=False)
            )
            return
        messagebox.showinfo(
            "消息", "清空数据集成功!"
        )
@staticmethod
def popup_about():
messagebox.showinfo("关于",
"Eve-深度训练 核心版本({})\n\n作者邮箱: kerlomz@gmail.com\n\nQQ 群: 857149419".format(
get_version()))
def auto_loss(self, event):
if self.comb_recurrent.get() == 'NoRecurrent':
self.comb_loss.set("CrossEntropy")
@staticmethod
def get_param(src: dict, key, default=None):
result = src.get(key)
return result if result else default
def read_conf(self):
print('Reading configuration...')
selected = self.comb_project_name.get()
self.current_project = selected
model_conf = ModelConfig(selected)
self.edit_var.set(model_conf.memory_usage)
self.size_val.set("[{}, {}]".format(model_conf.image_width, model_conf.image_height))
self.resize_val.set(json.dumps(model_conf.resize))
self.source_train_path_listbox.delete(0, tk.END)
self.source_validation_path_listbox.delete(0, tk.END)
self.dataset_validation_listbox.delete(0, tk.END)
self.dataset_train_listbox.delete(0, tk.END)
for source_train in self.get_param(model_conf.trains_path, DatasetType.Directory, default=[]):
self.source_train_path_listbox.insert(tk.END, source_train)
for source_validation in self.get_param(model_conf.validation_path, DatasetType.Directory, default=[]):
self.source_validation_path_listbox.insert(tk.END, source_validation)
self.label_num_spin.set(model_conf.max_label_num)
self.comb_channel.set(model_conf.image_channel)
self.comb_neu_cnn.set(model_conf.neu_cnn_param)
self.comb_recurrent.set(model_conf.neu_recurrent_param)
self.units_num_spin.set(model_conf.units_num)
self.comb_loss.set(model_conf.loss_func_param)
self.extract_regex = model_conf.extract_regex
self.label_split = model_conf.label_split
self.label_from_var.set(model_conf.label_from.value)
self.comb_optimizer.set(model_conf.neu_optimizer_param)
self.learning_rate_spin.set(model_conf.trains_learning_rate)
self.end_acc_val.set(model_conf.trains_end_acc)
self.end_cost_val.set(model_conf.trains_end_cost)
self.end_epochs_spin.set(model_conf.trains_end_epochs)
self.batch_size_val.set(model_conf.batch_size)
self.validation_batch_size_val.set(model_conf.validation_batch_size)
self.validation_num_val.set(model_conf.validation_set_num)
self.data_augmentation_entity.binaryzation = model_conf.da_binaryzation
self.data_augmentation_entity.median_blur = model_conf.da_median_blur
self.data_augmentation_entity.gaussian_blur = model_conf.da_gaussian_blur
self.data_augmentation_entity.equalize_hist = model_conf.da_equalize_hist
self.data_augmentation_entity.laplace = model_conf.da_laplace
self.data_augmentation_entity.warp_perspective = model_conf.da_warp_perspective
self.data_augmentation_entity.rotate = model_conf.da_rotate
self.data_augmentation_entity.sp_noise = model_conf.da_sp_noise
self.data_augmentation_entity.brightness = model_conf.da_brightness
self.data_augmentation_entity.hue = model_conf.da_hue
self.data_augmentation_entity.saturation = model_conf.da_saturation
self.data_augmentation_entity.gamma = model_conf.da_gamma
self.data_augmentation_entity.channel_swap = model_conf.da_channel_swap
self.data_augmentation_entity.random_blank = model_conf.da_random_blank
self.data_augmentation_entity.random_transition = model_conf.da_random_transition
self.data_augmentation_entity.random_captcha = model_conf.da_random_captcha
self.pretreatment_entity.binaryzation = model_conf.pre_binaryzation
self.pretreatment_entity.replace_transparent = model_conf.pre_replace_transparent
self.pretreatment_entity.horizontal_stitching = model_conf.pre_horizontal_stitching
self.pretreatment_entity.concat_frames = model_conf.pre_concat_frames
self.pretreatment_entity.blend_frames = model_conf.pre_blend_frames
self.pretreatment_entity.exec_map = model_conf.pre_exec_map
for dataset_validation in self.get_param(model_conf.validation_path, DatasetType.TFRecords, default=[]):
self.dataset_validation_listbox.insert(tk.END, dataset_validation)
for dataset_train in self.get_param(model_conf.trains_path, DatasetType.TFRecords, default=[]):
self.dataset_train_listbox.insert(tk.END, dataset_train)
# print('Loading category configuration...')
if isinstance(model_conf.category_param, list):
self.category_entry['state'] = tk.DISABLED
self.comb_category.set('CUSTOMIZED')
if len(model_conf.category_param) > 1000:
self.category_val.set(NOT_EDITABLE_MSG)
else:
self.category_val.set(model_conf.category_param_text)
self.category_entry['state'] = tk.NORMAL
else:
self.category_val.set("")
self.category_entry['state'] = tk.DISABLED
self.comb_category.set(model_conf.category_param)
# print('Loading configuration is completed.')
self.model_conf = model_conf
return self.model_conf
@property
def validation_batch_size(self):
# if self.dataset_validation_listbox.size() > 1:
return self.validation_batch_size_val.get()
# else:
# return min(self.validation_batch_size_val.get(), self.validation_num_val.get())
@property
def device_usage(self):
return self.edit_var.get()
def save_conf(self):
if not self.current_project:
messagebox.showerror(
"Error!", "请先配置项目名."
)
return
model_conf = ModelConfig(
project_name=self.current_project,
MemoryUsage=self.device_usage,
CNNNetwork=self.neu_cnn,
RecurrentNetwork=self.neu_recurrent,
UnitsNum=self.units_num_spin.get(),
Optimizer=self.optimizer,
LossFunction=self.loss_func,
Decoder=self.comb_loss.get(),
ModelName=self.current_project,
ModelField=ModelField.Image.value,
ModelScene=ModelScene.Classification.value,
Category=self.category,
Resize=self.resize,
ImageChannel=self.comb_channel.get(),
ImageWidth=self.image_width,
ImageHeight=self.image_height,
MaxLabelNum=self.label_num_spin.get(),
AutoPadding=True,
ReplaceTransparent=False,
HorizontalStitching=False,
OutputSplit='',
LabelFrom=self.label_from_var.get(),
ExtractRegex=self.extract_regex,
LabelSplit=self.label_split,
DatasetTrainsPath=self.dataset_value(
dataset_type=DatasetType.TFRecords, mode=RunMode.Trains
),
DatasetValidationPath=self.dataset_value(
dataset_type=DatasetType.TFRecords, mode=RunMode.Validation
),
SourceTrainPath=self.dataset_value(
dataset_type=DatasetType.Directory, mode=RunMode.Trains
),
SourceValidationPath=self.dataset_value(
dataset_type=DatasetType.Directory, mode=RunMode.Validation
),
ValidationSetNum=self.validation_num_val.get(),
SavedSteps=100,
ValidationSteps=500,
EndAcc=self.end_acc_val.get(),
EndCost=self.end_cost_val.get(),
EndEpochs=self.end_epochs_spin.get(),
BatchSize=self.batch_size_val.get(),
ValidationBatchSize=self.validation_batch_size,
LearningRate=self.learning_rate_spin.get(),
DA_Binaryzation=self.data_augmentation_entity.binaryzation,
DA_MedianBlur=self.data_augmentation_entity.median_blur,
DA_GaussianBlur=self.data_augmentation_entity.gaussian_blur,
DA_EqualizeHist=self.data_augmentation_entity.equalize_hist,
DA_Laplace=self.data_augmentation_entity.laplace,
DA_WarpPerspective=self.data_augmentation_entity.warp_perspective,
DA_Rotate=self.data_augmentation_entity.rotate,
DA_PepperNoise=self.data_augmentation_entity.sp_noise,
DA_Brightness=self.data_augmentation_entity.brightness,
DA_Saturation=self.data_augmentation_entity.saturation,
DA_Hue=self.data_augmentation_entity.hue,
DA_Gamma=self.data_augmentation_entity.gamma,
DA_ChannelSwap=self.data_augmentation_entity.channel_swap,
DA_RandomBlank=self.data_augmentation_entity.random_blank,
DA_RandomTransition=self.data_augmentation_entity.random_transition,
DA_RandomCaptcha=self.data_augmentation_entity.random_captcha,
Pre_Binaryzation=self.pretreatment_entity.binaryzation,
Pre_ReplaceTransparent=self.pretreatment_entity.replace_transparent,
Pre_HorizontalStitching=self.pretreatment_entity.horizontal_stitching,
Pre_ConcatFrames=self.pretreatment_entity.concat_frames,
Pre_BlendFrames=self.pretreatment_entity.blend_frames,
Pre_ExecuteMap=self.pretreatment_entity.exec_map
)
model_conf.update()
return model_conf
def make_dataset(self):
if not self.current_project:
messagebox.showerror(
"Error!", "请先配置项目名."
)
return
if self.is_task_running:
messagebox.showerror(
"Error!", "请先结束当前训练或者等待训练完成."
)
return
self.save_conf()
self.button_state(self.btn_make_dataset, tk.DISABLED)
model_conf = ModelConfig(self.current_project)
train_path = self.dataset_value(DatasetType.Directory, RunMode.Trains)
validation_path = self.dataset_value(DatasetType.Directory, RunMode.Validation)
if len(train_path) < 1:
messagebox.showerror(
"错误!", "{} 样本尚未被添加.".format(RunMode.Trains.value)
)
self.button_state(self.btn_make_dataset, tk.NORMAL)
return
self.threading_exec(
lambda: DataSets(model_conf).make_dataset(
trains_path=train_path,
validation_path=validation_path,
is_add=False,
callback=lambda: self.button_state(self.btn_make_dataset, tk.NORMAL),
msg=lambda x: tk.messagebox.showinfo('打包数据集状态', x)
)
)
@property
def size(self):
return self.json_filter(self.size_val.get(), int)
@property
def image_height(self):
return self.size[1]
@property
def image_width(self):
return self.size[0]
@property
def resize(self):
return self.json_filter(self.resize_val.get(), int)
@property
def neu_cnn(self):
return self.comb_neu_cnn.get()
@property
def neu_recurrent(self):
return self.comb_recurrent.get()
@property
def loss_func(self):
return self.comb_loss.get()
@property
def optimizer(self):
return self.comb_optimizer.get()
@staticmethod
def json_filter(content, item_type):
if not content:
messagebox.showerror(
"Error!", "您选择了自定义分类,必须手动指定分类集."
)
return None
try:
content = json.loads(content)
        except ValueError:
messagebox.showerror(
"Error!", "输入格式必须符合JSON."
)
return None
content = [item_type(i) for i in content]
return content
@property
def category(self):
comb_selected = self.comb_category.get()
if not comb_selected:
messagebox.showerror(
"Error!", "请选择内置分类或自定义分类"
)
return None
if comb_selected == 'CUSTOMIZED':
category_value = self.category_entry.get()
if category_value == NOT_EDITABLE_MSG:
return self.model_conf.category_param_text
category_value = category_value.replace("'", '"') if "'" in category_value else category_value
category_value = self.json_filter(category_value, str)
else:
category_value = comb_selected
return category_value
def dataset_value(self, dataset_type: DatasetType, mode: RunMode):
listbox = self.sample_map[dataset_type][mode]
value = list(listbox.get(0, listbox.size() - 1))
return value
def compile_task(self):
if not self.current_project:
messagebox.showerror(
"Error!", "请先配置项目名."
)
return
model_conf = ModelConfig(project_name=self.current_project)
if not os.path.exists(model_conf.model_root_path):
messagebox.showerror(
"Error", "模型存储路径不存在."
)
return
if len(os.listdir(model_conf.model_root_path)) < 3:
messagebox.showerror(
"Error", "当前无训练记录,请先训练再编译."
)
return
try:
if not self.current_task:
self.current_task = Trains(model_conf)
self.current_task.compile_graph(0)
status = '编译完成'
except Exception as e:
messagebox.showerror(
e.__class__.__name__, json.dumps(e.args, ensure_ascii=False)
)
status = '编译失败'
tk.messagebox.showinfo('编译状态', status)
def compile(self):
self.job = self.threading_exec(
lambda: self.compile_task()
)
def training_task(self):
model_conf = ModelConfig(project_name=self.current_project)
self.current_task = Trains(model_conf)
try:
self.button_state(self.btn_training, tk.DISABLED)
self.button_state(self.btn_stop, tk.NORMAL)
self.is_task_running = True
self.current_task.train_process()
status = '训练完成'
except Exception as e:
traceback.print_exc()
messagebox.showerror(
e.__class__.__name__, json.dumps(e.args, ensure_ascii=False)
)
status = '训练失败'
self.button_state(self.btn_training, tk.NORMAL)
self.button_state(self.btn_stop, tk.DISABLED)
self.comb_project_name['state'] = tk.NORMAL
self.is_task_running = False
tk.messagebox.showinfo('训练状态', status)
@staticmethod
def check_dataset(model_conf):
trains_path = model_conf.trains_path[DatasetType.TFRecords]
validation_path = model_conf.validation_path[DatasetType.TFRecords]
if not trains_path or not validation_path:
messagebox.showerror(
"Error!", "训练集或验证集未定义."
)
return False
for tp in trains_path:
if not os.path.exists(tp):
messagebox.showerror(
"Error!", "训练集集路径不存在,请先打包样本."
)
return False
for vp in validation_path:
if not os.path.exists(vp):
messagebox.showerror(
"Error!", "验证集路径不存在,请先打包样本"
)
return False
return True
def start_training(self):
if not self.check_resize():
return
if not self.current_project:
messagebox.showerror(
"Error!", "请先配置项目名."
)
return
model_conf = self.save_conf()
if not self.check_dataset(model_conf):
return
self.comb_project_name['state'] = tk.DISABLED
self.job = self.threading_exec(
lambda: self.training_task()
)
def stop_training(self):
self.current_task.stop_flag = True
@property
def project_names(self):
return [i.name for i in os.scandir(self.project_root_path) if i.is_dir()]
def fetch_projects(self):
self.comb_project_name['values'] = self.project_names
def browse_dataset(self, dataset_type: DatasetType, mode: RunMode):
if not self.current_project:
messagebox.showerror(
"Error!", "请先配置项目名."
)
return
filename = filedialog.askdirectory()
if not filename:
return
is_sub = False
for i, item in enumerate(os.scandir(filename)):
if item.is_dir():
path = item.path.replace("\\", "/")
if self.sample_map[dataset_type][mode].size() == 0:
self.fetch_sample([path])
self.sample_map[dataset_type][mode].insert(tk.END, path)
if i > 0:
continue
is_sub = True
else:
break
if not is_sub:
filename = filename.replace("\\", "/")
if self.sample_map[dataset_type][mode].size() == 0:
self.fetch_sample([filename])
self.sample_map[dataset_type][mode].insert(tk.END, filename)
@staticmethod
def closest_category(category):
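        # Pick the smallest built-in charset that covers every observed
        # character: keep only supersets of the sample alphabet, then return
        # the one with the fewest unused characters.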
category = set(category)
category_group = dict()
for key in SIMPLE_CATEGORY_MODEL.keys():
category_set = set(category_extract(key))
if category <= category_set:
category_group[key] = len(category_set) - len(category)
if not category_group:
return None
min_index = min(category_group.values())
for k, v in category_group.items():
if v == min_index:
return k
def fetch_category(self):
if self.model_conf.label_from == LabelFrom.TXT or self.label_from_var.get() == LabelFrom.TXT.value:
messagebox.showerror(
"Error!", "当前标签源不支持."
)
return
self.save_conf()
category_list = fetch_category_list(self.model_conf, is_json=True)
if not category_list:
return
self.comb_category.current(0)
if len(category_list) > 1000:
self.category_entry['state'] = tk.DISABLED
self.category_val.set(NOT_EDITABLE_MSG)
self.model_conf.category_param_text = category_list
else:
self.category_entry['state'] = tk.NORMAL
self.category_val.set(category_list)
self.save_conf()
def fetch_sample(self, dataset_path):
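        # Inspect up to 100 sample files: infer the label alphabet and length
        # from the "<label>_<id>" filename convention, and seed the size and
        # resize fields from the first image's dimensions.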
file_names = os.listdir(dataset_path[0])[0:100]
category = list()
len_label = -1
for file_name in file_names:
if "_" in file_name:
label = file_name.split("_")[0]
label = [i for i in label]
len_label = len(label)
category.extend(label)
size = PilImage.open(os.path.join(dataset_path[0], file_names[0])).size
self.size_val.set(json.dumps(size))
self.resize_val.set(json.dumps(size))
self.label_num_spin.set(len_label)
if not self.category_val.get() or self.category_val.get() != NOT_EDITABLE_MSG:
            category_param = self.closest_category(category)
            if not category_param:
                return
            self.comb_category.set(category_param)
def listbox_delete_item_callback(self, event, listbox: tk.Listbox):
try:
i = listbox.curselection()[0]
listbox.delete(i)
self.save_conf()
except IndexError as e:
print(e)
def comb_category_callback(self, event):
comb_selected = self.comb_category.get()
if comb_selected == 'CUSTOMIZED':
self.category_entry['state'] = tk.NORMAL
else:
self.category_entry.delete(0, tk.END)
self.category_entry['state'] = tk.DISABLED
def check_resize(self):
if self.loss_func == 'CTC':
return True
param = OUTPUT_SHAPE1_MAP[NETWORK_MAP[self.neu_cnn]]
shape1w = math.ceil(1.0 * self.resize[0] / param[0])
shape1h = math.ceil(1.0 * self.resize[1] / param[0])
input_s1 = shape1w * shape1h * param[1]
label_num = int(self.label_num_spin.get())
if input_s1 % label_num != 0:
messagebox.showerror(
"Error!", "Shape[1] = {} 必须被 label_num = {} 整除.".format(input_s1, label_num)
)
return False
return True
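    # Worked example for check_resize (editor's note; the (8, 64) entry is an
    # assumed OUTPUT_SHAPE1_MAP value): with resize [150, 50],
    # Shape[1] = ceil(150/8) * ceil(50/8) * 64 = 19 * 7 * 64 = 8512, which is
    # divisible by label_num = 4 but not by label_num = 5, so the latter
    # configuration would be rejected.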
@staticmethod
def resource_path(relative_path):
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except AttributeError:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
if __name__ == '__main__':
root = tk.Tk()
app = Wizard(root)
root.mainloop()
|
bluecoat.py
|
import http.server
import json
import re
import socketserver
import sys
import threading
from urllib.parse import urlparse
import time
import traceback
import requests
import os
class NewHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
if self.path == '/':
self.path = 'webroot/index.html'
return http.server.SimpleHTTPRequestHandler.do_GET(self)
class ThreadedHTTPServer(object):
handler = NewHandler
def __init__(self, host, port):
self.server = socketserver.TCPServer((host, port), self.handler)
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
def start(self):
self.server_thread.start()
def stop(self):
self.server.shutdown()
self.server.server_close()
class Bluecoat:
def __init__(self, url, clonesite):
self.url = url
self.clonesite = clonesite
self.server = ''
def clone(self):
print("[-] Cloning " + self.clonesite)
headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)'}
webContent = requests.get(self.clonesite, headers=headers).content
if not os.path.exists('webroot'):
os.makedirs('webroot')
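        # If the page does not already declare a <base href>, inject one right
        # after <head> so relative links in the clone resolve against the
        # original site.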
        if b"<base href=\"" not in webContent.lower():
            parsed_uri = urlparse(self.clonesite)
            base = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
            webContent = re.sub(b"(<head.*?>)", b"\\g<0>\n<base href=\"" + bytes(base, encoding='utf8') + b"\">", webContent, count=1, flags=re.IGNORECASE)
        with open('webroot/index.html', 'wb') as indexFile:
            indexFile.write(webContent)
def check_category(self):
# Category checking lifted from CatMyFish
# https://github.com/Mr-Un1k0d3r/CatMyFish/blob/master/CatMyFish.py
print("[-] Checking category for " + self.url)
session = requests.session()
url = "https://sitereview.bluecoat.com/resource/lookup"
cookies = {"XSRF-TOKEN": "028e5984-50bf-4c00-ad38-87d19957201a"}
headers = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0",
"Accept": "application/json, text/plain, */*", "Accept-Language": "en_US",
"Accept-Encoding": "gzip, deflate", "Referer": "https://sitereview.bluecoat.com/",
"X-XSRF-TOKEN": "028e5984-50bf-4c00-ad38-87d19957201a",
"Content-Type": "application/json; charset=utf-8", "Connection": "close"}
data = {"captcha": "", "key": "",
"phrase": "RXZlbiBpZiB5b3UgYXJlIG5vdCBwYXJ0IG9mIGEgY29tbWVyY2lhbCBvcmdhbml6YXRpb24sIHNjcmlwdGluZyBhZ2FpbnN0IFNpdGUgUmV2aWV3IGlzIHN0aWxsIGFnYWluc3QgdGhlIFRlcm1zIG9mIFNlcnZpY2U=",
"source": "new lookup", "url": self.url}
response = session.post(url, headers=headers, cookies=cookies, json=data)
try:
json_data = json.loads(response.content)
if "errorType" in json_data:
if json_data["errorType"] == "captcha":
print("[-] BlueCoat blocked us :(")
return("Blocked by BlueCoat")
sys.exit(0)
category = []
for entry in json_data["categorization"]:
category.append(entry["name"])
cat = ', '.join(category)
print("\033[1;32m[-] Your site is categorised as: " + cat + "\033[0;0m")
            return cat
except Exception as e:
traceback.print_exc()
print("[-] An error occurred")
def serve_content(self):
print("[-] Serving content over HTTP server")
self.server = ThreadedHTTPServer("0.0.0.0", 8000)
try:
self.server.start()
        except Exception:
            pass
def shutdown_server(self):
print("[-] Shutting down HTTP server")
self.server.stop()
def run(self):
self.clone()
self.serve_content()
time.sleep(10)
self.check_category()
self.shutdown_server()
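# Usage sketch (editor's note): "python bluecoat.py <url-to-check> <site-to-clone>"
# clones the page, serves it on 0.0.0.0:8000 for ~10 seconds, then asks
# Symantec/BlueCoat Site Review how the URL is categorised.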
if __name__ == "__main__":
url = sys.argv[1]
clonesite = sys.argv[2]
b = Bluecoat(url, clonesite)
b.clone()
b.serve_content()
time.sleep(10)
b.check_category()
b.shutdown_server()
|
agent.py
|
# ------------------------------------------------------------------------------
# Copyright 2021 Mohammad Reza Golsorkhi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
# name: agent.py
# Description: An agent that handles and runs jobs
# Version: 0.1.3
# Author: Mohammad Reza Golsorkhi
# ------------------------------------------------------------------------------
import datetime
import threading
from time import sleep
import src.agent.exceptions as exceptions
import src.agent.interrupt as _interrupt
from src.agent.job import FunctionJob, Job
import logging
from pathlib import Path
try:
import dill
is_dill_available = True
except ImportError:
is_dill_available = False
logger = logging.getLogger(__name__)
def handle_error_no_dill():
    print("dill is not installed")
class Agent:
_name: str
_initialized = False
_Agent_counter = 0
_job_id_counter = 0
def __repr__(self):
return f'name : {self.name} agent_id : {self._id}'
def __init__(self, daemon=True, id=None, name=None, **kwargs):
# increment
Agent._Agent_counter += 1
self._id = Agent._Agent_counter if id is None else id
self.jobs = []
self._daemon = daemon
self._started = threading.Event()
self._is_stop = threading.Event()
self._interrupt = _interrupt.NoneInterrupt(self)
self._name = str(name or Agent._newname())
self.is_running = threading.Event()
self.__dict__.update(kwargs)
self._initialized = True
@staticmethod
def _newname():
return 'Agent-' + str(Agent._Agent_counter)
def _agent(self):
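        # Scheduler loop: poll jobs roughly once a second; service any pending
        # interrupt under its lock, then start every initialized, enabled,
        # idle job whose next_run_time has passed.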
self.is_running.set()
logger.info(msg=f'agent {self.name} started')
while not self._is_stop.is_set():
for job in self.jobs:
if self._interrupt.is_set():
self._interrupt.lock.acquire()
self._interrupt.interrupt_handler()
self._interrupt.lock.release()
self._interrupt.clear()
if (job.initialized is True) and (job.is_not_running.is_set() and job.is_enable) and (
job.next_run_time <= datetime.datetime.now()):
job.start(0.01)
sleep(1)
self.is_running.clear()
logger.info(msg=f'agent {self.name} stopped')
return 0
@staticmethod
def _get_new_job_id():
Agent._job_id_counter += 1
return Agent._job_id_counter
    def append_job(self, job: Job, name=None):
        """
        Load a job and add it to the jobs list.
        :param job: an instance of Job
        :param name: name, defaults to job.name
        :return: None
        """
        if name:
            job.name = name
        if self.get_job_by_name(job.name):
            counter = 1
            while self.get_job_by_name(f'{job.name} ({counter})') is not None:
                counter += 1
            job.name = f'{job.name} ({counter})'
        job._id = self._get_new_job_id()
        job.agent = self
        self.jobs.append(job)
def load_job(self, filepath, name=None, **kwargs):
"""
load a job file and add to agent
:param filepath: path to job file
:param name: name default is job.name
:param kwargs:
:return: None
"""
if not is_dill_available:
handle_error_no_dill()
return
filepath = Path(filepath)
if filepath.exists() and filepath.is_file():
with open(filepath, 'rb') as file:
job = dill.load(file, **kwargs)
if isinstance(job, Job):
self.append_job(job=job, name=name)
else:
                raise TypeError(f'object in {filepath} file is not an instance of Job')
    def loads_job(self, data, name=None, **kwargs):
        """
        Load a job from a dill byte string and add it to the agent.
        :param data: dill-serialized job bytes
        :param name: name, defaults to job.name
        :param kwargs: passed through to dill.loads
        :return: None
        """
        if not is_dill_available:
            handle_error_no_dill()
            return
        job = dill.loads(data, **kwargs)
        if not isinstance(job, Job):
            raise TypeError(f'object of type {type(job)} is not an instance of Job')
        self.append_job(job=job, name=name)
@staticmethod
def save_job(job: Job, dirpath, file_name=None, protocol=None, **kwargs):
"""
save job in to a file
:param job: get a job you can us get_job_by_name or get_job_by_id
:param dirpath: path to dir you want job be save
:param file_name: name of file default is job.name
:param protocol: dill protocol
:param kwargs:
:return:
"""
if not is_dill_available:
handle_error_no_dill()
return
dirpath = Path(dirpath)
data_file_path = dirpath.joinpath(str((job.name if file_name is None else file_name) + '.job'))
if not dirpath.exists():
dirpath.mkdir(parents=True)
with open(data_file_path, mode='wb') as file:
dill.dump(obj=job, file=file, protocol=protocol, **kwargs)
@staticmethod
def dumps_job(job, protocol=None, **kwargs):
"""
this methode is like dill dumps methode
:param job: a job you can get it whit get_job_by_name or get_job_by_id
:param protocol: dill protocol
:param kwargs: for dill or dill you can pass more argument but because the difference within dill and dill
I only give **kwargs to dumps function
:return: str
"""
if not is_dill_available:
handle_error_no_dill()
return
return dill.dumps(obj=job, protocol=protocol, **kwargs)
def _add_job(self, func, options, is_enable, args, kwargs, name, **job_variables):
job_id = Agent._get_new_job_id()
if name is None:
name = 'job_' + str(job_id)
if self.get_job_by_name(name) is not None:
raise exceptions.DuplicateName('job name must be unique')
self.jobs.append(FunctionJob(self, job_id, name, func, options, is_enable, args, kwargs, **job_variables))
def create_class_job(self, job, options: dict, args=(), kwargs=None, is_enable: bool = True, name: str = None,
**job_variables):
job_id = Agent._get_new_job_id()
if name is None:
name = 'job_' + str(job_id)
if self.get_job_by_name(name) is not None:
raise exceptions.DuplicateName('job name must be unique')
self.jobs.append(job(self, job_id, name, options, is_enable, args, kwargs, **job_variables))
def create_job(self, func, options: dict, args=(), kwargs=None, is_enable: bool = True, name: str = None,
**job_variables):
self._add_job(func, options, is_enable, args, kwargs, name, **job_variables)
def create_job_decorator(self, options: dict, args=(), kwargs=None, is_enable: bool = True, name: str = None,
**job_variables):
def decorator(func):
self._add_job(func, options, is_enable, args, kwargs, name, **job_variables)
return func
return decorator
    def get_job_by_name(self, job_name: str):
        for job in self.jobs:
            if job.name == job_name:
                return job
        return None
    def get_job_by_id(self, job_id: int):
        for job in self.jobs:
            if job.status['job_id'] == job_id:
                return job
        return None
@staticmethod
def run_job(job: FunctionJob, timeout=None):
if job:
job.start(timeout)
return 1
else:
return 0
def run_job_by_name(self, job_name: str, timeout=None):
job = self.get_job_by_name(job_name)
if job:
job.start(timeout)
return 1
else:
return 0
def run_job_by_id(self, job_id: int, timeout=None):
job = self.get_job_by_id(job_id)
if job:
job.start(timeout)
return 1
else:
return 0
def get_all_jobs(self):
return self.jobs
def get_all_running_jobs(self):
return [job for job in self.jobs if job.is_running.is_set()]
def start(self):
"""
start agent inner thread
:return: None
"""
if not self._initialized:
raise RuntimeError("Agent.__init__() not called")
if self._started.is_set():
raise RuntimeError("Agent can only be started once")
logger.info(msg=f'agent {self.name} is starting')
threading.Thread(target=self._agent, daemon=self._daemon, name=self._name).start()
self._is_stop.clear()
self._started.set()
def stop(self):
"""
set a stop interrupt that wait for all running job stop
:return: None
"""
if not self._initialized:
raise RuntimeError("Agent.__init__() not called")
if not self._started.is_set():
raise RuntimeError("cannot stop Agent before it is started")
logger.info(msg=f'agent {self.name} is stopping')
self._interrupt = _interrupt.StopInterrupt(self)
self._interrupt.set()
self._interrupt.wait()
self._is_stop.set()
self._started.clear()
logger.info(msg=f'agent {self.name} stopped')
@property
def interrupt(self):
"""
The current interrupt; activate it with its ``.set()`` method.
:return:
"""
if not self._initialized:
raise RuntimeError("Agent.__init__() not called")
return self._interrupt
@interrupt.setter
def interrupt(self, val: _interrupt.BaseInterrupt):
"""
Set an interrupt that can be activated with its ``.set()`` method.
:param val: an instance of a class that inherits from BaseInterrupt;
it is afterwards available via ``agent.interrupt``
:return:
"""
if self._interrupt is not None:
if self._interrupt.lock.locked():
logger.error(msg='interrupt is set waiting for interrupt clear')
self._interrupt.lock.acquire()
self._interrupt.lock.release()
self._interrupt = val
@property
def name(self):
"""A string used for identification purposes only.
It has no semantics. Multiple threads may be given the same name. The
initial name is set by the constructor.
"""
if not self._initialized:
raise RuntimeError("Agent.__init__() not called")
return self._name
@name.setter
def name(self, val: str):
if not self._initialized:
raise RuntimeError("Agent.__init__() not called")
if self._started.is_set():
raise PermissionError('cannot set name of active Agent')
else:
self._name = val
@property
def info(self):
"""
get info about agent
:return: dict of info
"""
return {
'version': '0.1.2',
'is_dill_supported': is_dill_available
}
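# Minimal usage sketch (hypothetical: assumes Agent() is constructible with
# default arguments and that `options` accepts an empty dict; neither is shown
# in this excerpt):
#
#   agent = Agent()
#
#   @agent.create_job_decorator(options={}, name='heartbeat')
#   def heartbeat():
#       print('alive')
#
#   agent.start()                       # spin up the inner thread
#   agent.run_job_by_name('heartbeat')  # returns 1 on success, 0 if not found
#   agent.stop()                        # waits for running jobs, then stops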
|
all.py
|
from utlis.rank import setrank,isrank,remrank,remsudos,setsudo, GPranks,IDrank
from utlis.send import send_msg, BYusers, GetLink,Name,Glang,getAge
from utlis.locks import st,getOR
from utlis.tg import Bot
from config import *
from pyrogram.types import ReplyKeyboardMarkup, InlineKeyboardMarkup, InlineKeyboardButton
import threading, requests, time, random, re, json, datetime
import importlib
from os import listdir
from os.path import isfile, join
def allGP(client, message,redis):
type = message.chat.type
userID = message.from_user.id
chatID = message.chat.id
username = message.from_user.username
if username is None:
username = "None"
userFN = message.from_user.first_name
title = message.chat.title
rank = isrank(redis,userID,chatID)
text = message.text
c = importlib.import_module("lang.arcmd")
r = importlib.import_module("lang.arreply")
redis.hincrby("{}Nbot:{}:msgs".format(BOT_ID,chatID),userID)
if text :
if re.search(c.setGPadmin,text):
if re.search("@",text):
user = text.split("@")[1]
if re.search(c.setGPadmin2,text):
user = int(re.search(r'\d+', text).group())
if message.reply_to_message:
user = message.reply_to_message.from_user.id
if 'user' not in locals():return False
if GPranks(userID,chatID) == "member":return False
Getus = Bot("getChatMember",{"chat_id":chatID,"user_id":userID})["result"]
if Getus["status"] == "administrator" and not Getus["can_promote_members"]:return False
try:
getUser = client.get_users(user)
userId = getUser.id
userFn = getUser.first_name
if GPranks(userId,chatID) != "member":return False
pr = Bot("promoteChatMember",{"chat_id":chatID,"user_id":userId,"can_change_info":1,"can_delete_messages":1,"can_invite_users":1,"can_restrict_members":1,"can_pin_messages":1})
if pr["ok"]:
T ="<a href=\"tg://user?id={}\">{}</a>".format(userId,Name(userFn))
Bot("sendMessage",{"chat_id":chatID,"text":r.prGPadmin.format(T),"reply_to_message_id":message.message_id,"parse_mode":"html"})
except Exception as e:
Bot("sendMessage",{"chat_id":chatID,"text":r.userNocc,"reply_to_message_id":message.message_id,"parse_mode":"html"})
if re.search(c.sors,text):
kb = InlineKeyboardMarkup([[InlineKeyboardButton("قناه السورس", url="t.me/eel00")],[InlineKeyboardButton("تواصل السورس", url="t.me/iiiziiii")],[InlineKeyboardButton("شروحات السورس", url="t.me/eel00")]])
Botuser = client.get_me().username
Bot("sendMessage",{"chat_id":chatID,"text":r.sors.format("@"+Botuser),"disable_web_page_preview":True,"reply_to_message_id":message.message_id,"parse_mode":"markdown","reply_markup":kb})
if re.search(c.dellink,text):
kb = InlineKeyboardMarkup([[InlineKeyboardButton(c.dellink2, url="https://telegram.org/deactivate")]])
Botuser = client.get_me().username
Bot("sendMessage",{"chat_id":chatID,"text":r.dellink,"disable_web_page_preview":True,"reply_to_message_id":message.message_id,"parse_mode":"markdown","reply_markup":kb})
if re.search(c.ShowO,text) and (rank is not False and rank != 0 and rank != "vip"):
reply_markup = getOR(rank,r,userID)
Bot("sendMessage",{"chat_id":chatID,"text":r.Showall,"reply_to_message_id":message.message_id,"parse_mode":"html","disable_web_page_preview":True,"reply_markup":reply_markup})
if text == "عدد الكروب" and (rank is not False or rank is not 0 ):
from pyrogram.api.functions.channels import GetFullChannel
chat = client.resolve_peer(chatID)
full_chat = client.send(GetFullChannel(channel=chat)).full_chat
Bot("sendMessage",{"chat_id":chatID,"text":r.gpinfo.format(message.chat.title,full_chat.participants_count,full_chat.admins_count,full_chat.kicked_count,full_chat.banned_count,message.message_id),"reply_to_message_id":message.message_id,"parse_mode":"html","disable_web_page_preview":True})
if text == c.ID and not redis.sismember("{}Nbot:IDSend".format(BOT_ID),chatID) and not message.reply_to_message:
Ch = True
# if redis.sismember("{}Nbot:IDpt".format(BOT_ID),chatID):
t = IDrank(redis,userID,chatID,r)
msgs = (redis.hget("{}Nbot:{}:msgs".format(BOT_ID,chatID),userID) or 0)
edits = (redis.hget("{}Nbot:{}:edits".format(BOT_ID,chatID),userID) or 0)
rate = int(msgs)*100/20000
age = getAge(userID,r)
if redis.hget("{}Nbot:SHOWid".format(BOT_ID),chatID):
tx = redis.hget("{}Nbot:SHOWid".format(BOT_ID),chatID)
rep = {"#age":"{age}","#name":"{name}","#id":"{id}","#username":"{username}","#msgs":"{msgs}","#stast":"{stast}","#edits":"{edits}","#rate":"{rate}","{us}":"{username}","#us":"{username}"}
for v in rep.keys():
tx = tx.replace(v,rep[v])
else:
tx = r.IDnPT
if not redis.sismember("{}Nbot:IDSendPH".format(BOT_ID),chatID):
get = Bot("getUserProfilePhotos",{"user_id":userID,"offset":0,"limit":1})
if get["ok"] == False:
Ch = True
elif get["result"]["total_count"] == 0:
Ch = True
else:
Ch = False
file_id = get["result"]["photos"][0][0]["file_id"]
Bot("sendPhoto",{"chat_id":chatID,"photo":file_id,"caption":tx.format(username=("@"+username or "None"),id=userID,stast=t,msgs=msgs,edits=edits,age=age,rate=str(rate)+"%"),"reply_to_message_id":message.message_id,"parse_mode":"html"})
if Ch == True:
Bot("sendMessage",{"chat_id":chatID,"text":tx.format(username=("@"+username or "None"),id=userID,stast=t,msgs=msgs,edits=edits,age=age,rate=str(rate)+"%"),"reply_to_message_id":message.message_id,"parse_mode":"html"})
# if not redis.sismember("{}Nbot:IDSendPH".format(BOT_ID),chatID) and not redis.sismember("{}Nbot:IDpt".format(BOT_ID),chatID):
# get = Bot("getUserProfilePhotos",{"user_id":userID,"offset":0,"limit":1})
# if get["ok"] == False:
# Ch = True
# elif get["result"]["total_count"] == 0:
# Ch = True
# else:
# Ch = False
# reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton(r.RIDPHs,callback_data=json.dumps(["ShowDateUser","",userID]))]])
# file_id = get["result"]["photos"][0][0]["file_id"]
# Bot("sendPhoto",{"chat_id":chatID,"photo":file_id,"caption":r.RID.format(userID),"reply_to_message_id":message.message_id,"parse_mode":"html","reply_markup":reply_markup})
# if Ch == True and not redis.sismember("{}Nbot:IDpt".format(BOT_ID),chatID):
# reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton(r.RIDPHs,callback_data=json.dumps(["ShowDateUser","",userID]))]])
# Bot("sendMessage",{"chat_id":chatID,"text":r.RID.format(userID),"reply_to_message_id":message.message_id,"parse_mode":"html","reply_markup":reply_markup})
if text == "رتبتي":
t = IDrank(redis,userID,chatID,r)
Bot("sendMessage",{"chat_id":chatID,"text":f"⏏️꒐ موقعك : {t}","reply_to_message_id":message.message_id,"parse_mode":"html"})
if text == c.ID and not redis.sismember("{}Nbot:IDSend".format(BOT_ID),chatID) and message.reply_to_message:
us = message.reply_to_message.from_user.id
rusername = message.reply_to_message.from_user.username
if rusername is None:
rusername = "None"
t = IDrank(redis,us,chatID,r)
msgs = (redis.hget("{}Nbot:{}:msgs".format(BOT_ID,chatID),us) or 0)
edits = (redis.hget("{}Nbot:{}:edits".format(BOT_ID,chatID),us) or 0)
rate = int(msgs)*100/20000
age = getAge(us,r)
tx = r.ReIDnPT
Bot("sendMessage",{"chat_id":chatID,"text":tx.format(Reus=("@"+rusername or "None"),ReID=us,Rerank=t,Remsgs=msgs,Reedits=edits,Rage=age,Rerate=str(rate)+"%"),"reply_to_message_id":message.message_id,"parse_mode":"html"})
if re.search(c.idus,text) and not redis.sismember("{}Nbot:IDSend".format(BOT_ID),chatID):
user = text.split("@")[1]
try:
getUser = client.get_users(user)
us = getUser.id
rusername = user
if rusername is None:
rusername = "None"
age = getAge(us,r)
t = IDrank(redis,us,chatID,r)
msgs = (redis.hget("{}Nbot:{}:msgs".format(BOT_ID,chatID),us) or 0)
edits = (redis.hget("{}Nbot:{}:edits".format(BOT_ID,chatID),us) or 0)
rate = int(msgs)*100/20000
tx = r.ReIDnPT
Bot("sendMessage",{"chat_id":chatID,"text":tx.format(Reus=("@"+rusername or "None"),ReID=us,Rerank=t,Remsgs=msgs,Reedits=edits,Rage=age,Rerate=str(rate)+"%"),"reply_to_message_id":message.message_id,"parse_mode":"html"})
except Exception as e:
print(e)
if re.search(c.ShowSudos, text):
tx = (redis.get("{}Nbot:SHOWsudos".format(BOT_ID)) or "")
Bot("sendMessage",{"chat_id":chatID,"text":tx,"reply_to_message_id":message.message_id,"parse_mode":"html"})
if text == c.mymsgs:
get = redis.hget("{}Nbot:{}:msgs".format(BOT_ID,chatID),userID)
Bot("sendMessage",{"chat_id":chatID,"text":r.yourmsgs.format((get or 0)),"reply_to_message_id":message.message_id,"parse_mode":"html"})
if text == c.link and not redis.sismember("{}Nbot:showlink".format(BOT_ID),chatID):
get = (redis.hget("{}Nbot:links".format(BOT_ID),chatID) or GetLink(chatID) or "none")
Bot("sendMessage",{"chat_id":chatID,"text":r.showGPlk.format(get),"reply_to_message_id":message.message_id,"parse_mode":"html","disable_web_page_preview":True})
if text == c.myedits:
get = redis.hget("{}Nbot:{}:edits".format(BOT_ID,chatID),userID)
Bot("sendMessage",{"chat_id":chatID,"text":r.youredits.format((get or 0)),"reply_to_message_id":message.message_id,"parse_mode":"html"})
if text == c.myaddcontact:
get = redis.hget("{}Nbot:{}:addcontact".format(BOT_ID,chatID),userID)
Bot("sendMessage",{"chat_id":chatID,"text":r.youraddcontact.format((get or 0)),"reply_to_message_id":message.message_id,"parse_mode":"html"})
if not redis.sismember("{}Nbot:ReplySendBOT".format(BOT_ID),chatID):
if redis.hexists("{}Nbot:TXreplys".format(BOT_ID),text):
tx = redis.hget("{}Nbot:TXreplys".format(BOT_ID),text)
try:
rep = {"#cn":"{cn}","#age":"{age}","#fn":"{fn}","#id":"{id}","#username":"{username}","#msgs":"{msgs}","#stast":"{stast}","#edits":"{edits}","#rate":"{rate}","{us}":"{username}","#us":"{username}"}
for v in rep.keys():
tx = tx.replace(v,rep[v])
Bot("sendMessage",{"chat_id":chatID,"text":tx.format(fn=userFN,username=("@"+username or "n"),id=userID,stast=IDrank(redis,userID,chatID,r),cn=title),"reply_to_message_id":message.message_id,"parse_mode":"html"})
except Exception as e:
Bot("sendMessage",{"chat_id":chatID,"text":tx,"reply_to_message_id":message.message_id,"parse_mode":"html"})
if redis.hexists("{}Nbot:STreplys".format(BOT_ID),text):
ID = redis.hget("{}Nbot:STreplys".format(BOT_ID),text)
Bot("sendSticker",{"chat_id":chatID,"sticker":ID,"reply_to_message_id":message.message_id})
if redis.hexists("{}Nbot:GFreplys".format(BOT_ID),text):
ID = redis.hget("{}Nbot:GFreplys".format(BOT_ID),text)
Bot("sendanimation",{"chat_id":chatID,"animation":ID,"reply_to_message_id":message.message_id})
if redis.hexists("{}Nbot:{}:VOreplys".format(BOT_ID,chatID),text):
ID = redis.hget("{}Nbot:VOreplys".format(BOT_ID),text)
Bot("sendvoice",{"chat_id":chatID,"voice":ID,"reply_to_message_id":message.message_id})
if redis.hexists("{}Nbot:PHreplys".format(BOT_ID,chatID),text):
ID = redis.hget("{}Nbot:PHreplys".format(BOT_ID),text)
Bot("sendphoto",{"chat_id":chatID,"photo":ID,"reply_to_message_id":message.message_id})
if redis.hexists("{}Nbot:DOreplys".format(BOT_ID,chatID),text):
ID = redis.hget("{}Nbot:DOreplys".format(BOT_ID),text)
Bot("sendDocument",{"chat_id":chatID,"document":ID,"reply_to_message_id":message.message_id})
if not redis.sismember("{}Nbot:ReplySend".format(BOT_ID),chatID):
if redis.hexists("{}Nbot:{}:TXreplys".format(BOT_ID,chatID),text):
tx = redis.hget("{}Nbot:{}:TXreplys".format(BOT_ID,chatID),text)
try:
rep = {"#cn":"{cn}","#age":"{age}","#fn":"{fn}","#id":"{id}","#username":"{username}","#msgs":"{msgs}","#stast":"{stast}","#edits":"{edits}","#rate":"{rate}","{us}":"{username}","#us":"{username}"}
for v in rep.keys():
tx = tx.replace(v,rep[v])
Bot("sendMessage",{"chat_id":chatID,"text":tx.format(fn=userFN,username=("@"+username or "n"),id=userID,stast=IDrank(redis,userID,chatID,r),cn=title),"reply_to_message_id":message.message_id,"parse_mode":"html"})
except Exception as e:
Bot("sendMessage",{"chat_id":chatID,"text":tx,"reply_to_message_id":message.message_id,"parse_mode":"html"})
if redis.hexists("{}Nbot:{}:STreplys".format(BOT_ID,chatID),text):
ID = redis.hget("{}Nbot:{}:STreplys".format(BOT_ID,chatID),text)
Bot("sendSticker",{"chat_id":chatID,"sticker":ID,"reply_to_message_id":message.message_id})
if redis.hexists("{}Nbot:{}:GFreplys".format(BOT_ID,chatID),text):
ID = redis.hget("{}Nbot:{}:GFreplys".format(BOT_ID,chatID),text)
Bot("sendanimation",{"chat_id":chatID,"animation":ID,"reply_to_message_id":message.message_id})
if redis.hexists("{}Nbot:{}:VOreplys".format(BOT_ID,chatID),text):
ID = redis.hget("{}Nbot:{}:VOreplys".format(BOT_ID,chatID),text)
Bot("sendvoice",{"chat_id":chatID,"voice":ID,"reply_to_message_id":message.message_id})
if redis.hexists("{}Nbot:{}:AUreplys".format(BOT_ID,chatID),text):
ID = redis.hget("{}Nbot:{}:AUreplys".format(BOT_ID,chatID),text)
Bot("sendaudio",{"chat_id":chatID,"audio":ID,"reply_to_message_id":message.message_id})
if redis.hexists("{}Nbot:{}:PHreplys".format(BOT_ID,chatID),text):
ID = redis.hget("{}Nbot:{}:PHreplys".format(BOT_ID,chatID),text)
Bot("sendphoto",{"chat_id":chatID,"photo":ID,"reply_to_message_id":message.message_id})
if redis.hexists("{}Nbot:{}:DOreplys".format(BOT_ID,chatID),text):
ID = redis.hget("{}Nbot:{}:DOreplys".format(BOT_ID,chatID),text)
Bot("sendDocument",{"chat_id":chatID,"document":ID,"reply_to_message_id":message.message_id})
if redis.smembers("{}Nbot:botfiles".format(BOT_ID)):
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
for f in onlyfiles:
if f in filesR:
fi = f.replace(".py","")
UpMs= "files."+fi
try:
U = importlib.import_module(UpMs)
t = threading.Thread(target=U.updateMsgs,args=(client, message,redis))
t.daemon = True
t.start()
importlib.reload(U)
except Exception as e:
import traceback
traceback.print_exc()
print(e)
pass
|
main.py
|
import cv2
import time
import threading
import math
import sys
import numpy
import os
from inputs import get_gamepad
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
# USB camera setup
src = 'v4l2src device=/dev/video0 ! video/x-raw, width=3840, height=2160, format=NV12 ! appsink'
cap = cv2.VideoCapture(src,cv2.CAP_GSTREAMER)
if not cap.isOpened():
raise IOError("Failed to open the camera via the GStreamer pipeline")
crop_factor = 1
crop_dest_y = 0
crop_dest_x = 0
windowWidth = 1920
windowHeight = 1080
fps_flag = 0
disp_fps = 0
cvt_fps = 0
fhd_flag = False
reset_flag = False
class Gamepad(object):
# joystick axes report 8-bit values (0-255); used below for normalization
MAX_JOY_VAL = math.pow(2, 8)
def __init__(self):
self.init_flag = False
self.LeftJoystickY = 0.0
self.LeftJoystickX = 0.0
self.A = 0
self.B = 0
self.X = 0
self.Y = 0
self.HatY = 0
self.HatX = 0
self._monitor_thread = threading.Thread(target=self._monitor_controller, args=())
self._monitor_thread.daemon = True
self._monitor_thread.start()
def read(self):
stickx = self.LeftJoystickX
sticky = self.LeftJoystickY
a = self.A
b = self.B
hy = self.HatY
hx = self.HatX
return [stickx, sticky, a, b, hy, hx]
def cropZoom(self):
global crop_factor
global crop_dest_y
global crop_dest_x
global fps_flag
global fhd_flag
fps_flag = self.B
if self.Y:
fhd_flag = not fhd_flag
# X axis
if self.LeftJoystickX < 0.01 and self.LeftJoystickX > -0.01:
pass
else:
crop_dest_x = (int)(crop_dest_x + round((self.LeftJoystickX * 10)/2,-1))
# Y axis
if self.LeftJoystickY < 0.01 and self.LeftJoystickY > -0.01:
pass
else:
crop_dest_y = (int)(crop_dest_y + round((self.LeftJoystickY * 10)/2,-1))
# Zoom
if self.HatY < 0:
crop_factor = crop_factor + 0.1
elif self.HatY > 0:
crop_factor = crop_factor - 0.1
else:
pass
if self.HatX < 0:
crop_factor = crop_factor + 1
elif self.HatX > 0:
crop_factor = crop_factor - 1
else:
pass
# Reset
if self.A == 1:
crop_factor = 1
crop_dest_y = 0
crop_dest_x = 0
if crop_factor <= 1:
crop_factor = 1
return
def _monitor_controller(self):
while not self.init_flag:
events = get_gamepad()
for event in events:
if event.code == 'ABS_Y': #HORI Main Stick Y
self.LeftJoystickY = (event.state*10 / Gamepad.MAX_JOY_VAL)-5 # normalize between -5 and 5
elif event.code == 'ABS_X': #HORI Main Stick X
self.LeftJoystickX = (event.state*10 / Gamepad.MAX_JOY_VAL)-5 # normalize between -5 and 5
elif event.code == 'BTN_SOUTH': #HORI Y
self.Y = event.state
elif event.code == 'BTN_NORTH': #HORI X
self.X = event.state
elif event.code == 'BTN_C': #HORI A
self.A = event.state
elif event.code == 'BTN_EAST': #HORI B
self.B = event.state
elif event.code == 'ABS_HAT0Y': #HORI Hat Y
self.HatY = event.state
elif event.code == 'ABS_HAT0X': #HORI Hat X
self.HatX = event.state
def draw():
display_start_time = time.time()
ret, img = cap.read()
if not ret:
sys.exit()
img = cv2.cvtColor(img,cv2.COLOR_YUV2RGB_NV12)
########## ROI Cropping ##########
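# The crop window is 1/crop_factor of the full frame, centred on the image
# centre shifted by the (crop_dest_x, crop_dest_y) pan offset. Worked example:
# a 3840x2160 frame with crop_factor=2 and no pan gives crop_height=1080,
# y1=(1080/2)*(2-1)=540 and y2=(1080/2)*(2+1)=1620, i.e. the middle 1080 rows.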
gamepad.cropZoom()
imageh, imagew = img.shape[:2]
crop_height = imageh * (1/crop_factor)
crop_width = imagew * (1/crop_factor)
global crop_dest_y
global crop_dest_x
global disp_fps
global cvt_fps
if abs(crop_dest_y) > imageh/2-(imageh/crop_factor)/2:
crop_dest_y = (int)((imageh/2-(imageh/crop_factor)/2)*numpy.sign(crop_dest_y))
if abs(crop_dest_x) > imagew/2-(imagew/crop_factor)/2:
crop_dest_x = (int)((imagew/2-(imagew/crop_factor)/2)*numpy.sign(crop_dest_x))
y1 = (int)((crop_height/2)*(crop_factor-1)) + crop_dest_y
y2 = (int)((crop_height/2)*(crop_factor+1)) + crop_dest_y
x1 = (int)((crop_width/2)*(crop_factor-1)) + crop_dest_x
x2 = (int)((crop_width/2)*(crop_factor+1)) + crop_dest_x
if y1 < 0: y1 = 0
if y2 < 0: y2 = 0
if x1 < 0: x1 = 0
if x2 < 0: x2 = 0
img = img[y1:y2,x1:x2]
########## ROI Cropping END ##########
if fhd_flag:
img = cv2.resize(img, dsize=(1920, 1080), interpolation=cv2.INTER_NEAREST)
else:
img = cv2.resize(img, dsize=(1440, 810), interpolation=cv2.INTER_NEAREST)
cvt_fps = round((1.0 / (time.time() - display_start_time)),1)
if fps_flag:
cv2.putText(img,
"CFPS:"+str(cvt_fps)+" DFPS:"+str(disp_fps)+" CropF:"+str(round(crop_factor,2))+" DstXY:"+str(crop_dest_x)+","+str(crop_dest_y),
org=(10, 50),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1.0,
color=(0, 170, 0),
thickness=2,
lineType=cv2.LINE_4)
h, w = img.shape[:2]
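# Upload the RGB frame as an OpenGL texture and draw it on a fullscreen quad.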
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, w, h, 0, GL_RGB, GL_UNSIGNED_BYTE, img)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glColor3f(1.0, 1.0, 1.0)
glEnable(GL_TEXTURE_2D)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glBegin(GL_QUADS)
glTexCoord2d(0.0, 1.0)
glVertex3d(-1.0, -1.0, 0.0)
glTexCoord2d(1.0, 1.0)
glVertex3d( 1.0, -1.0, 0.0)
glTexCoord2d(1.0, 0.0)
glVertex3d( 1.0, 1.0, 0.0)
glTexCoord2d(0.0, 0.0)
glVertex3d(-1.0, 1.0, 0.0)
glEnd()
glFlush()
glutSwapBuffers()
disp_fps = round((1.0 / (time.time() - display_start_time)),1)
def init():
glClearColor(0.7, 0.7, 0.7, 0.7)
def idle():
glutPostRedisplay()
def reshape(w, h):
glViewport(0, 0, w, h)
glLoadIdentity()
glOrtho(-w / windowWidth, w / windowWidth, -h / windowHeight, h / windowHeight, -1.0, 1.0)
def keyboard(key, x, y):
key = key.decode('utf-8')
# press q to exit
if key == 'q':
print('exit...')
sys.exit()
gamepad = Gamepad()
if __name__ == "__main__":
glutInitWindowPosition(0, 0)
glutInitWindowSize(windowWidth, windowHeight)
glutInit(sys.argv)
glutInitDisplayMode(GLUT_RGB)
glutEnterGameMode()
glutSetCursor(GLUT_CURSOR_NONE)
#glutCreateWindow("Display")
glutDisplayFunc(draw)
glutReshapeFunc(reshape)
glutKeyboardFunc(keyboard)
init()
glutIdleFunc(idle)
glutMainLoop()
|
video.py
|
import threading
import requests
from config import App_title
import youtube_dl
from pyrogram.types import InlineKeyboardMarkup,InlineKeyboardButton
from modules.control import run_rclone
import sys
import os
import time
temp_time= time.time()
def progress(current, total,client,message,name):
print(f"{current * 100 / total:.1f}%")
pro=f"{current * 100 / total:.1f}%"
try:
client.edit_message_text(chat_id=message.chat.id,message_id=message.message_id,text=f"{name}\nUploading: {pro}")
except Exception as e:
print(e)
class Download_video():
def download_video_status(self,d):
global temp_time
if d['status'] == 'downloading':
time_end = time.time()
if time_end - temp_time < 2:
pass
else:
temp_time = time.time()
# print(d)
text="下载中 " + d['_percent_str'] + " " + d['_speed_str']
try:
self.client.edit_message_text(text=text, chat_id=self.info.chat.id, message_id=self.info.message_id,
parse_mode='markdown')
print("视频正在下载,保持唤醒")
print(requests.get(url=f"https://{App_title}.herokuapp.com/"))
sys.stdout.flush()
except:
pass
def __init__(self,client, call):
# keep references to the pyrogram client and the callback query
self.client=client
self.call=call
def download_video(self):
try:
import re
print("开始下载视频")
sys.stdout.flush()
message_chat_id = self.call.message.chat.id
self.info = self.client.send_message(chat_id=message_chat_id, text="Download started", parse_mode='markdown')
caption = str(self.call.message.caption)
web_url = re.findall("web_url:(.*?)\n", caption, re.S)[0]
print(web_url)
sys.stdout.flush()
ydl_opts = {
# the original glued the CLI flag "--merge-output-format mp4" into the format
# selector; the API-level equivalent is the separate 'merge_output_format' key
'format': "bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best[ext=flv]/best",
'merge_output_format': 'mp4',
'quiet': True,
'no_warnings': True,
'progress_hooks': [self.download_video_status]
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
result = ydl.extract_info(
url=web_url,
download=True)
video_name = ydl.prepare_filename(result)
print(video_name)
except Exception as e:
print(f"下载视频失败 :{e}")
sys.stdout.flush()
return
self.client.edit_message_text(text=f"{video_name}\nDownload complete, starting upload", chat_id=self.info.chat.id,
message_id=self.info.message_id,
parse_mode='markdown')
if self.call.data =="videorclone":
print(f"{video_name}上传到网盘")
sys.stdout.flush()
run_rclone(video_name, video_name, info=self.info, file_num=1, client=self.client, message=self.info)
os.remove(video_name)
self.client.delete_messages(chat_id=self.call.message.chat.id,message_ids=self.call.message.message_id)
else:
print(f"{video_name}发送到TG")
sys.stdout.flush()
self.client.send_video(chat_id=self.call.message.chat.id,video=video_name,caption=caption ,progress=progress,
progress_args=(self.client, self.info, video_name,))
os.remove(video_name)
self.client.delete_messages(chat_id=self.call.message.chat.id, message_ids=self.call.message.message_id)
def get_video_info(client, message, url):
try:
print(url)
sys.stdout.flush()
ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s.%(ext)s'})
result = ydl.extract_info(
url=url,
download=False,
)
#print(result)
video_name=result['title']
video_description=result['description']
video_img=result['thumbnails'][len(result['thumbnails'])-1]["url"]
video_uploader=result['uploader']
web_url=result['webpage_url']
text=f"视频名称:{video_name}\n" \
f"作者:{video_uploader}\n" \
f"web_url:{web_url}\n" \
f"简介:{video_description}\n"
print(text)
print(video_img)
sys.stdout.flush()
except Exception as e:
client.send_message(chat_id=message.chat.id, text=f"Failed to get video info:\n{e}", parse_mode='markdown')
return
new_inline_keyboard = [
[
InlineKeyboardButton(
text="上传网盘",
callback_data=f"videorclone"
),
InlineKeyboardButton(
text=f"发送给我",
callback_data=f"videotg"
)
]
]
img = requests.get(url=video_img)
img_name=f"{message.chat.id}{message.message_id}.png"
with open(img_name, 'wb') as f:
f.write(img.content)
new_reply_markup = InlineKeyboardMarkup(inline_keyboard=new_inline_keyboard)
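# Telegram photo captions are limited to 1024 characters, hence the slice below.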
client.send_photo(caption=text[0:1024], photo=img_name,chat_id=message.chat.id,
parse_mode='markdown', reply_markup=new_reply_markup)
os.remove(img_name)
def start_get_video_info(client, message):
keywords = message.text.split()[1]
print(keywords)
t1 = threading.Thread(target=get_video_info, args=(client, message, keywords))
t1.start()
|
Analysis.py
|
"""
This module contains the ``analysis`` class.
It includes common classes for file management and messaging and all
calls to AEDT modules like the modeler, mesh, postprocessing, and setup.
"""
from __future__ import absolute_import
import os
import shutil
import threading
import warnings
from collections import OrderedDict
from pyaedt.generic.general_methods import aedt_exception_handler
from pyaedt.modeler.modeler_constants import CoordinateSystemAxis, CoordinateSystemPlane, GravityDirection, Plane
from pyaedt.modules.Boundary import NativeComponentObject
from pyaedt.modules.DesignXPloration import (
DOESetups,
DXSetups,
OptimizationSetups,
ParametericsSetups,
SensitivitySetups,
StatisticalSetups,
)
from pyaedt.modules.MaterialLib import Materials
from pyaedt.modules.SetupTemplates import SetupKeys
from pyaedt.modules.SolutionType import SetupTypes, SolutionType
from pyaedt.modules.SolveSetup import Setup
from pyaedt.application.Design import Design
from pyaedt.application.JobManager import update_hpc_option
class Analysis(Design, object):
"""Contains all common analysis functions.
This class is inherited in the calling application and is accessible through it (for example, ``hfss.method_name``).
It is automatically initialized by a call from an application, such as HFSS or Q3D.
See the application function for its parameter descriptions.
Parameters
----------
application : str
Application that is to initialize the call.
projectname : str
Name of the project to select or the full path to the project
or AEDTZ archive to open.
designname : str
Name of the design to select.
solution_type : str
Solution type to apply to the design.
setup_name : str
Name of the setup to use as the nominal.
specified_version : str
Version of AEDT to use.
NG : bool
Whether to run AEDT in the non-graphical mode.
new_desktop_session : bool
Whether to launch an instance of AEDT in a new thread, even if
another instance of the ``specified_version`` is active on the
machine.
close_on_exit : bool
Whether to release AEDT on exit.
student_version : bool
Whether to enable the student version of AEDT.
"""
def __init__(
self,
application,
projectname,
designname,
solution_type,
setup_name,
specified_version,
non_graphical,
new_desktop_session,
close_on_exit,
student_version,
):
self.setups = []
Design.__init__(
self,
application,
projectname,
designname,
solution_type,
specified_version,
non_graphical,
new_desktop_session,
close_on_exit,
student_version,
)
self.ooptimetrics = self._odesign.GetModule("Optimetrics")
self.ooutput_variable = self._odesign.GetModule("OutputVariable")
self.logger.glb.info("Design Loaded")
self._setup = None
if setup_name:
self.analysis_setup = setup_name
self.solution_type = solution_type
self._materials = Materials(self)
self.logger.glb.info("Materials Loaded")
self._available_variations = self.AvailableVariations(self)
if "HFSS 3D Layout Design" in self.design_type:
self.oanalysis = self._odesign.GetModule("SolveSetups")
elif "EMIT" in self.design_type:
self.oanalysis = None
elif "Circuit Design" in self.design_type or "Twin Builder" in self.design_type:
self.oanalysis = self._odesign.GetModule("SimSetup")
else:
self.oanalysis = self._odesign.GetModule("AnalysisSetup")
self.setups = [self.get_setup(setup_name) for setup_name in self.setup_names]
self.opti_parametric = ParametericsSetups(self)
self.opti_optimization = OptimizationSetups(self)
self.opti_doe = DOESetups(self)
self.opti_designxplorer = DXSetups(self)
self.opti_sensitivity = SensitivitySetups(self)
self.opti_statistical = StatisticalSetups(self)
self.native_components = self._get_native_data()
@property
def output_variables(self):
"""List of Output variables.
Returns
-------
list of str
"""
return self.ooutput_variable.GetOutputVariables()
@property
def materials(self):
"""Manages materials in the project.
Returns
-------
:class:`pyaedt.modules.MaterialLib.Materials`
Manages materials in the project.
"""
return self._materials
@property
def Position(self):
"""Position of the object.
Returns
-------
type
Position object.
"""
return self.modeler.Position
@property
def available_variations(self):
"""Available variation object.
Returns
-------
:class:`pyaedt.application.Analysis.Analysis.AvailableVariations`
Available variation object.
"""
return self._available_variations
@property
def CoordinateSystemAxis(self):
"""Coordinate system axis constant.
Returns
-------
:class:`pyaedt.modeler.modeler_constants.CoordinateSystemAxis`
Coordinate system axis constants tuple (.X, .Y, .Z).
"""
return CoordinateSystemAxis()
@property
def CoordinateSystemPlane(self):
"""Coordinate system plane constants.
Returns
-------
:class:`pyaedt.modeler.modeler_constants.CoordinateSystemPlane`
Coordinate system plane constants tuple (.XY, .YZ, .XZ).
"""
return CoordinateSystemPlane()
@property
def View(self):
"""Planes. (To check if redundant to CoordinateSystemPlane.)
Returns
-------
tuple
Coordinate system plane string tuple ("XY", "YZ", "XZ").
"""
return Plane()
@property
def GravityDirection(self):
"""Gravity direction. (To check if redundant.)
Returns
-------
tuple
Gravity direction tuple (XNeg, YNeg, ZNeg, XPos, YPos, ZPos).
"""
return GravityDirection()
@property
def modeler(self):
"""Modeler.
Returns
-------
:class:`pyaedt.modeler.Modeler.Modeler`
Modeler object.
"""
return self._modeler
@property
def mesh(self):
"""Mesh.
Returns
-------
:class:`pyaedt.modules.Mesh.Mesh`
Mesh object.
"""
return self._mesh
@property
def post(self):
"""PostProcessor.
Returns
-------
:class:`pyaedt.modules.AdvancedPostProcessing.PostProcessor`
PostProcessor object.
"""
return self._post
@property
def analysis_setup(self):
"""Analysis setup.
Returns
-------
str
Name of the active or first analysis setup.
"""
if self._setup:
return self._setup
elif self.existing_analysis_setups:
return self.existing_analysis_setups[0]
else:
self._setup = None
return self._setup
@analysis_setup.setter
def analysis_setup(self, setup_name):
setup_list = self.existing_analysis_setups
if setup_list:
assert setup_name in setup_list, "Invalid setup name {}".format(setup_name)
self._setup = setup_name
else:
# no setups exist yet; accept the given name rather than indexing an empty list
self._setup = setup_name
# return self._setup
@property
def existing_analysis_sweeps(self):
"""Existing analysis sweeps.
Returns
-------
list of str
List of all analysis sweeps in the design.
"""
setup_list = self.existing_analysis_setups
sweep_list = []
if self.solution_type == "HFSS3DLayout" or self.solution_type == "HFSS 3D Layout Design":
sweep_list = self.oanalysis.GetAllSolutionNames()
sweep_list = [i for i in sweep_list if "Adaptive Pass" not in i]
sweep_list.reverse()
else:
for el in setup_list:
if self.solution_type == "HFSS3DLayout" or self.solution_type == "HFSS 3D Layout Design":
sweeps = self.oanalysis.GetAllSolutionNames()
elif self.solution_type in SetupKeys.defaultAdaptive.keys():
setuptype = SetupKeys.defaultAdaptive[self.solution_type]
if setuptype:
sweep_list.append(el + " : " + setuptype)
try:
sweeps = list(self.oanalysis.GetSweeps(el))
except:
sweeps = []
for sw in sweeps:
sweep_list.append(el + " : " + sw)
return sweep_list
@property
def nominal_adaptive(self):
"""Nominal adaptive sweep.
Returns
-------
str
Name of the nominal adaptive sweep.
"""
if len(self.existing_analysis_sweeps) > 0:
return self.existing_analysis_sweeps[0]
else:
return ""
@property
def nominal_sweep(self):
"""Nominal sweep.
Returns
-------
str
Name of the last adaptive sweep if a sweep is available or
the name of the nominal adaptive sweep if present.
"""
if len(self.existing_analysis_sweeps) > 1:
return self.existing_analysis_sweeps[1]
else:
return self.nominal_adaptive
@property
def existing_analysis_setups(self):
"""Existing analysis setups.
Returns
-------
list of str
List of all analysis setups in the design.
"""
setups = list(self.oanalysis.GetSetups())
return setups
@property
def setup_names(self):
"""Setup names.
Returns
-------
list of str
List of names of all analysis setups in the design.
"""
return self.oanalysis.GetSetups()
@property
def SimulationSetupTypes(self):
"""Simulation setup types.
Returns
-------
SetupTypes
List of all simulation setup types categorized by application.
"""
return SetupTypes()
@property
def SolutionTypes(self):
"""Solution types.
Returns
-------
SolutionType
List of all solution type categorized by application.
"""
return SolutionType()
@aedt_exception_handler
def _get_native_data(self):
"""Retrieve Native Components data."""
boundaries = []
try:
data_vals = self.design_properties["ModelSetup"]["GeometryCore"]["GeometryOperations"][
"SubModelDefinitions"
]["NativeComponentDefinition"]
if not isinstance(data_vals, list) and isinstance(data_vals, (OrderedDict, dict)):
boundaries.append(
NativeComponentObject(
self,
data_vals["NativeComponentDefinitionProvider"]["Type"],
data_vals["BasicComponentInfo"]["ComponentName"],
data_vals,
)
)
for ds in data_vals:
try:
if isinstance(ds, (OrderedDict, dict)):
boundaries.append(
NativeComponentObject(
self,
ds["NativeComponentDefinitionProvider"]["Type"],
ds["BasicComponentInfo"]["ComponentName"],
ds,
)
)
except:
pass
except:
pass
return boundaries
class AvailableVariations(object):
def __init__(self, app):
"""Contains available variations.
Parameters
----------
app :
Inherited parent object.
Returns
-------
object
Parent object.
"""
self._app = app
@property
def variables(self):
"""Variables.
Returns
-------
list of str
List of names of independent variables.
"""
return [i for i in self._app.variable_manager.independent_variables]
@aedt_exception_handler
def variations(self, setup_sweep=None):
"""Variations.
Parameters
----------
setup_sweep : str, optional
Setup name with the sweep to search for variations on. The default is ``None``.
Returns
-------
list of str
List of variation families.
"""
if not setup_sweep:
setup_sweep = self._app.existing_analysis_sweeps[0]
vs = self._app.osolution.GetAvailableVariations(setup_sweep)
families = []
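# Each entry of vs is assumed to look like "var1='val1' var2='val2'" (inferred
# from the slicing below): split on spaces, match the variable-name prefix,
# then strip the ='...' wrapper around the value.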
for v in vs:
variations = v.split(" ")
family = []
for el in self.variables:
family.append(el + ":=")
i = 0
while i < len(variations):
if variations[i][0 : len(el)] == el:
family.append([variations[i][len(el) + 2 : -1]])
i += 1
families.append(family)
return families
@property
def nominal(self):
"""Nominal."""
families = []
for el in self.variables:
families.append(el + ":=")
families.append(["Nominal"])
return families
@property
def nominal_w_values(self):
"""Nominal with values."""
families = []
if self._app.design_type == "HFSS 3D Layout Design":
listvar = list(self._app._odesign.GetVariables())
for el in listvar:
families.append(el + ":=")
families.append([self._app._odesign.GetVariableValue(el)])
else:
variation = self._app._odesign.GetNominalVariation()
for el in self.variables:
families.append(el + ":=")
families.append([self._app._odesign.GetVariationVariableValue(variation, el)])
return families
@property
def nominal_w_values_dict(self):
"""Nominal with values in a dictionary."""
families = {}
if self._app.design_type == "HFSS 3D Layout Design":
listvar = list(self._app._odesign.GetVariables())
for el in listvar:
families[el] = self._app._odesign.GetVariableValue(el)
else:
variation = self._app._odesign.GetNominalVariation()
for el in self.variables:
families[el] = self._app._odesign.GetVariationVariableValue(variation, el)
return families
@property
def all(self):
"""All."""
families = []
for el in self.variables:
families.append(el + ":=")
families.append(["All"])
return families
class AxisDir(object):
"""Contains constants for the axis directions."""
(XNeg, YNeg, ZNeg, XPos, YPos, ZPos) = range(0, 6)
@aedt_exception_handler
def get_setups(self):
"""Retrieve setups.
Returns
-------
list of str
List of names of all setups.
"""
setups = self.oanalysis.GetSetups()
return list(setups)
@aedt_exception_handler
def get_nominal_variation(self):
"""Retrieve the nominal variation.
Returns
-------
list of str
List of nominal variations.
"""
return self.available_variations.nominal
@aedt_exception_handler
def get_sweeps(self, name):
"""Retrieve all sweep for a setup.
Parameters
----------
name : str
Name of the setup.
Returns
-------
list of str
List of names of all sweeps for the setup.
"""
sweeps = self.oanalysis.GetSweeps(name)
return list(sweeps)
@aedt_exception_handler
def export_parametric_results(self, sweepname, filename, exportunits=True):
"""Export a list of all parametric variations solved for a sweep to a CSV file.
Parameters
----------
sweepname : str
Name of the optimetrics sweep.
filename : str
Full path and name for the CSV file.
exportunits : bool, optional
Whether to export units with the value. The default is ``True``. When ``False``,
only the value is exported.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
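Examples
--------
Export the solved variations of a parametric sweep to a CSV file
(the sweep and file names below are illustrative).
>>> hfss.export_parametric_results("ParametricSetup1", "C:/temp/variations.csv")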
"""
self.ooptimetrics.ExportParametricResults(sweepname, filename, exportunits)
return True
@aedt_exception_handler
def analyze_from_initial_mesh(self):
"""Revert the solution to the initial mesh and re-run the solve.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
self.oanalysis.RevertSetupToInitial(self._setup)
self.analyze_nominal()
return True
@aedt_exception_handler
def analyse_nominal(self):
"""Solve the nominal design.
.. deprecated:: 0.4.0
Use :func:`Analysis.analyze_nominal` instead.
"""
warnings.warn("`analyse_nominal` is deprecated. Use `analyze_nominal` instead.", DeprecationWarning)
self.analyze_nominal()
@aedt_exception_handler
def analyze_nominal(self, num_cores=None, num_tasks=None, num_gpu=None, acf_file=None):
"""Solve the nominal design.
Parameters
----------
num_cores : int, optional
Number of Simulation cores.
num_tasks : int, optional
Number of Simulation tasks.
num_gpu : int, optional
Number of Simulation Gpu to use.
acf_file : str, optional
Full path to custom acf_file.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
return self.analyze_setup(self.analysis_setup, num_cores, num_tasks, num_gpu, acf_file)
@aedt_exception_handler
def generate_unique_setup_name(self, setup_name=None):
"""Generate a new setup with an unique name.
Parameters
----------
setup_name : str, optional
Name of the setup. The default is ``None``.
Returns
-------
str
Name of the setup.
"""
if not setup_name:
setup_name = "Setup"
index = 2
while setup_name in self.existing_analysis_setups:
setup_name = setup_name + "_{}".format(index)
index += 1
return setup_name
@aedt_exception_handler
def create_setup(self, setupname="MySetupAuto", setuptype=None, props={}):
"""Create a setup.
Parameters
----------
setupname : str, optional
Name of the setup. The default is ``"MySetupAuto"``.
setuptype : optional
Type of the setup. The default is ``None``, in which case
the default type is applied.
props : dict, optional
Dictionary of analysis properties appropriate for the design and analysis.
If no values are passed, default values will be used.
Returns
-------
:class:`pyaedt.modules.SolveSetup.Setup`
Examples
--------
Create a setup for SBR+ setup using advanced Doppler
processing for automotive radar.
>>> import pyaedt
>>> hfss = pyaedt.Hfss(solution_type='SBR+')
>>> setup1 = hfss.create_setup(setupname='Setup1')
>>> setup1.props["IsSbrRangeDoppler"] = True
>>> setup1.props["SbrRangeDopplerTimeVariable"] = "time_var"
>>> setup1.props["SbrRangeDopplerCenterFreq"] = "76.5GHz"
>>> setup1.props["SbrRangeDopplerRangeResolution"] = "0.15meter"
>>> setup1.props["SbrRangeDopplerRangePeriod"] = "100meter"
>>> setup1.props["SbrRangeDopplerVelocityResolution"] = "0.2m_per_sec"
>>> setup1.props["SbrRangeDopplerVelocityMin"] = "-30m_per_sec"
>>> setup1.props["SbrRangeDopplerVelocityMax"] = "30m_per_sec"
>>> setup1.props["DopplerRayDensityPerWavelength"] = "0.2"
>>> setup1.props["MaxNumberOfBounces"] = "3"
>>> setup1.update()
...
pyaedt info: Sweep was created correctly.
"""
if setuptype is None:
if self.design_type == "Icepak" and self.solution_type == "Transient":
setuptype = SetupKeys.defaultSetups["TransientTemperatureAndFlow"]
else:
setuptype = SetupKeys.defaultSetups[self.solution_type]
name = self.generate_unique_setup_name(setupname)
setup = Setup(self, setuptype, name)
setup.create()
if props:
for el in props:
setup.props[el] = props[el]
setup.update()
self.analysis_setup = name
self.setups.append(setup)
return setup
@aedt_exception_handler
def delete_setup(self, setupname):
"""Delete a setup.
Parameters
----------
setupname : str
Name of the setup.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
Examples
--------
Create a setup and then delete it.
>>> import pyaedt
>>> hfss = pyaedt.Hfss()
>>> setup1 = hfss.create_setup(setupname='Setup1')
>>> hfss.delete_setup(setupname='Setup1')
...
pyaedt info: Sweep was deleted correctly.
"""
if setupname in self.existing_analysis_setups:
self.oanalysis.DeleteSetups([setupname])
for s in self.setups:
if s.name == setupname:
self.setups.remove(s)
return True
return False
@aedt_exception_handler
def edit_setup(self, setupname, properties_dict):
"""Modify a setup.
Parameters
----------
setupname : str
Name of the setup.
properties_dict : dict
Dictionary containing the property to update with the value.
Returns
-------
:class:`pyaedt.modules.SolveSetup.Setup`
"""
setuptype = SetupKeys.defaultSetups[self.solution_type]
setup = Setup(self, setuptype, setupname, isnewsetup=False)
setup.update(properties_dict)
self.analysis_setup = setupname
return setup
@aedt_exception_handler
def get_setup(self, setupname):
"""Get the setup from the current design.
Parameters
----------
setupname : str
Name of the setup.
Returns
-------
:class:`pyaedt.modules.SolveSetup.Setup`
"""
setuptype = SetupKeys.defaultSetups[self.solution_type]
setup = Setup(self, setuptype, setupname, isnewsetup=False)
if setup.props:
self.analysis_setup = setupname
return setup
@aedt_exception_handler
def create_output_variable(self, variable, expression):
"""Create or modify an output variable.
Parameters
----------
variable : str
Name of the variable.
expression :
Value for the variable.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
oModule = self.ooutput_variable
if variable in self.output_variables:
oModule.EditOutputVariable(
variable, expression, variable, self.existing_analysis_sweeps[0], self.solution_type, []
)
else:
oModule.CreateOutputVariable(variable, expression, self.existing_analysis_sweeps[0], self.solution_type, [])
return True
@aedt_exception_handler
def get_output_variable(self, variable):
"""Retrieve the value of the output variable.
Parameters
----------
variable : str
Name of the variable.
Returns
-------
type
Value of the output variable.
"""
assert variable in self.output_variables, "Output variable {} does not exist.".format(variable)
nominal_variation = self.odesign.GetNominalVariation()
value = self.ooutput_variable.GetOutputVariableValue(
variable, nominal_variation, self.existing_analysis_sweeps[0], self.solution_type, []
)
return value
@aedt_exception_handler
def get_object_material_properties(self, object_list=None, prop_names=None):
"""Retrieve the material properties for a list of given objects and return them in a dictionary.
This high-level function ignores objects with no defined material properties.
Parameters
----------
object_list : list, optional
List of objects for which to get material_properties. The default is ``None``,
in which case all objects are considered.
prop_names : str or list
The property or list of properties to export. The default is ``None``, in
which case all properties are exported.
Returns
-------
dict
Dictionary of objects with material properties.
"""
if object_list:
if not isinstance(object_list, list):
object_list = [object_list]
else:
object_list = self.modeler.primitives.object_names
if prop_names:
if not isinstance(prop_names, list):
prop_names = [prop_names]
props = {}
for entry in object_list:
mat_name = self.modeler.primitives[entry].material_name
mat_props = self._materials[mat_name]
if prop_names is None:
props[entry] = mat_props._props
else:
props[entry] = {}
for prop_name in prop_names:
props[entry][prop_name] = mat_props._props[prop_name]
return props
@aedt_exception_handler
def analyze_setup(self, name, num_cores=None, num_tasks=None, num_gpu=None, acf_file=None):
"""Analyze a specific design setup.
Parameters
----------
name : str
Name of the setup, which can be an optimetric setup or a simple setup.
num_cores : int, optional
Number of Simulation cores.
num_tasks : int, optional
Number of Simulation tasks.
num_gpu : int, optional
Number of Simulation Gpu to use.
acf_file : str, optional
Full path to custom acf_file.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
active_config = self._desktop.GetRegistryString(r"Desktop/ActiveDSOConfigurations/"+self.design_type)
if acf_file:
self._desktop.SetRegistryFromFile(acf_file)
name = ""
with open(acf_file, 'r') as f:
lines = f.readlines()
for line in lines:
if "ConfigName" in line:
name = line.strip().split("=")[1]
break
if name:
try:
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/"+self.design_type, name)
except:
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, active_config)
elif num_gpu or num_tasks or num_cores:
config_name = "pyaedt_config"
source_name = os.path.join(self.pyaedt_dir, "misc", "pyaedt_local_config.acf")
target_name = os.path.join(self.project_path, config_name + ".acf")
shutil.copy2(source_name, target_name)
if num_cores:
update_hpc_option(target_name, "NumCores", num_cores, False)
if num_gpu:
update_hpc_option(target_name, "NumGPUs", num_gpu, False)
if num_tasks:
update_hpc_option(target_name, "NumEngines", num_tasks, False)
update_hpc_option(target_name, "ConfigName", config_name, True)
update_hpc_option(target_name, "DesignType", self.design_type, True)
try:
self._desktop.SetRegistryFromFile(target_name)
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, config_name)
except:
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, active_config)
if name in self.existing_analysis_setups:
try:
self.logger.glb.info("Solving design setup %s", name)
self.odesign.Analyze(name)
except:
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, active_config)
self.logger.glb.error("Error in Solving Setup %s", name)
return False
else:
try:
self.logger.glb.info("Solving Optimetrics")
self.ooptimetrics.SolveSetup(name)
except:
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, active_config)
self.logger.glb.error("Error in Solving or Missing Setup %s", name)
return False
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, active_config)
self.logger.glb.info("Design setup %s solved correctly", name)
return True
@aedt_exception_handler
def solve_in_batch(self, filename=None, machine="local", run_in_thread=False):
"""Analyze a design setup in batch mode.
.. note::
To use this function, the AEDT project must be closed.
Parameters
----------
filename : str, optional
Name of the setup. The default is ``None``, which means that the active project
is to be solved.
machine : str, optional
Name of the machine if remote. The default is ``"local"``.
run_in_thread : bool, optional
Whether the batch command is to be submitted as a thread. The default is
``False``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
if not filename:
filename = self.project_file
self.close_project()
if machine == "local":
# -Monitor option used as workaround for R2 BatchSolve not exiting properly at the end of the Batch job
options = " -ng -BatchSolve -Monitor "
else:
options = " -ng -distribute -machinelist list=" + machine + " -Batchsolve "
self.logger.info("Batch Solve Options: " + options)
if os.name == "posix":
batch_run = os.path.join(
self.desktop_install_dir + "/ansysedt" + chr(34) + options + chr(34) + filename + chr(34)
)
else:
batch_run = (
chr(34) + self.desktop_install_dir + "/ansysedt.exe" + chr(34) + options + chr(34) + filename + chr(34)
)
"""
check for existing solution directory and delete if present so we
dont have old .asol files etc
"""
self.logger.info("Solving model in batch mode on " + machine)
self.logger.info("Batch Job command:" + batch_run)
if run_in_thread:
def thread_run():
""" """
os.system(batch_run)
x = threading.Thread(target=thread_run)
x.start()
else:
os.system(batch_run)
self.logger.info("Batch job finished.")
return True
@aedt_exception_handler
def submit_job(
self, clustername, aedt_full_exe_path=None, numnodes=1, numcores=32, wait_for_license=True, setting_file=None
):
"""Submit a job to be solved on a cluster.
Parameters
----------
clustername : str
Name of the cluster to submit the job to.
aedt_full_exe_path : str, optional
Full path to the AEDT executable file. The default is ``None``, in which
case ``"/clustername/AnsysEM/AnsysEM2x.x/Win64/ansysedt.exe"`` is used.
numnodes : int, optional
Number of nodes. The default is ``1``.
numcores : int, optional
Number of cores. The default is ``32``.
wait_for_license : bool, optional
Whether to wait for the license to be validated. The default is ``True``.
setting_file : str, optional
Name of the file to use as a template. The default value is ``None``.
Returns
-------
type
ID of the job.
"""
project_file = self.project_file
project_path = self.project_path
if not aedt_full_exe_path:
version = self.odesktop.GetVersion()[2:6]
if os.path.exists(r"\\" + clustername + r"\AnsysEM\AnsysEM{}\Win64\ansysedt.exe".format(version)):
aedt_full_exe_path = (
r"\\\\\\\\" + clustername + r"\\\\AnsysEM\\\\AnsysEM{}\\\\Win64\\\\ansysedt.exe".format(version)
)
elif os.path.exists(r"\\" + clustername + r"\AnsysEM\AnsysEM{}\Linux64\ansysedt".format(version)):
aedt_full_exe_path = (
r"\\\\\\\\" + clustername + r"\\\\AnsysEM\\\\AnsysEM{}\\\\Linux64\\\\ansysedt".format(version)
)
else:
self.logger.glb.error("AEDT path does not exist. Please provide a full path.")
return False
else:
if not os.path.exists(aedt_full_exe_path):
self.logger.glb.error("Aedt Path doesn't exists. Please provide a full path")
return False
aedt_full_exe_path = aedt_full_exe_path.replace("\\", "\\\\")
self.close_project()
path_file = os.path.dirname(__file__)
destination_reg = os.path.join(project_path, "Job_settings.areg")
if not setting_file:
setting_file = os.path.join(path_file, "..", "misc", "Job_Settings.areg")
shutil.copy(setting_file, destination_reg)
f1 = open(destination_reg, "w")
with open(setting_file) as f:
lines = f.readlines()
for line in lines:
if "\\ $begin" == line[:8]:
lin = "\\ $begin \\'{}\\'\\\n".format(clustername)
f1.write(lin)
elif "\\ $end" == line[:6]:
lin = "\\ $end \\'{}\\'\\\n".format(clustername)
f1.write(lin)
elif "NumCores" in line:
lin = "\\ \\ \\ \\ NumCores={}\\\n".format(numcores)
f1.write(lin)
elif "NumNodes=1" in line:
lin = "\\ \\ \\ \\ NumNodes={}\\\n".format(numnodes)
f1.write(lin)
elif "ProductPath" in line:
lin = "\\ \\ ProductPath =\\'{}\\'\\\n".format(aedt_full_exe_path)
f1.write(lin)
elif "WaitForLicense" in line:
lin = "\\ \\ WaitForLicense={}\\\n".format(str(wait_for_license).lower())
f1.write(lin)
else:
f1.write(line)
f1.close()
return self.odesktop.SubmitJob(os.path.join(project_path, "Job_settings.areg"), project_file)
|
app.py
|
from aiocqhttp import CQHttp
from datetime import datetime
from sendmsg import SendMsg
from loadData import LoadData
import threading
import time
# Config for running this script on a Windows host alongside CoolQ
# HOST = '127.0.0.1'
# PORT = 7788
# This URL sends requests to the CoolQ instance inside the docker container.
# For example, if the docker command maps ports with -p 3542:9000 -p 15700:5700:
# 9000 is CoolQ's exposed web UI port (mapped to 3542, so the outside world visits port 3542),
# and 5700 is the port on which CoolQ accepts data (i.e. what this Python service
# sends to CoolQ); it is mapped to 15700, so messages are sent to CoolQ via port 15700.
BASEURL = 'http://127.0.0.1:15700/'
bot = CQHttp(api_root=BASEURL)
d = {
# '博客': 'https://blog.csdn.net/qq20004604',
# 'github': 'https://github.com/qq20004604',
# 'nginx': 'https://github.com/qq20004604/nginx-demo',
# 'django': 'https://github.com/qq20004604/Python3_Django_Demo',
# 'docker': 'https://github.com/qq20004604/docker-learning',
# 'webpack': 'https://github.com/qq20004604/webpack-study',
# 'react': 'https://github.com/qq20004604/react-demo',
# 'vue': 'github: https://github.com/qq20004604/vue-scaffold\n博客专栏(1.x):https://blog.csdn.net/qq20004604/article/category/6381182',
# '笔记': 'https://github.com/qq20004604/notes',
# 'demo': 'https://github.com/qq20004604/some_demo',
# '海外服务器': 'https://manage.hostdare.com/aff.php?aff=939\n这个可以做私人服务器(不需要备案),也可以找群主询问如何架设SS server的方法。',
# 'QQ 机器人': 'https://github.com/qq20004604/qq-robot',
# '架构': 'https://juejin.im/post/5cea1f705188250640005472',
# 'es6': 'https://blog.csdn.net/qq20004604/article/details/78014684',
# 'vue脚手架': 'https://github.com/qq20004604/Vue-with-webpack',
# 'react脚手架': 'https://github.com/qq20004604/react-with-webpack',
# 'Macbook常用软件': 'https://github.com/qq20004604/when-you-get-new-Macbook',
# 'python的django与mysql交互': 'https://blog.csdn.net/qq20004604/article/details/89934212'
}
ld = LoadData()
def log(context, filename='./log.log'):
with open(filename, 'a', encoding='utf-8') as f:
f.write('time:%s, sender:%s, message_type:%s, user_id:%s, content:%s\n' % (
datetime.now(),
context['sender']['nickname'],
context['message_type'],
context['sender']['user_id'],
context['raw_message']))
@bot.on_message()
async def handle_msg(context):
msg = context['message']
# print(msg)
'''
# print(str(context)) produces output like the following:
{'font': 1473688, 'message': '#help', 'message_id': 528, 'message_type': 'private', 'post_type': 'message',
'raw_message': '#help', 'self_id': 2691365658,
'sender': {'age': 30, 'nickname': '零零水', 'sex': 'male', 'user_id': 20004604}, 'sub_type': 'friend',
'time': 1558283078, 'user_id': 20004604}
'''
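# Commands are matched by substring: any '#<keyword>' occurring anywhere in the
# message appends that keyword's reply; '#help' lists every available keyword.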
result = ''
isindict = False
isinhelp = False
for k in d:
if ('#' + k) in msg:
result += d[k] + '\n'
isindict = True
if '#help' in msg:
result += 'You can use the following commands ~ remember the leading #\n'
isinhelp = True
for k in d:
result += '#' + k + '\n'
# entries from the default dictionary ask the user for a star
if isindict is True:
result += "记得给star!"
# log whenever a dictionary keyword or #help was matched
if isindict is True or isinhelp is True:
log(context)
return {'reply': result}
@bot.on_notice('group_increase')
async def handle_group_increase(context):
await bot.send(context, message='Welcome! Send #help to ask me for all available commands',
at_sender=True, auto_escape=True)
@bot.on_request('group', 'friend')
async def handle_request(context):
return {'approve': True}
SendMsg(BASEURL)
def mixin_dict():
global d
minutes = 0
while True:
# refresh once per minute
minutes = minutes + 1
if minutes % 60 == 0:
print('%s hours pass' % (minutes / 60))
ld_dict = ld.load_search_info()
d = {**ld_dict}
time.sleep(60)
t1 = threading.Thread(target=mixin_dict, name='loop')
t1.start()
# docker configuration
HOST = '172.18.0.1'
PORT = 12399
# This is the port CoolQ forwards received QQ messages to, i.e. the port on
# which this Python service receives those messages.
# Inside the CoolQ docker container it is configured in
# */coolq/app/io.github.richardchien.coolqhttpapi/config/(QQ number).ini
# A container cannot reach the host through 127.0.0.1, so run
# `ip addr show docker0` on the host to find the docker bridge address.
# For example, on my server that command reports 172.18.0.1 (i.e. the container
# reaches the host at 172.18.0.1), so the ini file is changed to:
# post_url = http://172.18.0.1:34519
# The HOST here should match that IP address, and likewise the PORT.
bot.run(host=HOST, port=PORT)
|
start.py
|
#!/usr/bin/python3
import os
import glob
import shutil
import multiprocessing
import logging as log
import sys
from podop import run_server
from socrate import system, conf
log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
def start_podop():
os.setuid(100)
url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/postfix/"
# TODO: Remove verbosity setting from Podop?
run_server(0, "postfix", "/tmp/podop.socket", [
("transport", "url", url + "transport/§"),
("alias", "url", url + "alias/§"),
("domain", "url", url + "domain/§"),
("mailbox", "url", url + "mailbox/§"),
("recipientmap", "url", url + "recipient/map/§"),
("sendermap", "url", url + "sender/map/§"),
("senderaccess", "url", url + "sender/access/§"),
("senderlogin", "url", url + "sender/login/§")
])
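# The trailing § in each URL is podop's placeholder for the looked-up key
# (an assumption based on podop's URL-table convention; podop is not shown here).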
def is_valid_postconf_line(line):
return not line.startswith("#") \
and not line == ''
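# For example, a (hypothetical) override line in /overrides/postfix.cf such as
#   message_size_limit = 52428800
# passes the filter above and is applied below via postconf -e.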
# Actual startup script
os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
os.environ["ANTISPAM_MILTER_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_MILTER", "antispam:11332")
os.environ["LMTP_ADDRESS"] = system.get_host_address_from_environment("LMTP", "imap:2525")
for postfix_file in glob.glob("/conf/*.cf"):
conf.jinja(postfix_file, os.environ, os.path.join("/etc/postfix", os.path.basename(postfix_file)))
if os.path.exists("/overrides/postfix.cf"):
for line in open("/overrides/postfix.cf").read().strip().split("\n"):
if is_valid_postconf_line(line):
os.system('postconf -e "{}"'.format(line))
if os.path.exists("/overrides/postfix.master"):
for line in open("/overrides/postfix.master").read().strip().split("\n"):
if is_valid_postconf_line(line):
os.system('postconf -Me "{}"'.format(line))
for map_file in glob.glob("/overrides/*.map"):
destination = os.path.join("/etc/postfix", os.path.basename(map_file))
shutil.copyfile(map_file, destination)
os.system("postmap {}".format(destination))
os.remove(destination)
if "RELAYUSER" in os.environ:
path = "/etc/postfix/sasl_passwd"
conf.jinja("/conf/sasl_passwd", os.environ, path)
os.system("postmap {}".format(path))
# Run Podop and Postfix
multiprocessing.Process(target=start_podop).start()
os.system("/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing")
os.system("postfix start-fg")
|
worker.py
|
import os
import sys
import time
import queue
import threading as mt
import multiprocessing as mp
import radical.utils as ru
from .. import Session
from .. import utils as rpu
from .. import constants as rpc
# ------------------------------------------------------------------------------
#
class Worker(rpu.Component):
# --------------------------------------------------------------------------
#
def __init__(self, cfg, session=None):
self._session = session
if isinstance(cfg, str): cfg = ru.Config(cfg=ru.read_json(cfg))
else : cfg = ru.Config(cfg=cfg)
# generate a MPI rank dependent UID for each worker process
# FIXME: this should be delegated to ru.generate_id
# FIXME: why do we need to import `os` again after MPI Spawn?
import os # pylint: disable=reimported
# FIXME: rank determination should be moved to RU
rank = None
if rank is None: rank = os.environ.get('PMIX_RANK')
if rank is None: rank = os.environ.get('PMI_RANK')
if rank is None: rank = os.environ.get('OMPI_COMM_WORLD_RANK')
# keep worker ID and rank
cfg['wid'] = cfg['uid']
cfg['rank'] = rank
if rank is not None:
cfg['uid'] = '%s.%03d' % (cfg['uid'], int(rank))
self._n_cores = cfg.cores
self._n_gpus = cfg.gpus
self._info = ru.Config(cfg=cfg.get('info', {}))
if not self._session:
self._session = Session(cfg=cfg, uid=cfg.sid, _primary=False)
rpu.component.debug = True
rpu.Component.__init__(self, cfg, self._session)
self._res_evt = mp.Event() # set on free resources
self._mlock = ru.Lock(self._uid) # lock `_modes`
self._modes = dict() # call modes (call, exec, eval, ...)
# We need to make sure to run only up to `gpn` tasks using a gpu
# within that pool, so need a separate counter for that.
self._resources = {'cores' : [0] * self._n_cores,
'gpus' : [0] * self._n_gpus}
# resources are initially all free
self._res_evt.set()
# # create a multiprocessing pool with `cpn` worker processors. Set
# # `maxtasksperchild` to `1` so that we get a fresh process for each
# # task. That will also allow us to run command lines via `exec`,
# # effectively replacing the worker process in the pool for a specific
# # task.
# #
# # We use a `fork` context to inherit log and profile handles.
# #
# # NOTE: The mp documentation is wrong; mp.Pool does *not* have a context
# # parameters. Instead, the Pool has to be created within
# # a context.
# ctx = mp.get_context('fork')
# self._pool = ctx.Pool(processes=self._n_cores,
# initializer=None,
# maxtasksperchild=1)
# NOTE: a multiprocessing pool won't work, as pickle is not able to
# serialize our worker object. So we use our own process pool.
# It's not much of a loss since we want to respawn new processes for
# each task anyway (to improve isolation).
self._pool = dict() # map task uid to process instance
self._plock = ru.Lock('p' + self._uid) # lock _pool
# We also create a queue for communicating results back, and a thread to
# watch that queue
self._result_queue = mp.Queue()
self._result_thread = mt.Thread(target=self._result_watcher)
self._result_thread.daemon = True
self._result_thread.start()
# connect to master
self.register_subscriber(rpc.CONTROL_PUBSUB, self._control_cb)
self.register_publisher(rpc.CONTROL_PUBSUB)
# run worker initialization *before* starting to work on requests.
# the worker provides three builtin methods:
# eval: evaluate a piece of python code
# exec: execute a command line (fork/exec)
# shell: execute a shell command
# call: execute a method or function call
self.register_mode('call', self._call)
self.register_mode('eval', self._eval)
self.register_mode('exec', self._exec)
self.register_mode('shell', self._shell)
self.pre_exec()
# connect to the request / response ZMQ queues
self._res_put = ru.zmq.Putter('to_res', self._info.res_addr_put)
self._req_get = ru.zmq.Getter('to_req', self._info.req_addr_get,
cb=self._request_cb)
# the worker can return custom information which will be made available
# to the master. This can be used to communicate, for example, worker
# specific communication endpoints.
# `info` is a placeholder for any additional meta data communicated to
# the worker. Only first rank publishes.
if self._cfg['rank'] == 0:
self.publish(rpc.CONTROL_PUBSUB, {'cmd': 'worker_register',
'arg': {'uid' : self._cfg['wid'],
'info': self._info}})
# prepare base env dict used for all tasks
self._task_env = dict()
for k,v in os.environ.items():
if k.startswith('RP_'):
self._task_env[k] = v
# --------------------------------------------------------------------------
#
    # This class-method creates an instance of the appropriate Worker sub-class
#
@classmethod
def create(cls, cfg, session):
return Worker(cfg, session)
# --------------------------------------------------------------------------
#
def pre_exec(self):
'''
This method can be overloaded by the Worker implementation to run any
pre_exec commands before spawning worker processes.
'''
pass
# --------------------------------------------------------------------------
#
def register_mode(self, name, executor):
assert(name not in self._modes)
self._modes[name] = executor
# --------------------------------------------------------------------------
#
def _call(self, data):
'''
        We expect data to have three entries: 'method' or 'function',
        containing the name of the member method or the name of a free function
        to call; `args`, an optional list of unnamed parameters; and `kwargs`,
        an optional dictionary of named parameters.
'''
if 'method' in data:
to_call = getattr(self, data['method'], None)
elif 'function' in data:
names = dict(list(globals().items()) + list(locals().items()))
to_call = names.get(data['function'])
else:
raise ValueError('no method or function specified: %s' % data)
if not to_call:
raise ValueError('callable not found: %s' % data)
args = data.get('args', [])
kwargs = data.get('kwargs', {})
try:
out = to_call(*args, **kwargs)
err = None
ret = 0
except Exception as e:
self._log.exception('_call failed: %s' % (data))
out = None
err = 'call failed: %s' % e
ret = 1
return out, err, ret
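    # Example request payload for `_call` (illustrative only; `uname` is a
    # hypothetical free function, not part of this module):
    #
    #   data = {'function': 'uname', 'args': [], 'kwargs': {}}
    #   out, err, ret = self._call(data)   # -> (result, None, 0) on success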
# --------------------------------------------------------------------------
#
# FIXME: an MPI call mode should be added. That could work along these
# lines of:
#
# --------------------------------------------------------------------------
# def _mpi(self, data):
#
# try:
# cmd = rp.agent.launch_method.construct_command(data,
# executable=self.exe, args=data['func'])
# out = rp.sh_callout(cmd)
# err = None
# ret = 0
#
# except Exception as e:
# self._log.exception('_mpi failed: %s' % (data))
# out = None
# err = 'mpi failed: %s' % e
# ret = 1
#
# return out, err, ret
# --------------------------------------------------------------------------
#
# For that to work we would need to be able to create a LM here, but ideally
# not replicate the work done in the agent executor.
# --------------------------------------------------------------------------
#
def _eval(self, data):
'''
We expect data to have a single entry: 'code', containing the Python
code to be eval'ed
'''
try:
out = eval(data['code'])
err = None
ret = 0
except Exception as e:
self._log.exception('_eval failed: %s' % (data))
out = None
err = 'eval failed: %s' % e
ret = 1
return out, err, ret
# --------------------------------------------------------------------------
#
def _exec(self, data):
'''
        We expect data to have two entries: 'exe', containing the executable to
        run, and `args` containing a list of arguments (strings) to pass as
command line arguments. We use `sp.Popen` to run the fork/exec, and to
collect stdout, stderr and return code
'''
try:
import subprocess as sp
exe = data['exe']
args = data.get('args', list())
env = data.get('env', dict())
args = '%s %s' % (exe, ' '.join(args))
proc = sp.Popen(args=args, env=env,
stdin=None, stdout=sp.PIPE, stderr=sp.PIPE,
close_fds=True, shell=True)
out, err = proc.communicate()
ret = proc.returncode
except Exception as e:
self._log.exception('_exec failed: %s' % (data))
out = None
err = 'exec failed: %s' % e
ret = 1
return out, err, ret
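    # Example request payload for `_exec` (illustrative only; `/bin/echo` is
    # merely a convenient assumption for a POSIX host):
    #
    #   data = {'exe': '/bin/echo', 'args': ['hello']}
    #   -> out == b'hello\n', err == b'', ret == 0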
# --------------------------------------------------------------------------
#
def _shell(self, data):
'''
We expect data to have a single entry: 'cmd', containing the command
line to be called as string.
'''
try:
out, err, ret = ru.sh_callout(data['cmd'], shell=True)
except Exception as e:
self._log.exception('_shell failed: %s' % (data))
out = None
err = 'shell failed: %s' % e
ret = 1
return out, err, ret
# --------------------------------------------------------------------------
#
def _alloc_task(self, task):
'''
allocate task resources
'''
with self._mlock:
cores = task.get('cores', 1)
gpus = task.get('gpus' , 0)
assert(cores >= 1)
assert(cores <= self._n_cores)
assert(gpus <= self._n_gpus)
if cores > self._resources['cores'].count(0): return False
if gpus > self._resources['gpus' ].count(0): return False
alloc_cores = list()
alloc_gpus = list()
if cores:
for n in range(self._n_cores):
if not self._resources['cores'][n]:
self._resources['cores'][n] = 1
alloc_cores.append(n)
if len(alloc_cores) == cores:
break
if gpus:
for n in range(self._n_gpus):
if not self._resources['gpus'][n]:
self._resources['gpus'][n] = 1
alloc_gpus.append(n)
if len(alloc_gpus) == gpus:
break
task['resources'] = {'cores': alloc_cores,
'gpus' : alloc_gpus}
return True
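    # Illustration (added; not in the original source) of the first-fit
    # bookkeeping above with 4 cores: if _resources['cores'] == [1, 1, 0, 0],
    # a task asking for 2 cores is given cores [2, 3] and the list becomes
    # [1, 1, 1, 1]; _dealloc_task later resets those slots to 0.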
# --------------------------------------------------------------------------
#
def _dealloc_task(self, task):
'''
deallocate task resources
'''
with self._mlock:
resources = task['resources']
for n in resources['cores']:
assert(self._resources['cores'][n])
self._resources['cores'][n] = 0
for n in resources['gpus']:
assert(self._resources['gpus'][n])
self._resources['gpus'][n] = 0
# signal available resources
self._res_evt.set()
return True
# --------------------------------------------------------------------------
#
def _request_cb(self, tasks):
'''
grep call type from tasks, check if methods are registered, and
invoke them.
'''
self._log.debug('=== req_loop %s', len(ru.as_list(tasks)))
for task in ru.as_list(tasks):
self._log.debug('=== req_recv %s', task['uid'])
task['worker'] = self._uid
try:
# ok, we have work to do. Check the requirements to see how
# many cpus and gpus we need to mark as busy
while not self._alloc_task(task):
self._log.debug('=== req_alloc %s', task['uid'])
# no resource - wait for new resources
#
# NOTE: this will block smaller tasks from being executed
# right now. alloc_task is not a proper scheduler,
# after all.
# while not self._res_evt.wait(timeout=1.0):
# self._log.debug('=== req_alloc_wait %s', task['uid'])
time.sleep(0.01)
# break on termination
if self._term.is_set():
return False
self._res_evt.clear()
self._log.debug('=== req_alloced %s', task['uid'])
self._prof.prof('req_start', uid=task['uid'], msg=self._uid)
# we got an allocation for this task, and can run it, so apply
# to the process pool. The callback (`self._result_cb`) will
# pick the task up on completion and free resources.
#
# NOTE: we don't use mp.Pool - see __init__ for details
env = self._task_env
env['RP_TASK_ID'] = task['uid']
# ret = self._pool.apply_async(func=self._dispatch, args=[task],
# callback=self._result_cb,
# error_callback=self._error_cb)
proc = mp.Process(target=self._dispatch, args=[task, env])
# proc.daemon = True
with self._plock:
# we need to include `proc.start()` in the lock, as
# otherwise we may end up getting the `self._result_cb`
# before the pid could be registered in `self._pool`.
proc.start()
self._pool[proc.pid] = proc
self._log.debug('applied: %s: %s: %s',
task['uid'], proc.pid, self._pool.keys())
self._log.debug('=== req_started %s: %s', task['uid'], proc.pid)
except Exception as e:
self._log.exception('request failed')
# free resources again for failed task
self._dealloc_task(task)
res = {'req': task['uid'],
'out': None,
'err': 'req_cb error: %s' % e,
'ret': 1}
self._res_put.put(res)
self._log.debug('=== req_looped')
def _after_fork():
with open('/tmp/after_fork', 'a+') as fout:
fout.write('after fork %s %s\n' % (os.getpid(), mt.current_thread().name))
# --------------------------------------------------------------------------
#
def _dispatch(self, task, env):
# this method is running in a process of the process pool, and will now
# apply the task to the respective execution mode.
#
        # NOTE: application of pre_exec directives may go here
task['pid'] = os.getpid()
# apply task env settings
for k,v in env.items():
os.environ[k] = v
for k,v in task.get('environment', {}).items():
os.environ[k] = v
# ----------------------------------------------------------------------
def _dispatch_thread():
import setproctitle
setproctitle.setproctitle('dispatch.%s' % task['uid'])
out, err, ret = self._modes[mode](task.get('data'))
res = [task, str(out), str(err), int(ret)]
self._log.debug('put 1 result: task %s', task['uid'])
self._result_queue.put(res)
# ----------------------------------------------------------------------
ret = None
try:
# self._log.debug('dispatch: %s: %d', task['uid'], task['pid'])
mode = task['mode']
assert(mode in self._modes), 'no such call mode %s' % mode
self._log.debug('=== debug %s: %s', task['uid'], task)
tout = task.get('timeout')
self._log.debug('dispatch with tout %s', tout)
out, err, ret = self._modes[mode](task.get('data'))
res = [task, str(out), str(err), int(ret)]
self._log.debug('put 1 result: task %s', task['uid'])
self._result_queue.put(res)
# dispatcher = mp.Process(target=_dispatch_thread)
# dispatcher.daemon = True
# dispatcher.start()
# self._log.debug('=== join %s: %s', task['uid'], task)
# dispatcher.join(timeout=tout)
# self._log.debug('=== joined %s: %s', task['uid'], tout)
#
# if dispatcher.is_alive():
# dispatcher.kill()
# dispatcher.join()
# out = None
# err = 'timeout (>%s)' % tout
# ret = 1
# res = [task, str(out), str(err), int(ret)]
# self._log.debug('put 2 result: task %s', task['uid'])
# self._result_queue.put(res)
# self._log.debug('dispatcher killed: %s', task['uid'])
except Exception as e:
self._log.exception('dispatch failed')
out = None
err = 'dispatch failed: %s' % e
ret = 1
res = [task, str(out), str(err), int(ret)]
self._log.debug('put 3 result: task %s', task['uid'])
self._result_queue.put(res)
finally:
# if we kill the process too quickly, the result put above
# will not make it out, thus make sure the queue is empty
# first.
ret = 1
self._result_queue.close()
self._result_queue.join_thread()
sys.exit(ret)
# os.kill(os.getpid(), signal.SIGTERM)
# --------------------------------------------------------------------------
#
def _result_watcher(self):
try:
while not self._term.is_set():
# self._log.debug('=== waiting for results')
try:
res = self._result_queue.get(timeout=0.1)
self._log.debug('got result: %s', res)
self._result_cb(res)
except queue.Empty:
pass
except:
self._log.exception('queue error')
raise
finally:
self._log.debug('=== send unregister')
if self._cfg['rank'] == 0:
self.publish(rpc.CONTROL_PUBSUB, {'cmd': 'worker_unregister',
'arg': {'uid' : self._cfg['wid']}})
# --------------------------------------------------------------------------
#
def _result_cb(self, result):
try:
task, out, err, ret = result
# self._log.debug('result cb: task %s', task['uid'])
with self._plock:
pid = task['pid']
del(self._pool[pid])
# free resources again for the task
self._dealloc_task(task)
res = {'req': task['uid'],
'out': out,
'err': err,
'ret': ret}
self._res_put.put(res)
self._prof.prof('req_stop', uid=task['uid'], msg=self._uid)
except:
self._log.exception('result cb failed')
raise
# --------------------------------------------------------------------------
#
def _error_cb(self, error):
self._log.debug('error: %s', error)
raise RuntimeError(error)
# --------------------------------------------------------------------------
#
def _control_cb(self, topic, msg):
if msg['cmd'] == 'terminate':
self._term.set()
elif msg['cmd'] == 'worker_terminate':
if msg['arg']['uid'] == self._cfg['wid']:
self._term.set()
# --------------------------------------------------------------------------
#
def start(self):
# note that this overwrites `Component.start()` - this worker component
        # is not using the registered input channels, but listens to its own
# set of channels in `_request_cb`.
pass
# --------------------------------------------------------------------------
#
def join(self):
while not self._term.is_set():
time.sleep(1.0)
# ------------------------------------------------------------------------------
|
exchange_rate.py
|
from datetime import datetime
import inspect
import requests
import sys
import os
import json
from threading import Thread
import time
import csv
import decimal
from decimal import Decimal
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0,
# Not ISO 4217.
'BTC': 8}
DEFAULT_EXCHANGE = 'BitcoinAverage'
DEFAULT_CCY = 'USD'
class ExchangeBase(PrintError):
def __init__(self, on_quotes, on_history):
self.history = {}
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
def get_json(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum-Ciphscoin'}, timeout=10)
return response.json()
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum-Ciphscoin'})
reader = csv.DictReader(response.content.decode().split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except BaseException as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,))
        t.daemon = True
t.start()
def read_historical_rates(self, ccy, cache_dir):
filename = os.path.join(cache_dir, self.name() + '_'+ ccy)
if os.path.exists(filename):
timestamp = os.stat(filename).st_mtime
try:
with open(filename, 'r', encoding='utf-8') as f:
h = json.loads(f.read())
h['timestamp'] = timestamp
except:
h = None
else:
h = None
if h:
self.history[ccy] = h
self.on_history()
return h
def get_historical_rates_safe(self, ccy, cache_dir):
try:
self.print_error("requesting fx history for", ccy)
h = self.request_history(ccy)
self.print_error("received fx history for", ccy)
except BaseException as e:
self.print_error("failed fx history:", e)
return
filename = os.path.join(cache_dir, self.name() + '_' + ccy)
with open(filename, 'w', encoding='utf-8') as f:
f.write(json.dumps(h))
h['timestamp'] = time.time()
self.history[ccy] = h
self.on_history()
def get_historical_rates(self, ccy, cache_dir):
if ccy not in self.history_ccys():
return
h = self.history.get(ccy)
if h is None:
h = self.read_historical_rates(ccy, cache_dir)
if h is None or h['timestamp'] < time.time() - 24*3600:
t = Thread(target=self.get_historical_rates_safe, args=(ccy, cache_dir))
            t.daemon = True
t.start()
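    # Note (added for clarity): a cached history file younger than 24h is
    # served as-is; otherwise a daemon thread re-downloads it in the background.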
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'), 'NaN')
def get_currencies(self):
rates = self.get_rates('')
return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a)==3])
class BitcoinAverage(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('apiv2.bitcoinaverage.com',
'/indices/local/ticker/ZEC%s' % ccy)
return {ccy: Decimal(json['last'])}
def history_ccys(self):
return ['USD', 'EUR', 'PLN']
def request_history(self, ccy):
history = self.get_json('apiv2.bitcoinaverage.com',
"/indices/local/history/ZEC%s"
"?period=alltime&format=json" % ccy)
return dict([(h['time'][:10], h['average']) for h in history])
class Bittrex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bittrex.com',
'/api/v1.1/public/getticker?market=BTC-KMD')
quote_currencies = {}
if not json.get('success', False):
return quote_currencies
last = Decimal(json['result']['Last'])
quote_currencies['BTC'] = last
return quote_currencies
class Poloniex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('poloniex.com', '/public?command=returnTicker')
quote_currencies = {}
        zcash_ticker = json.get('BTC_ZEC')
        # guard against a missing ticker so a malformed response returns {}
        if zcash_ticker is not None:
            quote_currencies['BTC'] = Decimal(zcash_ticker['last'])
return quote_currencies
class CoinMarketCap(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coinmarketcap.com', '/v1/ticker/1437/')
quote_currencies = {}
if not isinstance(json, list):
return quote_currencies
json = json[0]
for ccy, key in [
('USD', 'price_usd'),
]:
quote_currencies[ccy] = Decimal(json[key])
return quote_currencies
def dictinvert(d):
inv = {}
for k, vlist in d.items():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
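# Illustration (added; not in the original source):
#   dictinvert({'Bittrex': ['BTC'], 'Poloniex': ['BTC']})
#   -> {'BTC': ['Bittrex', 'Poloniex']}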
def get_exchanges_and_currencies():
import os, json
path = os.path.join(os.path.dirname(__file__), 'currencies.json')
try:
with open(path, 'r', encoding='utf-8') as f:
return json.loads(f.read())
except:
pass
d = {}
is_exchange = lambda obj: (inspect.isclass(obj)
and issubclass(obj, ExchangeBase)
and obj != ExchangeBase)
exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
for name, klass in exchanges.items():
exchange = klass(None, None)
try:
d[name] = exchange.get_currencies()
print(name, "ok")
except:
print(name, "error")
continue
with open(path, 'w', encoding='utf-8') as f:
f.write(json.dumps(d, indent=4, sort_keys=True))
return d
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
klass = globals()[name]
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
class FxThread(ThreadJob):
def __init__(self, config, network):
self.config = config
self.network = network
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.cache_dir = os.path.join(config.path, 'cache')
self.set_exchange(self.config_exchange())
if not os.path.exists(self.cache_dir):
os.mkdir(self.cache_dir)
def get_currencies(self, h):
d = get_exchanges_by_ccy(h)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy, h):
d = get_exchanges_by_ccy(h)
return d.get(ccy, [])
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 2)
fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
try:
rounded_amount = round(amount, prec)
except decimal.InvalidOperation:
rounded_amount = amount
return fmt_str.format(rounded_amount)
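    # Illustration (added; not in the original source): with precision 0 for
    # JPY and the default precision of 2, ccy_amount_str(Decimal('1234.5'),
    # True) yields '1,234' when self.ccy == 'JPY' and '1,234.50' for 'USD'.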
def run(self):
# This runs from the plugins thread which catches exceptions
if self.is_enabled():
            if self.timeout == 0 and self.show_history():
self.exchange.get_historical_rates(self.ccy, self.cache_dir)
if self.timeout <= time.time():
self.timeout = time.time() + 150
self.exchange.update(self.ccy)
def is_enabled(self):
return bool(self.config.get('use_exchange_rate'))
def set_enabled(self, b):
return self.config.set_key('use_exchange_rate', bool(b))
def get_history_config(self):
return bool(self.config.get('history_rates'))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_history_capital_gains_config(self):
return bool(self.config.get('history_rates_capital_gains', False))
def set_history_capital_gains_config(self, b):
self.config.set_key('history_rates_capital_gains', bool(b))
def get_fiat_address_config(self):
return bool(self.config.get('fiat_address'))
def set_fiat_address_config(self, b):
self.config.set_key('fiat_address', bool(b))
def get_currency(self):
'''Use when dynamic fetching is needed'''
return self.config.get("currency", DEFAULT_CCY)
def config_exchange(self):
return self.config.get('use_exchange', DEFAULT_EXCHANGE)
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.timeout = 0 # Because self.ccy changes
self.on_quotes()
def set_exchange(self, name):
class_ = globals().get(name, Bittrex)
self.print_error("using exchange", name)
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
self.exchange = class_(self.on_quotes, self.on_history)
# A new exchange means new fx quotes, initially empty. Force
# a quote refresh
self.timeout = 0
self.exchange.read_historical_rates(self.ccy, self.cache_dir)
def on_quotes(self):
if self.network:
self.network.trigger_callback('on_quotes')
def on_history(self):
if self.network:
self.network.trigger_callback('on_history')
def exchange_rate(self):
'''Returns None, or the exchange rate as a Decimal'''
rate = self.exchange.quotes.get(self.ccy)
if rate is None:
return Decimal('NaN')
return Decimal(rate)
def format_amount(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s" % self.value_str(btc_balance, rate)
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
return _(" (No FX rate available)") if rate.is_nan() else " 1 %s~%s %s" % (base_unit,
self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)
def fiat_value(self, satoshis, rate):
return Decimal('NaN') if satoshis is None else Decimal(satoshis) / COIN * Decimal(rate)
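    # Illustration (added; assuming COIN == 100_000_000 satoshis):
    #   fiat_value(50_000_000, 100) evaluates to 50, i.e. half a coin at a
    #   rate of 100 fiat units per coin.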
def value_str(self, satoshis, rate):
return self.format_fiat(self.fiat_value(satoshis, rate))
def format_fiat(self, value):
if value.is_nan():
return _("No data")
return "%s" % (self.ccy_amount_str(value, True))
def history_rate(self, d_t):
if d_t is None:
return Decimal('NaN')
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate == 'NaN' and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy, 'NaN')
self.history_used_spot = True
return Decimal(rate)
def historical_value_str(self, satoshis, d_t):
return self.format_fiat(self.historical_value(satoshis, d_t))
def historical_value(self, satoshis, d_t):
return self.fiat_value(satoshis, self.history_rate(d_t))
def timestamp_rate(self, timestamp):
from electrum_zcash.util import timestamp_to_datetime
date = timestamp_to_datetime(timestamp)
return self.history_rate(date)
|
check_vms.py
|
import threading
import constants_spgw as constants
import ConfigParser
import ipaddress
import time
import os
import check_vms_constants
diagram = """\
+--------------+
Control+----------------> S1MME| MME |
Path +-------| |
| +--------------+
| |
| S6A
| |
+----------+ | +--------------+ +----------+
| Traffic | | | HSS | | Traffic |
| Generator| S11 | | | Receiver |
| | | +--------------+ | |
| | | | | |
+-+---+----+ | DB +---+--+---+
| | | | | |
| | | +--------------+ | |
| | | | DB | | |
| | Data Path | | | | |
| | | +--------------+ | |
| | | | |
| | | | |
| | +-----------------------------------------+ | |
| | | SPGWC | | |
| | | | | |
| | +-----------------------------------------+ | |
| | | | |
| | | | |
| | | | |
| | | | |
| | | | |
| | +-----------------------------------------+ | |
| +------------ | |----+ |
+----S1U----------| SPGWU |--SGI--+
+-----------------------------------------+
"""
status=""
host_change_status=""
hosts={}
hosts_list=[]
status_cmd = check_vms_constants.status_lcmd
host_change_cmd=check_vms_constants.host_change_cmd
def get_hosts(config):
global hosts
for sect in config.sections():
if constants.INSTANCE_TYPES.__contains__(sect) and not config.get(sect,
constants.INSTANCE_COUNT) == "0":
host_type=config.get(sect, constants.HOST_TYPE)
ip=config.get("HOST", host_type).split('@')[1].split('"')[0]
hosts[sect]=ip
for key, value in config.items("HOST"):
ip=value.split('@')[1].split('"')[0]
if ip in hosts.values():
hosts_list.append(ip)
def parse_ini_file():
config = ConfigParser.ConfigParser()
config.optionxform = str
config.read(constants.INPUT_FILE_PATH)
return config
def check_connectivity(config,temp_host_list):
print "Check connectivity started"
global diagram
global hosts
running_vms=set()
test_result = '\033[40;32m{}\033[m'.format("TEST PASSED")
for value in temp_host_list:
f = os.popen(' sudo ssh '+value+' virsh list | awk \'NR >2 { print $2 }\'')
lines = f.read().strip('\n')
running_vms.update(lines.splitlines())
# print("Following VMs are running:\n"+running_vms+"\n")
for src_host in config.sections():
if src_host.lower() in running_vms and constants.INSTANCE_TYPES.__contains__(
src_host) and not config.get(src_host,
constants.INSTANCE_COUNT) == "0":
# print("Checking "+src_host+" ....connection")
# print("....................................\n")
os.system(['clear', 'cls'][os.name == 'nt'])
print(diagram)
for dest_host in eval('constants.' + src_host + '_CONN'):
for interface in eval('constants.' + dest_host):
temp = str(interface).replace('_S', '_P') if str(interface).__contains__('_S') else str(
interface).replace('_P', '_S') if str(
interface).__contains__('_P') else interface
if eval('constants.' + src_host).__contains__(temp):
# print("Checking " + src_host + ":" + interface + ".....")
dest_ip = config.get(dest_host, "NETWORK.1." + interface + "_IP")
f = os.popen('sudo ssh '+hosts.get(src_host)+' virsh domifaddr ' + str( src_host).lower() + ' |grep ipv4 |awk \'{print $4}\' |cut -d \'/\' -f1')
src_ip = f.read().strip('\n')
#print src_ip
print(src_host+"<-----"+interface+"-------->"+dest_host)
if dest_host == "SPGWU" or dest_host == "IL_NPERF":
                        # NOTE: remove (or comment out) the 'continue' below once the data path (DP) is up
continue
f = os.popen(
'sudo ssh '+hosts.get(src_host)+' ssh -q -o \'StrictHostKeyChecking no\' -i /home/ubuntu/.ssh/id_rsa ubuntu@' + src_ip + ' python /opt/ngic/dpdk/tools/dpdk-devbind.py --status | awk \'/drv=igb_uio/{count++} END{print count}\'')
result = f.read().strip('\n')
if not "2" in str(result):
# print("Not Bound to dpdk"+result+"\n")
test_result = '\033[40;31m{}\033[m'.format("TEST FAILED")
if interface == 'DB':
diagram = diagram.replace(interface, '\033[40;31m{}\033[m'.format(interface), 1)
else:
diagram = diagram.replace(interface, '\033[40;31m{}\033[m'.format(interface))
else:
if interface == 'DB':
diagram = diagram.replace(interface, '\033[40;32m{}\033[m'.format(interface), 1)
else:
diagram = diagram.replace(interface, '\033[40;32m{}\033[m'.format(interface))
# print("Bound to dpdk\n")
else:
f = os.popen('sudo ssh '+hosts.get(src_host)+' "ssh -q -o \'StrictHostKeyChecking no\' -i /home/ubuntu/.ssh/id_rsa ubuntu@' + src_ip + ' \'ping -q -c 5 -W 1 ' + dest_ip + '\'"')
result = f.read().strip('\n')
# print 'ssh '+hosts.get(src_host)+' ssh -q -o \'StrictHostKeyChecking no\' -i /home/ubuntu/.ssh/id_rsa ubuntu@' + src_ip + ' \'ping -q -c 5 -W 1 ' + dest_ip + '\''
# print result
if "errors" in str(result):
test_result = '\033[40;31m{}\033[m'.format("TEST FAILED")
if interface == 'DB':
diagram = diagram.replace(interface, '\033[40;31m{}\033[m'.format(interface), 1)
else:
diagram = diagram.replace(interface, '\033[40;31m{}\033[m'.format(interface))
# print("Not Connected\n"+result+"\n")
else:
if interface == 'DB':
diagram = diagram.replace(interface, '\033[40;32m{}\033[m'.format(interface), 1)
else:
diagram = diagram.replace(interface, '\033[40;32m{}\033[m'.format(interface))
# print("Connected\n")
break
# print("....................................\n\n")
print(test_result)
def nth_repl(s, sub, repl, nth):
find = s.find(sub)
i = find != -1
while find != -1 and i != nth:
find = s.find(sub, find + 1)
i += 1
if i == nth:
return s[:find]+repl+s[find + len(sub):]
return s
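# Illustration (added; not in the original source): replace only the 2nd hit.
#   nth_repl('MME HSS MME', 'MME', 'XXX', 2) -> 'MME HSS XXX'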
class STATUS(object):
def __init__(self):
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
global status_cmd
global status
while True:
f = os.popen(status_cmd)
status= f.read().strip('\n')
time.sleep(check_vms_constants.event_interval)
class HOST_CHANGE(object):
def __init__(self):
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
global host_change_status
global hosts_list
global status_cmd
global status
global host_change_cmd
while True:
if "ssh" in host_change_cmd:
f = os.popen(host_change_cmd)
host_change_status= f.read().strip('\n')
if 'terraform_setup : Deploy' in status and len(hosts_list)>0:
temp_host=str(hosts_list.pop(0))
host_change_cmd="ssh "+temp_host+" "+check_vms_constants.host_change_cmd
status_cmd="ssh "+temp_host+" "+check_vms_constants.status_rcmd
if 'PLAY RECAP' in host_change_status and len(hosts_list)>0:
temp_host = str(hosts_list.pop(0))
host_change_cmd = "ssh " + temp_host + " " + check_vms_constants.host_change_cmd
status_cmd = "ssh " + temp_host + " " + check_vms_constants.status_rcmd
time.sleep(check_vms_constants.event_interval)
class Fun(object):
def __init__(self,vm,created_cmd,configured_cmd):
global hosts
self.vm=vm
self.created_cmd="ssh "+str(hosts.get(vm))+" "+created_cmd
self.configured_cmd="ssh "+str(hosts.get(vm))+" "+configured_cmd
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
create_check_done=False
configured_check_done=False
while True:
global diagram
if not create_check_done:
f = os.popen(self.created_cmd)
create_status= f.read().strip('\n')
if create_status:
if self.vm == 'MME' or self.vm == 'DB':
diagram = nth_repl(diagram,self.vm, '\033[40;33m{}\033[m'.format(self.vm),2)
else:
diagram = nth_repl(diagram,self.vm, '\033[40;33m{}\033[m'.format(self.vm),1)
create_check_done=True
if not configured_check_done:
f = os.popen(self.configured_cmd)
configured_status=f.read().strip('\n')
if configured_status:
if self.vm == 'MME' or self.vm == 'DB':
diagram = nth_repl(diagram,self.vm, '\033[40;32m{}\033[m'.format(self.vm),2)
else:
diagram = nth_repl(diagram,self.vm, '\033[40;32m{}\033[m'.format(self.vm),1)
configured_check_done=True
if create_check_done and configured_check_done:
break
time.sleep(check_vms_constants.event_interval)
get_hosts(parse_ini_file())
for key, value in hosts.items():
os.system('ssh '+value+' "sudo touch /tmp/deploy.log"')
temp_host_list = hosts_list[:]
vm_status=STATUS()
host_change=HOST_CHANGE()
vms=check_vms_constants.vms
for i in range(len(vms)):
fun=Fun(vms[i],eval('check_vms_constants.'+vms[i].lower().replace('-','_')+'_created_cmd'),eval('check_vms_constants.'+vms[i].lower().replace('-','_')+'_configured_cmd'))
timeout = time.time() + 60*3
while True:
if 'PLAY RECAP' in host_change_status and len(hosts_list)==0:
break
os.system(['clear', 'cls'][os.name == 'nt'])
print(diagram)
print('Progress : ' +status)
time.sleep(5)
check_connectivity(parse_ini_file(),temp_host_list)
|
local_timer_example.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import multiprocessing as mp
import signal
import time
import unittest
import torch.distributed.elastic.timer as timer
import torch.multiprocessing as torch_mp
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
IS_WINDOWS,
IS_MACOS,
sandcastle_skip_if,
)
logging.basicConfig(
level=logging.INFO, format="[%(levelname)s] %(asctime)s %(module)s: %(message)s"
)
def _happy_function(rank, mp_queue):
timer.configure(timer.LocalTimerClient(mp_queue))
with timer.expires(after=1):
time.sleep(0.5)
def _stuck_function(rank, mp_queue):
timer.configure(timer.LocalTimerClient(mp_queue))
with timer.expires(after=1):
time.sleep(5)
# timer is not supported on macos or windows
if not (IS_WINDOWS or IS_MACOS):
class LocalTimerExample(unittest.TestCase):
"""
Demonstrates how to use LocalTimerServer and LocalTimerClient
to enforce expiration of code-blocks.
        Since torch multiprocessing's ``start_process`` method currently
        does not take the multiprocessing context as a parameter,
        there is no way to create the mp.Queue in the correct
        context BEFORE spawning child processes. Once the ``start_process``
API is changed in torch, then re-enable ``test_torch_mp_example``
unittest. As of now this will SIGSEGV.
"""
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
def test_torch_mp_example(self):
# in practice set the max_interval to a larger value (e.g. 60 seconds)
mp_queue = mp.get_context("spawn").Queue()
server = timer.LocalTimerServer(mp_queue, max_interval=0.01)
server.start()
world_size = 8
# all processes should complete successfully
            # since start_process does NOT take a context parameter yet
# this method WILL FAIL (hence the test is disabled)
torch_mp.spawn(
fn=_happy_function, args=(mp_queue,), nprocs=world_size, join=True
)
with self.assertRaises(Exception):
# torch.multiprocessing.spawn kills all sub-procs
# if one of them gets killed
torch_mp.spawn(
fn=_stuck_function, args=(mp_queue,), nprocs=world_size, join=True
)
server.stop()
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
def test_example_start_method_spawn(self):
self._run_example_with(start_method="spawn")
# @sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
# def test_example_start_method_forkserver(self):
# self._run_example_with(start_method="forkserver")
def _run_example_with(self, start_method):
spawn_ctx = mp.get_context(start_method)
mp_queue = spawn_ctx.Queue()
server = timer.LocalTimerServer(mp_queue, max_interval=0.01)
server.start()
world_size = 8
processes = []
for i in range(0, world_size):
if i % 2 == 0:
p = spawn_ctx.Process(target=_stuck_function, args=(i, mp_queue))
else:
p = spawn_ctx.Process(target=_happy_function, args=(i, mp_queue))
p.start()
processes.append(p)
for i in range(0, world_size):
p = processes[i]
p.join()
if i % 2 == 0:
self.assertEqual(-signal.SIGKILL, p.exitcode)
else:
self.assertEqual(0, p.exitcode)
server.stop()
if __name__ == "__main__":
run_tests()
|
auto_clicker.py
|
# -*- coding: utf-8 -*-
import os, time, pickle, sys, logging, re, random, threading, configparser, requests, json, schedule
# for downloading images
from urllib import request
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
#----------------------------------------------------------------------------------------------------#
### account login info and settings ###
alive = True
# --------------------------------------------------
# declare configparser and read the ini file
# --------------------------------------------------
config_ini = configparser.ConfigParser()
config_ini.read('config.ini', encoding='utf-8')
mode = config_ini.getint('Mode', 'mode')
email1 = config_ini.get('MainAccount', 'email1')
password1 = config_ini.get('MainAccount', 'password1')
t_email1 = config_ini.get('MainAccount', 't_email1')
t_password1 = config_ini.get('MainAccount', 't_password1')
os.makedirs("cache/"+email1, exist_ok=True)
email2 = config_ini.get('SubAccount', 'email2')
password2 = config_ini.get('SubAccount', 'password2')
t_email2 = config_ini.get('SubAccount', 't_email2')
t_password2 = config_ini.get('SubAccount', 't_password2')
if mode == 2:
os.makedirs("cache/"+email2, exist_ok=True)
# logging module setup
# handler that writes log records to the console
stream_log = logging.StreamHandler()
stream_log.setLevel(logging.INFO)
stream_log.setFormatter(logging.Formatter('[%(levelname)s](%(lineno)s):%(message)s'))
# handler that writes log records to a file
file_log = logging.FileHandler(filename='logger.log')
file_log.setLevel(logging.INFO)
file_log.setFormatter(logging.Formatter('[%(asctime)s][%(levelname)s](%(filename)s:%(lineno)s):%(message)s'))
# fetch the root logger via getLogger() and attach both handlers
logging.getLogger().addHandler(stream_log)
logging.getLogger().addHandler(file_log)
# keep the root logger's level at the lowest of the handler levels;
# otherwise records are filtered out before they ever reach the handlers
logging.getLogger().setLevel(logging.DEBUG)
#----------------------------------------------------------------------------------------------------#
# launch the browsers
options = Options()
options.add_argument('--headless') # enable headless mode
options.add_argument('--no-sandbox') # run outside the sandbox (disables security)
options.add_argument('--disable-gpu') # disable GPU features
options.add_argument('--window-size=1280,1024') # set the window size
driver1_1 = webdriver.Chrome(options=options)
driver1_2 = webdriver.Chrome(options=options)
driver1_3 = webdriver.Chrome(options=options)
driver1_4 = webdriver.Chrome(options=options)
if mode == 2:
driver2_1 = webdriver.Chrome(options=options)
driver2_2 = webdriver.Chrome(options=options)
driver2_3 = webdriver.Chrome(options=options)
# check connectivity to the Yay! server
try:
logging.info("Browser1_1 and Browser1_2 Connection check...")
driver1_1.get('https://yay.space/timeline/following')
WebDriverWait(driver1_1, 5).until(EC.presence_of_all_elements_located)
driver1_2.get('https://yay.space/timeline/following')
WebDriverWait(driver1_2, 5).until(EC.presence_of_all_elements_located)
driver1_3.get('https://yay.space/timeline/following')
WebDriverWait(driver1_3, 5).until(EC.presence_of_all_elements_located)
driver1_4.get('https://yay.space/timeline/following')
WebDriverWait(driver1_4, 5).until(EC.presence_of_all_elements_located)
if mode == 2:
logging.info("Browser2_1 Connection check...")
driver2_1.get('https://yay.space/timeline/following')
WebDriverWait(driver2_1, 5).until(EC.presence_of_all_elements_located)
driver2_2.get('https://yay.space/timeline/following')
WebDriverWait(driver2_2, 5).until(EC.presence_of_all_elements_located)
driver2_3.get('https://yay.space/timeline/following')
WebDriverWait(driver2_3, 5).until(EC.presence_of_all_elements_located)
logging.info("All Connected successfully...")
except:
logging.error("Browser Connection timed out...!!")
sys.exit()
#----------------------------------------------------------------------------------------------------#
### account login ###
def login():
    # main account
try:
        # check login status
logging.info("Check MainAccount login status...")
cookies = pickle.load(open("cache/" + email1 + "/cookies.pkl", "rb"))
for cookie in cookies:
driver1_1.add_cookie(cookie)
driver1_2.add_cookie(cookie)
driver1_3.add_cookie(cookie)
driver1_4.add_cookie(cookie)
driver1_1.refresh()
driver1_2.refresh()
driver1_3.refresh()
driver1_4.refresh()
WebDriverWait(driver1_1, 5).until(EC.presence_of_all_elements_located)
WebDriverWait(driver1_2, 5).until(EC.presence_of_all_elements_located)
WebDriverWait(driver1_3, 5).until(EC.presence_of_all_elements_located)
WebDriverWait(driver1_4, 5).until(EC.presence_of_all_elements_located)
driver1_1.find_element_by_class_name('Header__profile__a')
driver1_2.find_element_by_class_name('Header__profile__a')
driver1_3.find_element_by_class_name('Header__profile__a')
driver1_4.find_element_by_class_name('Header__profile__a')
logging.info("Logged in to MainAccount from saved information...")
except:
        # if not logged in
try:
logging.info("Browser1_1 Move page...")
driver1_1.get('https://yay.space/login')
WebDriverWait(driver1_1, 5).until(EC.presence_of_all_elements_located)
except:
logging.error("Browser1_1 Connection timed out...!!")
sys.exit()
        # fill in the credentials, then click the login button
logging.info("Browser1_1 Start login...")
driver1_1.find_element_by_name('email').send_keys(email1)
driver1_1.find_element_by_name('password').send_keys(password1)
driver1_1.find_element_by_class_name('Button.Button--less-rounded.Button--icon-login').click()
        # wait for the login to load
for _ in range(50):
if driver1_1.current_url == "https://yay.space/timeline/following":
break
else:
time.sleep(0.1)
else:
logging.error("Browser1_1 Connection timed out...!!")
sys.exit()
        # save the login cookies
pickle.dump(driver1_1.get_cookies() , open("cache/" + email1 + "/cookies.pkl","wb"))
logging.info("Browser1_1 Login completed...")
        # browsers 1_2 to 1_4: log in from the saved cookies
logging.info("Browser1_2 and Browser1_3 Start login...")
cookies = pickle.load(open("cache/" + email1 + "/cookies.pkl", "rb"))
for cookie in cookies:
driver1_2.add_cookie(cookie)
driver1_3.add_cookie(cookie)
driver1_4.add_cookie(cookie)
driver1_2.refresh()
driver1_3.refresh()
driver1_4.refresh()
WebDriverWait(driver1_2, 5).until(EC.presence_of_all_elements_located)
WebDriverWait(driver1_3, 5).until(EC.presence_of_all_elements_located)
WebDriverWait(driver1_4, 5).until(EC.presence_of_all_elements_located)
driver1_2.find_element_by_class_name('Header__profile__a')
driver1_3.find_element_by_class_name('Header__profile__a')
driver1_4.find_element_by_class_name('Header__profile__a')
logging.info("Browser1_2 and Browser1_3 Login completed...")
    # sub account
if mode == 2:
try:
            # check login status
logging.info("Check SubAccount login status...")
cookies = pickle.load(open("cache/" + email2 + "/cookies.pkl", "rb"))
for cookie in cookies:
driver2_1.add_cookie(cookie)
driver2_2.add_cookie(cookie)
driver2_3.add_cookie(cookie)
driver2_1.refresh()
driver2_2.refresh()
driver2_3.refresh()
WebDriverWait(driver2_1, 5).until(EC.presence_of_all_elements_located)
WebDriverWait(driver2_2, 5).until(EC.presence_of_all_elements_located)
WebDriverWait(driver2_3, 5).until(EC.presence_of_all_elements_located)
driver2_1.find_element_by_class_name('Header__profile__a')
driver2_2.find_element_by_class_name('Header__profile__a')
driver2_3.find_element_by_class_name('Header__profile__a')
logging.info("Logged in to SubAccount from saved information...")
except:
            # if not logged in
try:
logging.info("Browser2_1 Move page...(Yay!ログインページ)")
driver2_1.get('https://yay.space/login')
WebDriverWait(driver2_1, 5).until(EC.presence_of_all_elements_located)
except:
logging.error("Browser2_1 Connection timed out...!!")
sys.exit()
            # fill in the credentials, then click the login button
logging.info("Browser2_1 Start login...")
driver2_1.find_element_by_name('email').send_keys(email2)
driver2_1.find_element_by_name('password').send_keys(password2)
driver2_1.find_element_by_class_name('Button.Button--less-rounded.Button--icon-login').click()
            # wait for the login to load
for _ in range(50):
if driver2_1.current_url == "https://yay.space/timeline/following":
break
else:
time.sleep(0.1)
else:
logging.error("Browser2_1 Connection timed out...!!")
sys.exit()
            # save the login cookies
pickle.dump(driver2_1.get_cookies() , open("cache/" + email2 + "/cookies.pkl","wb"))
logging.info("Browser2_1 Login completed...")
            # browsers 2_2 and 2_3: log in from the saved cookies
logging.info("Browser2_2 and Browser2_3 Start login...")
cookies = pickle.load(open("cache/" + email2 + "/cookies.pkl", "rb"))
for cookie in cookies:
driver2_2.add_cookie(cookie)
driver2_3.add_cookie(cookie)
driver2_2.refresh()
driver2_3.refresh()
WebDriverWait(driver2_2, 5).until(EC.presence_of_all_elements_located)
WebDriverWait(driver2_3, 5).until(EC.presence_of_all_elements_located)
driver2_2.find_element_by_class_name('Header__profile__a')
driver2_3.find_element_by_class_name('Header__profile__a')
logging.info("Browser2_2 and Browser2_3 Login completed...")
#----------------------------------------------------------------------------------------------------#
# logged-in user status
my_id = driver1_1.find_element_by_class_name('Header__profile__a').get_attribute("href")
my_name = driver1_1.find_element_by_class_name('Nickname__span').text
try:
driver1_1.find_element_by_class_name('ImageLoader.Avatar.Avatar--vip')
d_vip = "Enable"
except:
d_vip = "Disable"
if mode == 1:
logging.info("Login Status\n< Main Account >\nUSERID:["+my_id.replace("https://yay.space", "") + "] NAME:["+my_name + "] VIP:"+d_vip + "\n")
if mode == 2:
sub_my_id = driver2_1.find_element_by_class_name('Header__profile__a').get_attribute("href")
sub_my_name = driver2_1.find_element_by_class_name('Nickname__span').text
try:
driver2_1.find_element_by_class_name('ImageLoader.Avatar.Avatar--vip')
sub_d_vip = "Enable"
except:
sub_d_vip = "Disable"
logging.info("Login Status\n< Main Account >\nUSERID:["+my_id.replace("https://yay.space", "") + "] NAME:["+my_name + "] VIP:"+d_vip +
"\n< Sub Account >\nUSERID:["+sub_my_id.replace("https://yay.space", "") + "] NAME:["+sub_my_name + "] VIP:"+sub_d_vip + "\n")
#----------------------------------------------------------------------------------------------------#
# auto-post of day-over-day stats
def job():
    # navigate to my page (driver1_4)
driver1_4.find_element_by_class_name('Header__profile').click()
WebDriverWait(driver1_4, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="main"]/div/div[1]/div/div/div')))
    # fetch the current values
n_posts = driver1_4.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[1]/div/div[2]/dl/div[1]/a/dd').text
n_follow = driver1_4.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[1]/div/div[2]/dl/div[4]/a/dd').text
n_follower = driver1_4.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[1]/div/div[2]/dl/div[3]/a/dd').text
n_letter = driver1_4.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[1]/div/div[2]/dl/div[2]/a/dd').text
    # load the json cache
with open("cache/" + email1 + "/comparison_date.json") as f:
date = json.load(f)
    # delta vs. the previous day = current value - previous value
posts = int(n_posts.replace(",", "")) - date["posts"]
follow = int(n_follow.replace(",", "")) - date["follow"]
follower = int(n_follower.replace(",", "")) - date["follower"]
letter = int(n_letter.replace(",", "")) - date["letter"]
    # delta of the deltas = computed delta - yesterday's delta
y_posts = posts - date["yesterday_posts"]
y_follow = follow - date["yesterday_follow"]
y_follower = follower - date["yesterday_follower"]
y_letter = letter - date["yesterday_letter"]
    # post the day-over-day summary
sent = """こちらは前日比の集計結果です。
投稿した数 : {0}(前日比 : {1})
フォローした数 : {2}(前日比 : {3})
フォローされた数 : {4}(前日比 : {5})
レターされた数 : {6}(前日比 : {7})
""".format(posts, y_posts, follow, y_follow, follower, y_follower, letter, y_letter)
driver1_4.get("https://yay.space/timeline/following")
WebDriverWait(driver1_4, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="main"]/div/div[2]/div/div[1]/div[1]/form/div/div[1]/textarea')))
driver1_4.find_element_by_class_name('PostBox__body.PostBox__body--color-0.PostBox__body--fz-0').click()
for sent_text in sent.splitlines():
driver1_4.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[1]/form/div/div[1]/div').send_keys(sent_text)
driver1_4.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[1]/form/div/div[1]/div').send_keys(Keys.SHIFT, Keys.ENTER)
driver1_4.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[1]/form/div/div[1]/div').send_keys(Keys.ENTER)
    # write back the values and save (renamed from `str` to avoid shadowing the builtin)
    stats = {"posts": int(n_posts.replace(",", "")), "likes": 0, "follow": int(n_follow.replace(",", "")), "follower": int(n_follower.replace(",", "")), "rt_to": 0, "rt_me": 0, "letter": int(n_letter.replace(",", "")), "yesterday_posts": posts, "yesterday_likes": 0, "yesterday_follow": follow, "yesterday_follower": follower, "yesterday_rt_to": 0, "yesterday_rt_me": 0, "yesterday_letter": letter}
    with open("cache/" + email1 + "/comparison_date.json", mode='w') as f:
        json.dump(stats, f, indent=2, ensure_ascii=False)
def auto_conpari():
schedule.every().day.at(config_ini.get('Mode', 'TimeToPost')).do(job)
while alive:
schedule.run_pending()
time.sleep(5)
#----------------------------------------------------------------------------------------------------#
def main():
    # command list
commands = ["help","speed","userid","icon","cover"]
    # connect to the chat page
try:
driver1_2.get('https://yay.space/timeline/all?modalMode=1')
WebDriverWait(driver1_2, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="modals"]/div[1]/div/div[2]/dl/a[1]')))
except:
logging.error("Connection timed out...!!")
sys.exit()
    # watch the chat
while alive:
try:
            # watch the text at the very top of the chat pane
text_s = driver1_2.find_element_by_class_name('RecommendUsers__item.RecommendUsers__item--chatroom')
            # avoid errors from items that are not text objects
text = text_s.find_element_by_class_name('RecommendUsers__item__p').text
except:
continue
if text in commands:
text_s.find_element_by_class_name('RecommendUsers__item__p').click()
WebDriverWait(driver1_2, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="modals"]/div[1]/div/div[2]/div/div/p[1]')))
last_mes = driver1_2.find_elements_by_class_name('Messages__item.Messages__item--self')[-1]
try: last_text = (last_mes.find_element_by_class_name('Messages__item__span.Messages__item__span--text').text).split("\n")[0]
except: continue
if last_text == "help":
driver1_2.find_element_by_class_name('ReplyForm__input').send_keys("以下のコマンドが使用できます")
driver1_2.find_element_by_class_name('ReplyForm__input').send_keys(Keys.SHIFT, Keys.ENTER)
driver1_2.find_element_by_class_name('ReplyForm__input').send_keys("help:コマンド表を表示します")
driver1_2.find_element_by_class_name('ReplyForm__input').send_keys(Keys.SHIFT, Keys.ENTER)
driver1_2.find_element_by_class_name('ReplyForm__input').send_keys("speed:処理速度を計測します")
driver1_2.find_element_by_class_name('ReplyForm__input').send_keys(Keys.SHIFT, Keys.ENTER)
driver1_2.find_element_by_class_name('ReplyForm__input').send_keys("userid:チャット相手のUserIDを確認します")
driver1_2.find_element_by_class_name('ReplyForm__input').send_keys(Keys.SHIFT, Keys.ENTER)
driver1_2.find_element_by_class_name('ReplyForm__input').send_keys("icon:チャット相手のアイコンを送信します")
driver1_2.find_element_by_class_name('ReplyForm__input').send_keys(Keys.SHIFT, Keys.ENTER)
driver1_2.find_element_by_class_name('ReplyForm__input').send_keys("cover:チャット相手の背景を送信します")
driver1_2.find_element_by_class_name('Button.Button--green.Button--icon-chat-send.Button--wrap-content').click()
elif last_text == "speed":
start = time.time()
driver1_2.find_element_by_class_name('ReplyForm__input').send_keys("Measuring...")
driver1_2.find_element_by_class_name('Button.Button--green.Button--icon-chat-send.Button--wrap-content').click()
elapsed_time = time.time() - start
driver1_2.find_element_by_class_name('ReplyForm__input').send_keys("{0}[sec]".format(elapsed_time))
driver1_2.find_element_by_class_name('Button.Button--green.Button--icon-chat-send.Button--wrap-content').click()
elif last_text == "userid":
op_userid = driver1_2.find_element_by_class_name('Modal__header__h2__a').get_attribute("href")
driver1_2.find_element_by_class_name('ReplyForm__input').send_keys(op_userid.replace("https://yay.space", ""))
driver1_2.find_element_by_class_name('Button.Button--green.Button--icon-chat-send.Button--wrap-content').click()
elif last_text == "icon":
y_header = driver1_2.find_element_by_class_name('Modal__header__h2__a')
URL = y_header.find_element_by_class_name('ImageLoader.Avatar').get_attribute("data-url")
request.urlretrieve(URL, "icon.jpg")
driver1_2.find_element(By.XPATH, '//input[@type="file"]').send_keys("/root/yay/icon.jpg")
while True:
last_mes = driver1_2.find_elements_by_class_name('Messages__item.Messages__item--self')[-1]
try:
last_mes.find_element_by_class_name('Messages__item__span.Messages__item__span--eternal_image')
break
except: pass
"""
elif last_text == "cover":
driver1_2.find_element_by_class_name('Modal__header__h2').click()
WebDriverWait(driver1_2, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="main"]/div/div[2]/div/div[1]/div[1]/div/div[1]/figure')))
URL = driver1_2.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[1]/div/div[1]/figure').get_attribute("style")
driver1_2.save_screenshot('screenshot.png')
logging.info(URL)
request.urlretrieve(URL[23:-3], "cover.jpg")
driver1_2.back()
WebDriverWait(driver1_2, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="modals"]/div[1]/div/div[2]/div/div/p[1]')))
driver1_2.find_element(By.XPATH, '//input[@type="file"]').send_keys("/root/yay/cover.jpg")
while True:
last_mes = driver1_2.find_elements_by_class_name('Messages__item.Messages__item--self')[-1]
try:
last_mes.find_element_by_class_name('Messages__item__span.Messages__item__span--eternal_image')
break
except: pass
"""
driver1_2.get('https://yay.space/timeline/all?modalMode=1')
WebDriverWait(driver1_2, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="modals"]/div[1]/div/div[2]/dl/a[1]')))
#----------------------------------------------------------------------------------------------------#
def main_sub():
    # Connect to the chat page
try:
driver2_2.get('https://yay.space/timeline/all?modalMode=1')
WebDriverWait(driver2_2, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="modals"]/div[1]/div/div[2]/dl/a[1]')))
except:
logging.error("Connection timed out...!!")
sys.exit()
    # Monitor chats
while alive:
x = 0
while x <= 40 and alive:
try:
                # Watch the text element at the very top of the chat screen
try: text_s = driver2_2.find_elements_by_class_name('RecommendUsers__item.RecommendUsers__item--chatroom')[x]
except:
x = 0
continue
x = x + 1
try: text_s.find_element_by_class_name('Badge')
except: continue
                # Avoid errors from objects other than text
text = text_s.find_element_by_class_name('RecommendUsers__item__p').text
except:
                # For anything that isn't a text object, mark it read and go back
text_s.find_element_by_class_name('RecommendUsers__item__p').click()
WebDriverWait(driver2_2, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="modals"]/div[1]/div/div[2]/div/div/p[1]')))
driver2_2.get('https://yay.space/timeline/all?modalMode=1')
WebDriverWait(driver2_2, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="modals"]/div[1]/div/div[2]/dl/a[1]')))
continue
text_s.find_element_by_class_name('RecommendUsers__item__p').click()
WebDriverWait(driver2_2, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="modals"]/div[1]/div/div[2]/div/div/p[1]')))
last_mes = driver2_2.find_elements_by_class_name('Messages__item')[-1]
try: last_text = (last_mes.find_element_by_class_name('Messages__item__span.Messages__item__span--text').text).split("\n")[0]
except:
driver2_2.get('https://yay.space/timeline/all?modalMode=1')
WebDriverWait(driver2_2, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="modals"]/div[1]/div/div[2]/dl/a[1]')))
continue
payload = {'apikey':'DZZF3AKLPDL2kxbRRWXwvIwxbGUWEZQ7', 'query':last_text}
r = requests.request("POST", "https://api.a3rt.recruit-tech.co.jp/talk/v1/smalltalk", data=payload).json()
try: driver2_2.find_element_by_class_name('ReplyForm__input').send_keys(str(r['results'][0]['reply']))
except: pass
driver2_2.find_element_by_class_name('Button.Button--green.Button--icon-chat-send.Button--wrap-content').click()
driver2_2.get('https://yay.space/timeline/all?modalMode=1')
try: WebDriverWait(driver2_2, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="modals"]/div[1]/div/div[2]/dl/a[1]')))
except: continue
#----------------------------------------------------------------------------------------------------#
def auto_c():
    # Connect to the public timeline page
if mode == 1:
try:
driver1_1.get('https://yay.space/timeline/all')
WebDriverWait(driver1_1, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="main"]/div/div[2]/div/div[1]/div[2]/dl/div[1]')))
except:
logging.error("Browser1_1 Connection timed out...!!")
sys.exit()
if mode == 2:
try:
driver1_1.get('https://yay.space/timeline/all')
driver2_1.get('https://yay.space/timeline/all')
WebDriverWait(driver1_1, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="main"]/div/div[2]/div/div[1]/div[2]/dl/div[1]')))
WebDriverWait(driver2_1, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="main"]/div/div[2]/div/div[1]/div[2]/dl/div[1]')))
except:
logging.error("Browser1_1 or Browser2_1 Connection timed out...!!")
sys.exit()
    ### Repeatedly auto-like posts on the public timeline ###
    sel = 1 # used to alternate between the two accounts
    c_ok1 = 0 # total like count (main)
    c_ok2 = 0 # total like count (sub)
    c_all1 = 0 # total count of selected public posts (main)
    c_all2 = 0 # total count of selected public posts (sub)
    while alive:
        ok_list = [] # list of post IDs already liked
        z = 1 # caps how many posts per page get liked
while z <= 15 and alive:
z = z + 1
if sel == 1:
                # Select the element for a single timeline entry
                element = driver1_1.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[2]/dl/div[{}]'.format(z))
                # Scroll to the selected element
                actions = ActionChains(driver1_1)
                actions.move_to_element(element)
                actions.perform()
                postid = element.find_element_by_class_name('Timeline__item__handle').get_attribute("href")
                userid = element.find_element_by_class_name('Timeline__item__profile-img__a').get_attribute("href")
                # Skip posts that are already liked
                if postid.replace("https://yay.space/post/", "") in ok_list:
                    continue
                try:
                    element.find_element_by_class_name('Heart__path.Heart__path--liked')
                    continue
                except: pass
                # Count the selected public posts that are not yet liked
                c_all1 = c_all1 + 1
                # Skip reply posts
                try:
                    element.find_element_by_class_name('ReplyTo')
                    continue
                except: pass
                # Skip group-call recruiting posts
                try:
                    element.find_element_by_class_name('Timeline__item__call')
                    continue
                except: pass
if sel == 2:
                # Select the element for a single timeline entry
                element1 = driver2_1.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[2]/dl/div[{}]'.format(z))
                # Scroll to the selected element
                actions = ActionChains(driver2_1)
                actions.move_to_element(element1)
                actions.perform()
                postid1 = element1.find_element_by_class_name('Timeline__item__handle').get_attribute("href")
                userid1 = element1.find_element_by_class_name('Timeline__item__profile-img__a').get_attribute("href")
                # Skip posts that are already liked
                if postid1.replace("https://yay.space/post/", "") in ok_list:
                    continue
                try:
                    element1.find_element_by_class_name('Heart__path.Heart__path--liked')
                    continue
                except: pass
                # Count the selected public posts that are not yet liked
                c_all2 = c_all2 + 1
                # Skip reply posts
                try:
                    element1.find_element_by_class_name('ReplyTo')
                    continue
                except: pass
                # Skip group-call recruiting posts
                try:
                    element1.find_element_by_class_name('Timeline__item__call')
                    continue
                except: pass
            # Perform the like
if sel == 1:
try:
element.find_element_by_class_name('Heart__path').click()
c_ok1 = c_ok1 + 1
ok_list.append(postid.replace("https://yay.space/post/", ""))
except: continue
if mode == 2 and sel == 2:
try:
element1.find_element_by_class_name('Heart__path').click()
c_ok2 = c_ok2 + 1
ok_list.append(postid1.replace("https://yay.space/post/", ""))
except: continue
        if sel == 1 and mode == 2: sel = 2
        elif sel == 2: sel = 1
if mode == 1:
print("< Auto Like >\n(Main Account)\nUserID:{0} PostID:{1}".format(userid.replace("https://yay.space", ""), postid.replace("https://yay.space", "")) + "\n合計カウント(いいね/全投稿):{0} / {1}\r\033[4A".format(c_ok1, c_all1))
if mode == 2:
if c_all2 > 1:
print("< Auto Like >\n(Main Account)\nUserID:{0} PostID:{1}".format(userid.replace("https://yay.space", ""), postid.replace("https://yay.space", "")) + "\n合計カウント(いいね/全投稿):{0} / {1}".format(c_ok1, c_all1)
+ "\n(Sub Account)\nUserID:{0} PostID:{1}".format(userid1.replace("https://yay.space", ""), postid1.replace("https://yay.space", "")) + "\n合計カウント(いいね/全投稿):{0} / {1}\r\033[7A".format(c_ok2, c_all2))
else:
print("< Auto Like >\n(Main Account)\nUserID:{0} PostID:{1}".format(userid.replace("https://yay.space", ""), postid.replace("https://yay.space", "")) + "\n合計カウント(いいね/全投稿):{0} / {1}\r\033[4A".format(c_ok1, c_all1))
        # Adjust the interval between likes
        if mode == 1:
            time.sleep(2)
        if mode == 2:
            time.sleep(1)
        # Reload the public timeline page
        for _ in range(3): # try at most 3 times
try:
if mode == 1:
driver1_1.refresh()
WebDriverWait(driver1_1, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="main"]/div/div[2]/div/div[1]/div[2]/dl/div[1]')))
if mode == 2:
driver1_1.refresh()
driver2_1.refresh()
WebDriverWait(driver1_1, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="main"]/div/div[2]/div/div[1]/div[2]/dl/div[1]')))
WebDriverWait(driver2_1, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="main"]/div/div[2]/div/div[1]/div[2]/dl/div[1]')))
except Exception as e:
                pass # handle failures here if needed
            else:
                print("\nPage reload..." + "\r\033[2A")
                z = 1
                break # leave the loop when no failure occurred
else:
logging.error("Reload failed...!!")
return
#----------------------------------------------------------------------------------------------------#
if __name__ == "__main__":
try:
login()
threading.Thread(target = auto_c).start()
threading.Thread(target = main).start()
threading.Thread(target = main_sub).start()
if config_ini.get('Mode', 'AutoPostComparison') == "True":
if not os.path.exists("cache/" + email1 + "/comparison_date.json"):
driver1_4.find_element_by_class_name('Header__profile').click()
WebDriverWait(driver1_4, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="main"]/div/div[1]/div/div/div')))
posts = driver1_4.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[1]/div/div[2]/dl/div[1]/a/dd').text
follow = driver1_4.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[1]/div/div[2]/dl/div[4]/a/dd').text
follower = driver1_4.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[1]/div/div[2]/dl/div[3]/a/dd').text
letter = driver1_4.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[1]/div/div[2]/dl/div[2]/a/dd').text
str = {"posts": int(posts.replace(",", "")), "likes": 0, "follow": int(follow.replace(",", "")), "follower": int(follower.replace(",", "")), "rt_to": 0, "rt_me": 0, "letter": int(letter.replace(",", "")), "yesterday_posts": 0, "yesterday_likes": 0, "yesterday_follow": 0, "yesterday_follower": 0, "yesterday_rt_to": 0, "yesterday_rt_me": 0, "yesterday_letter": 0}
with open("cache/" + email1 + "/comparison_date.json", mode='w') as f:
json.dump(str, f, indent=2, ensure_ascii=False)
threading.Thread(target = auto_conpari).start()
thread_list = threading.enumerate()
thread_list.remove(threading.main_thread())
for thread in thread_list:
thread.join()
except KeyboardInterrupt:
alive = False
time.sleep(5)
        # Guard against the browsers being left running when the program is
        # force-quit with Ctrl+C: shut the drivers down explicitly
        logging.warning("Caught KeyboardInterrupt; quitting the browsers")
driver1_1.quit()
driver1_2.quit()
driver1_3.quit()
if mode == 2:
driver2_1.quit()
driver2_2.quit()
driver2_3.quit()
pass
|
base_noninterleaving_notification_ids_tests.py
|
from abc import ABC, abstractmethod
from threading import Event, Thread
from unittest import TestCase
from uuid import uuid4
from eventsourcing.persistence import ApplicationRecorder, StoredEvent
class NonInterleavingNotificationIDsBaseCase(ABC, TestCase):
insert_num = 1000
def test(self):
recorder = self.create_recorder()
race_started = Event()
originator1_id = uuid4()
originator2_id = uuid4()
stack1 = self.create_stack(originator1_id)
stack2 = self.create_stack(originator2_id)
errors = []
def insert_stack(stack):
try:
race_started.wait()
recorder.insert_events(stack)
except Exception as e:
errors.append(e)
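        # Both writer threads block on race_started and are released together
        # below, so the two insert_events() calls genuinely race each other.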
thread1 = Thread(target=insert_stack, args=(stack1,), daemon=True)
thread2 = Thread(target=insert_stack, args=(stack2,), daemon=True)
thread1.start()
thread2.start()
race_started.set()
thread1.join()
thread2.join()
if errors:
raise errors[0]
notifications = recorder.select_notifications(start=1, limit=1000000)
ids_for_sequence1 = [
e.id for e in notifications if e.originator_id == originator1_id
]
ids_for_sequence2 = [
e.id for e in notifications if e.originator_id == originator2_id
]
max_id_for_sequence1 = max(ids_for_sequence1)
max_id_for_sequence2 = max(ids_for_sequence2)
min_id_for_sequence1 = min(ids_for_sequence1)
min_id_for_sequence2 = min(ids_for_sequence2)
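        # Non-interleaving: all notification IDs for one originator must come
        # before all IDs for the other, i.e. the two ID ranges must not overlap.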
if max_id_for_sequence1 > min_id_for_sequence2:
self.assertGreater(min_id_for_sequence1, max_id_for_sequence2)
else:
self.assertGreater(min_id_for_sequence2, max_id_for_sequence1)
def create_stack(self, originator_id):
return [
StoredEvent(
originator_id=originator_id,
originator_version=i,
topic="",
state=b"",
)
for i in range(self.insert_num)
]
@abstractmethod
def create_recorder(self) -> ApplicationRecorder:
pass
|
joinEx.py
|
from threading import Thread
from random import randint
import sys
NTHRDS = 6
def theWorks(n): # main function of the "worker thread"
r = 0
for i in range(1000000): # do lots of work
r += randint(0,50)
print('I am {}, the result is: {}'.format(n, r))
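    # sys.exit() raises SystemExit, which terminates only this worker thread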
sys.exit()
threads = [] # creates a list of threads
for i in range(NTHRDS):
threads.append(Thread(target=theWorks, args=[i]))
threads[i].start()
for i in range(NTHRDS):
threads[i].join()
|
ssh.py
|
#!/usr/bin/env python3
"""
DMLC submission script by ssh
One needs to make sure all slave machines are ssh-able.
"""
from __future__ import absolute_import
from multiprocessing import Pool, Process
import os, subprocess, logging
from threading import Thread
from . import tracker
def sync_dir(local_dir, slave_node, slave_dir):
"""
sync the working directory from root node into slave node
"""
remote = slave_node[0] + ':' + slave_dir
logging.info('rsync %s -> %s', local_dir, remote)
prog = 'rsync -az --rsh="ssh -o StrictHostKeyChecking=no -p %s" %s %s' % (
slave_node[1], local_dir, remote)
subprocess.check_call([prog], shell = True)
def get_env(pass_envs):
envs = []
# get system envs
keys = ['OMP_NUM_THREADS', 'KMP_AFFINITY', 'LD_LIBRARY_PATH', 'AWS_ACCESS_KEY_ID',
'AWS_SECRET_ACCESS_KEY', 'DMLC_INTERFACE']
for k in keys:
v = os.getenv(k)
if v is not None:
envs.append('export ' + k + '=' + v + ';')
    # get the envs passed in via pass_envs
for k, v in pass_envs.items():
envs.append('export ' + str(k) + '=' + str(v) + ';')
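    # the result is a single string of shell exports, e.g.
    # "export OMP_NUM_THREADS=4; export DMLC_ROLE=worker;" (values illustrative)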
return (' '.join(envs))
def submit(args):
assert args.host_file is not None
with open(args.host_file) as f:
tmp = f.readlines()
assert len(tmp) > 0
hosts=[]
for h in tmp:
if len(h.strip()) > 0:
# parse addresses of the form ip:port
h = h.strip()
i = h.find(":")
p = "22"
if i != -1:
p = h[i+1:]
h = h[:i]
            # hosts now contains (ip, port) pairs
hosts.append((h, p))
def ssh_submit(nworker, nserver, pass_envs):
"""
customized submit script
"""
# thread func to run the job
def run(prog):
subprocess.check_call(prog, shell = True)
# sync programs if necessary
local_dir = os.getcwd()+'/'
working_dir = local_dir
if args.sync_dst_dir is not None and args.sync_dst_dir != 'None':
working_dir = args.sync_dst_dir
pool = Pool(processes=len(hosts))
for h in hosts:
pool.apply_async(sync_dir, args=(local_dir, h, working_dir))
pool.close()
pool.join()
# launch jobs
for i in range(nworker + nserver):
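            # ranks [0, nserver) act as parameter servers, the rest as workers,
            # assigned round-robin across the host list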
pass_envs['DMLC_ROLE'] = 'server' if i < nserver else 'worker'
(node, port) = hosts[i % len(hosts)]
logging.debug("SSH-ing to %s:%s", node, port)
pass_envs['DMLC_NODE_HOST'] = node
prog = get_env(pass_envs) + ' cd ' + working_dir + '; ' + (' '.join(args.command))
prog = 'ssh -o StrictHostKeyChecking=no ' + node + ' -p ' + port + ' \'' + prog + '\''
thread = Thread(target = run, args=(prog,))
            thread.daemon = True
thread.start()
return ssh_submit
tracker.submit(args.num_workers, args.num_servers,
fun_submit=ssh_submit,
pscmd=(' '.join(args.command)),
hostIP=args.host_ip)
|
game.py
|
import numpy as np
import os
from utils.config import Config
import tkinter as tk
import tkinter.messagebox
from PIL import Image, ImageTk
from cores.color_board import COLOR_BOARD
import matplotlib.pyplot as plt
from utils.perlin_noise import PerlinNoiseFactory
from utils.ca_cave import CA_CaveFactory
from utils import utils, score
import copy
from threading import Thread
import time
from cores.vehicle import vehicleStatus, ACTION_MAP
import random
import pygame
class App:
_img = None
_score = '0'
_step = '0'
def __init__(self, game):
pygame.init()
self.game = game
self.width, self.height = game.get_resolution()
display_size = (self.width, self.height + 100)
self.display = pygame.display.set_mode(display_size)
self.running = True
self.lazy = True
# self.start()
def update_score(self, score=0):
self._score = str(score)
self.lazy = False
def update_step(self, step=0):
self._step = str(step)
self.lazy = False
def update_img(self, img=None):
self._img = np.transpose(img, (1, 0, 2))
self.lazy = False
def close(self):
self.running = False
def run(self):
running = True
while running and self.running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_w or event.key == pygame.K_UP:
self.game.move_by_action(1)
elif event.key == pygame.K_s or event.key == pygame.K_DOWN:
self.game.move_by_action(3)
elif event.key == pygame.K_a or event.key == pygame.K_LEFT:
self.game.move_by_action(4)
elif event.key == pygame.K_d or event.key == pygame.K_RIGHT:
self.game.move_by_action(2)
if not self.lazy:
self.display.fill((0, 0, 0))
if self._img is not None:
surf = pygame.surfarray.make_surface(self._img)
self.display.blit(surf, (0, 100))
font = pygame.font.Font(None, 30)
score_label = font.render("Score: {:s}".format(self._score), True, (255, 255, 255))
self.display.blit(score_label, (self.width // 3, 10))
step_label = font.render("Step: {:s}".format(self._step), True, (255, 255, 255))
self.display.blit(step_label, (self.width // 3, 50))
pygame.display.update()
self.lazy = True
pygame.quit()
class Game:
_config = None
_world_width = 1
_world_height = 1
_window = None
_canvas = None
_cell_width = 1
_cell_height = 1
manual_control = False
world = None
trajectory_map = None
discovered_map = None
bg_world = None
n_vehicles = 1
vehicle_status = {}
vehicle_distance = None
step = 0
score = 0
scoreRecorder = None
action_map = ACTION_MAP
action_space = ACTION_MAP.__len__()
def __init__(self, config_path=None):
if config_path is None:
config_path = os.path.join('resources', 'configs', '2d.json')
self.config = Config.load_config(config_path)
self._world_width, self._world_height = self.config['world_size']
self.n_vehicles = self.config['num_vehicles']
self._cell_width, self._cell_height = self.config['cell_size']
self.seed(self.config['seed'])
self.setup()
@staticmethod
def seed(_seed=123):
random.seed(_seed)
np.random.seed(_seed)
def get_resolution(self):
return (self._world_width * self._cell_width,
self._world_height * self._cell_height)
def setup(self):
self.reset()
# self._window = App(self)
Thread(target=self.init_window).start()
time.sleep(1)
self.render_new_frame()
def init_window(self, ):
self._window = App(self)
self._window.run()
def destroy_window(self):
self._window.close()
self._window = None
def reset(self):
self.step = 0
self.score = 0
# Generate Obstacle
caf = CA_CaveFactory(self._world_height, self._world_width, self.config['ca_cave_open_prob'])
self.world = caf.get_map()
idx_x, idx_y = np.where(self.world == 1)
# Generate Curiosity && Replace WALL TO Curiosity By perlin_prob
perlin_res = self.config['perlin_res']
pnf = PerlinNoiseFactory(3, octaves=4, tile=(self._world_height / perlin_res,
self._world_width / perlin_res, 1))
for co_idx in range(len(idx_x)):
n = (pnf(idx_x[co_idx] / perlin_res, idx_y[co_idx] / perlin_res, 1) + 1) / 2
if n > self.config['perlin_prob']:
self.world[idx_x[co_idx]][idx_y[co_idx]] = 3
# Rescale map value
# 0 -> wall
# 1 -> Road
# 2~100 -> curiosity
# 101~150 -> vehicle
self.world[self.world > 0] -= 1
# Init the visualization window
self.bg_world = np.zeros((self._world_height * self._cell_height, self._world_width * self._cell_width, 3))
self.setup_vehicles()
self.render_background()
self.scoreRecorder = score.scoreRecorder(self.world, self.vehicle_status[0].receptive_radius)
if self._window:
self.render_new_frame()
def setup_vehicles(self):
"""
Init the vehicle settings
"""
self.trajectory_map = {}
self.discovered_map = {}
for vehicle_id in range(self.n_vehicles):
self.vehicle_status[vehicle_id] = vehicleStatus()
_loc = utils.generate_next_vehicle_random_pose(self.world)
self.vehicle_status[vehicle_id].position = _loc
self.vehicle_status[vehicle_id].direction = np.random.randint(1, 5)
self.vehicle_status[vehicle_id].receptive_radius = self.config['vehicle_receptive_radius'][vehicle_id]
self.vehicle_status[vehicle_id].communication_dis = self.config['vehicle_communication_dis'][vehicle_id]
self.world[_loc[0], _loc[1]] = 101 + vehicle_id
            # uint8, so these maps can be bitwise OR-merged in swap_vehicle_infomation
            self.trajectory_map[vehicle_id] = np.zeros(self.world.shape, dtype='uint8')
            self.discovered_map[vehicle_id] = np.zeros(self.world.shape, dtype='uint8')
self.vehicle_distance = np.zeros((self.n_vehicles, self.n_vehicles))
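        # symmetric matrix of pairwise Euclidean distances between vehicles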
def render_background(self):
for c in np.unique(self.world):
if c > 100:
color = 1
else:
color = int(c)
idx_x, idx_y = np.where(self.world == c)
for _width in range(self._cell_width):
for _height in range(self._cell_height):
self.bg_world[idx_x * self._cell_height + _height, idx_y * self._cell_width + _width, :] = \
COLOR_BOARD[color]
def render_world(self):
vis_map = copy.deepcopy(self.bg_world)
for vehicle_id in range(self.n_vehicles):
vehicle_x, vehicle_y = self.vehicle_status[vehicle_id].position
xx, yy = utils.generate_vehicle_coverage_idx(vehicle_x,
vehicle_y,
self._cell_width,
self._cell_height,
self._cell_width)
vis_x, vis_y = np.where(self.trajectory_map[vehicle_id] == 1)
if len(vis_x) > 0:
for _width in range(self._cell_width):
for _height in range(self._cell_height):
vis_map[vis_x * self._cell_height + _height, vis_y * self._cell_width + _width, :] = \
COLOR_BOARD[151 + vehicle_id]
vis_map[xx, yy, :] = COLOR_BOARD[101 + vehicle_id]
return vis_map.astype('uint8')
def render_new_frame(self, ):
vis_map = self.render_world()
if self._window:
self._window.update_img(vis_map)
self._window.update_score(self.score)
self._window.update_step(self.step)
def move_by_action(self, action, vehicle_id=0):
self.step += 1
dx, dy = ACTION_MAP[action]
x, y = self.vehicle_status[vehicle_id].position
done = self.world[x + dx][y + dy] != 1
if done:
if self.manual_control:
root = tk.Tk()
root.withdraw()
                tkinter.messagebox.showinfo(title='Failed!',
                                            message='Your score is ' + str(self.score) + ' after ' + str(
                                                self.step) + ' steps')
self.reset()
else:
self.vehicle_status[vehicle_id].position = [x + dx, y + dy]
self.world[x + dx][y + dy] = 101 + vehicle_id
self.world[x][y] = 1
self.trajectory_map[vehicle_id][x][y] = 1
self.vehicle_status[vehicle_id].direction = action
self.update_distance_of_vehicles(vehicle_id, x + dx, y + dy)
self.render_new_frame()
obs = self.get_observation()
self.score = self.scoreRecorder.get_score()
return done, obs, [x + dx, y + dy]
def enable_manual_control(self, enable=True):
self.manual_control = enable
def get_observation(self, vehicle_id=0):
action = self.vehicle_status[vehicle_id].direction
x, y = self.vehicle_status[vehicle_id].position
dx, dy = ACTION_MAP[action]
xx = np.arange(x - self.vehicle_status[vehicle_id].receptive_radius,
x + self.vehicle_status[vehicle_id].receptive_radius + 1, 1)
yy = np.arange(y - self.vehicle_status[vehicle_id].receptive_radius,
y + self.vehicle_status[vehicle_id].receptive_radius + 1, 1)
xx[(xx < 0) | (xx >= self._world_height)] = 0
yy[(yy < 0) | (yy >= self._world_width)] = 0
xx, yy = np.meshgrid(xx, yy, sparse=True)
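        # xx and yy form a sparse open grid, so self.world[xx, yy] extracts the
        # square receptive window around the vehicle (out-of-range indices were
        # reset to 0 above to stay inside the map)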
obs = np.transpose(self.world[xx, yy])
# discovered = np.transpose(self.world[xx, yy])
central = self.vehicle_status[vehicle_id].receptive_radius + 1
        # the center of the observation is the drone itself
        obs[central - 1][central - 1] = -2
self.scoreRecorder.push_data(obs, x, y)
radius = self.vehicle_status[vehicle_id].receptive_radius
dis_s = radius - radius // 2
dis_e = radius + radius // 2 + 1
obs_idx_x, obs_idx_y = np.where(obs[dis_s:dis_e, dis_s:dis_e] != -1)
obs_idx_x = obs_idx_x + x - radius + dis_e
obs_idx_y = obs_idx_y + y - radius + dis_e
obs_idx_x[(obs_idx_x < 0) | (obs_idx_x >= self._world_height)] = 0
obs_idx_y[(obs_idx_y < 0) | (obs_idx_y >= self._world_width)] = 0
self.discovered_map[vehicle_id][obs_idx_x, obs_idx_y] = 1
discovered = np.transpose(self.discovered_map[vehicle_id][xx, yy])
trajectory = np.transpose(self.trajectory_map[vehicle_id][xx, yy])
return np.concatenate([np.expand_dims(obs, 0),
np.expand_dims(discovered, 0),
np.expand_dims(trajectory, 0)])
def get_world_info(self):
return self.world.shape
def update_distance_of_vehicles(self, vehicle_id, x, y):
for i in range(self.n_vehicles):
if i == vehicle_id:
dis = 0
else:
loc1 = self.vehicle_status[i].position
dis = np.linalg.norm(np.array(loc1) - np.array([x, y]))
self.vehicle_distance[i][vehicle_id] = dis
self.vehicle_distance[vehicle_id][i] = dis
def swap_vehicle_infomation(self, ):
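        # Vehicles within each other's communication range merge their
        # trajectory and discovered maps, sharing what each has explored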
for i in range(self.n_vehicles):
for j in range(1, self.n_vehicles):
dis = self.vehicle_distance[i][j]
if dis < self.vehicle_status[i].communication_dis and dis < self.vehicle_status[j].communication_dis:
self.trajectory_map[i] = self.trajectory_map[j] = \
(self.trajectory_map[i] | self.trajectory_map[j]).astype('uint8')
self.discovered_map[i] = self.discovered_map[j] = \
(self.discovered_map[i] | self.discovered_map[j]).astype('uint8')
def get_vehicle_status(self, vehicle_id=0):
if vehicle_id not in self.vehicle_status:
return None
else:
return self.vehicle_status[vehicle_id].__dict__
def get_obs_info(self):
info = {}
for key in np.unique(self.world):
if key == -1:
info[key] = 'Unknown'
elif key < -1:
info[key] = 'some vehicle'
elif key == 0:
info[key] = 'Obstacle'
elif key == 1:
info[key] = 'Road'
elif key < 101:
info[key] = "Curiosity_{:d}".format(key - 1)
elif key < 151:
info[key] = "Vehicle_{:d}".format(key - 100)
info['obs_shape'] = self.get_observation()[0].shape
return info
def get_score(self):
self.score = self.scoreRecorder.get_score()
return self.score
|
produce_consumer.py
|
#!/usr/bin/env python
# _*_coding:utf-8_*_
import time
import random
import queue
import threading
q = queue.Queue()
def producer(name):
    count = 0
    while count < 10:
        print("making........")
        time.sleep(random.randrange(3))
        q.put(count)
        print('Producer %s has produced %s baozi...' % (name, count))
        count += 1
    print("ok......")
def consumer(name):
    count = 0
    while count < 10:
        time.sleep(random.randrange(4))
        if not q.empty():
            data = q.get()
            q.task_done()  # pairs with q.put(); lets q.join() know this item is handled
            print(data)
            print('\033[32;1mConsumer %s has eaten %s baozi...\033[0m' % (name, data))
        else:
            print("-----no baozi anymore----")
        count += 1
p1 = threading.Thread(target=producer, args=('A',))
c1 = threading.Thread(target=consumer, args=('B',))
c2 = threading.Thread(target=consumer, args=('C',))
c3 = threading.Thread(target=consumer, args=('D',))
p1.start()
c1.start()
c2.start()
c3.start()
|
studio.py
|
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import multiprocessing
import subprocess
import sys
from pathlib import Path
from threading import Thread
import click
@click.group(name="scenario")
def scenario_cli():
pass
@scenario_cli.command(name="build", help="Generate a single scenario")
@click.option(
"--clean",
is_flag=True,
default=False,
help="Clean previously generated artifacts first",
)
@click.argument("scenario", type=click.Path(exists=True), metavar="<scenario>")
def build_scenario(clean, scenario):
_build_single_scenario(clean, scenario)
def _build_single_scenario(clean, scenario):
import importlib.resources as pkg_resources
from smarts.sstudio.sumo2mesh import generate_glb_from_sumo_network
click.echo(f"build-scenario {scenario}")
if clean:
_clean(scenario)
scenario_root = Path(scenario)
map_net = scenario_root / "map.net.xml"
map_glb = scenario_root / "map.glb"
generate_glb_from_sumo_network(str(map_net), str(map_glb))
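    # Convert the SUMO road network (map.net.xml) into a binary glTF mesh
    # (map.glb) for the simulator to render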
requirements_txt = scenario_root / "requirements.txt"
if requirements_txt.exists():
import zoo.policies
with pkg_resources.path(zoo.policies, "") as path:
# Serve policies through the static file server, then kill after
# we've installed scenario requirements
pip_index_proc = subprocess.Popen(
["twistd", "-n", "web", "--path", path],
# Hide output to keep display simple
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
)
pip_install_cmd = [
sys.executable,
"-m",
"pip",
"install",
"-r",
str(requirements_txt),
]
click.echo(
f"Installing scenario dependencies via '{' '.join(pip_install_cmd)}'"
)
try:
subprocess.check_call(pip_install_cmd, stdout=subprocess.DEVNULL)
finally:
pip_index_proc.terminate()
pip_index_proc.wait()
scenario_py = scenario_root / "scenario.py"
if scenario_py.exists():
subprocess.check_call([sys.executable, scenario_py])
@scenario_cli.command(
name="build-all",
help="Generate all scenarios under the given directories",
)
@click.option(
"--clean",
is_flag=True,
default=False,
help="Clean previously generated artifacts first",
)
@click.argument("scenarios", nargs=-1, metavar="<scenarios>")
def build_all_scenarios(clean, scenarios):
if not scenarios:
        # nargs=-1 in combination with a default value is not supported;
        # if no scenarios are given, default to the "scenarios" directory
scenarios = ["scenarios"]
builder_threads = {}
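    # Build every scenario that contains a *.net.xml in parallel, one thread
    # per scenario; the join loop below waits for all builds to finish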
for scenarios_path in scenarios:
path = Path(scenarios_path)
for p in path.rglob("*.net.xml"):
scenario = f"{scenarios_path}/{p.parent.relative_to(scenarios_path)}"
builder_thread = Thread(
target=_build_single_scenario, args=(clean, scenario)
)
builder_thread.start()
builder_threads[p] = builder_thread
for scenario_path, builder_thread in builder_threads.items():
click.echo(f"Waiting on {scenario_path} ...")
builder_thread.join()
@scenario_cli.command(name="clean")
@click.argument("scenario", type=click.Path(exists=True), metavar="<scenario>")
def clean_scenario(scenario):
_clean(scenario)
def _clean(scenario):
to_be_removed = [
"map.glb",
"bubbles.pkl",
"missions.pkl",
"flamegraph-perf.log",
"flamegraph.svg",
"flamegraph.html",
"*.rou.xml",
"*.rou.alt.xml",
"social_agents/*",
"traffic/*",
"history_mission.pkl",
]
p = Path(scenario)
for file_name in to_be_removed:
for f in p.glob(file_name):
# Remove file
f.unlink()
@scenario_cli.command(name="replay")
@click.option("-d", "--directory", multiple=True)
@click.option("-t", "--timestep", default=0.01, help="Timestep in seconds")
@click.option("--endpoint", default="ws://localhost:8081")
def replay(directory, timestep, endpoint):
from envision.client import Client as Envision
for path in directory:
jsonl_paths = list(Path(path).glob("*.jsonl"))
click.echo(
f"Replaying {len(jsonl_paths)} record(s) at path={path} with "
f"timestep={timestep}s"
)
with multiprocessing.pool.ThreadPool(len(jsonl_paths)) as pool:
pool.starmap(
Envision.read_and_send,
[(jsonl, endpoint, timestep) for jsonl in jsonl_paths],
)
scenario_cli.add_command(build_scenario)
scenario_cli.add_command(build_all_scenarios)
scenario_cli.add_command(clean_scenario)
scenario_cli.add_command(replay)
|
test_ftplib.py
|
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import io
import errno
import os
import threading
import time
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase, skipUnless
from test import support
from test.support import HOST, HOSTv6
TIMEOUT = 3
# the dummy data returned by server over the data channel when
# RETR, LIST, NLST, MLSD commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
MLSD_DATA = ("type=cdir;perm=el;unique==keVO1+ZF4; test\r\n"
"type=pdir;perm=e;unique==keVO1+d?3; ..\r\n"
"type=OS.unix=slink:/foobar;perm=;unique==keVO1+4G4; foobar\r\n"
"type=OS.unix=chr-13/29;perm=;unique==keVO1+5G4; device\r\n"
"type=OS.unix=blk-11/108;perm=;unique==keVO1+6G4; block\r\n"
"type=file;perm=awr;unique==keVO1+8G4; writable\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; promiscuous\r\n"
"type=dir;perm=;unique==keVO1+1t2; no-exec\r\n"
"type=file;perm=r;unique==keVO1+EG4; two words\r\n"
"type=file;perm=r;unique==keVO1+IH4; leading space\r\n"
"type=file;perm=r;unique==keVO1+1G4; file1\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; incoming\r\n"
"type=file;perm=r;unique==keVO1+1G4; file2\r\n"
"type=file;perm=r;unique==keVO1+1G4; file3\r\n"
"type=file;perm=r;unique==keVO1+1G4; file4\r\n")
class DummyDTPHandler(asynchat.async_chat):
dtp_conn_closed = False
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024).decode('ascii')
def handle_close(self):
# XXX: this method can be called many times in a row for a single
# connection, including in clear-text (non-TLS) mode.
# (behaviour witnessed with test_data_connection)
if not self.dtp_conn_closed:
self.baseclass.push('226 transfer complete')
self.close()
self.dtp_conn_closed = True
def push(self, what):
if self.baseclass.next_data is not None:
what = self.baseclass.next_data
self.baseclass.next_data = None
if not what:
return self.close_when_done()
super(DummyDTPHandler, self).push(what.encode('ascii'))
def handle_error(self):
raise Exception
class DummyFTPHandler(asynchat.async_chat):
dtp_handler = DummyDTPHandler
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
# tells the socket to handle urgent data inline (ABOR command)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.next_data = None
self.rest = None
self.next_retr_data = RETR_DATA
self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer).decode('ascii')
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise Exception
def push(self, data):
asynchat.async_chat.push(self, data.encode('ascii') + b'\r\n')
def cmd_port(self, arg):
addr = list(map(int, arg.split(',')))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
with socket.socket() as sock:
sock.bind((self.socket.getsockname()[0], 0))
sock.listen()
sock.settimeout(TIMEOUT)
ip, port = sock.getsockname()[:2]
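            # PASV replies encode the endpoint as h1,h2,h3,h4,p1,p2 where port = p1*256 + p2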
            ip = ip.replace('.', ','); p1 = port // 256; p2 = port % 256
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
with socket.socket(socket.AF_INET6) as sock:
sock.bind((self.socket.getsockname()[0], 0))
sock.listen()
sock.settimeout(TIMEOUT)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_noop(self, arg):
self.push('200 noop ok')
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_abor(self, arg):
self.push('226 abor ok')
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
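        # honor a preceding REST command by starting the transfer at the stored offset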
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(self.next_retr_data[offset:])
self.dtp.close_when_done()
self.rest = None
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
def cmd_opts(self, arg):
self.push('200 opts ok')
def cmd_mlsd(self, arg):
self.push('125 mlsd ok')
self.dtp.push(MLSD_DATA)
self.dtp.close_when_done()
def cmd_setlongretr(self, arg):
# For testing. Next RETR will return long line.
self.next_retr_data = 'x' * int(arg)
self.push('125 setlongretr ok')
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise Exception
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert3.pem")
CAFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem")
class SSLConnection(asyncore.dispatcher):
"""An asyncore.dispatcher subclass supporting TLS/SSL."""
_ssl_accepting = False
_ssl_closing = False
def secure_connection(self):
context = ssl.SSLContext()
# TODO: fix TLSv1.3 support
context.options |= ssl.OP_NO_TLSv1_3
context.load_cert_chain(CERTFILE)
socket = context.wrap_socket(self.socket,
suppress_ragged_eofs=False,
server_side=True,
do_handshake_on_connect=False)
self.del_channel()
self.set_socket(socket)
self._ssl_accepting = True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
# TODO: SSLError does not expose alert information
elif "SSLV3_ALERT_BAD_CERTIFICATE" in err.args[1]:
return self.handle_close()
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def _do_ssl_shutdown(self):
self._ssl_closing = True
try:
self.socket = self.socket.unwrap()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
except OSError as err:
# Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
# from OpenSSL's SSL_shutdown(), corresponding to a
# closed socket condition. See also:
# http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
pass
self._ssl_closing = False
            if getattr(self, '_ccc', False) is False:
                super(SSLConnection, self).close()
def handle_read_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_read_event()
def handle_write_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_write_event()
def send(self, data):
try:
return super(SSLConnection, self).send(data)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return 0
raise
def recv(self, buffer_size):
try:
return super(SSLConnection, self).recv(buffer_size)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return b''
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
self.handle_close()
return b''
raise
def handle_error(self):
raise Exception
def close(self):
if (isinstance(self.socket, ssl.SSLSocket) and
self.socket._sslobj is not None):
self._do_ssl_shutdown()
else:
super(SSLConnection, self).close()
class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
"""A DummyDTPHandler subclass supporting TLS/SSL."""
def __init__(self, conn, baseclass):
DummyDTPHandler.__init__(self, conn, baseclass)
if self.baseclass.secure_data_channel:
self.secure_connection()
class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
"""A DummyFTPHandler subclass supporting TLS/SSL."""
dtp_handler = DummyTLS_DTPHandler
def __init__(self, conn):
DummyFTPHandler.__init__(self, conn)
self.secure_data_channel = False
self._ccc = False
def cmd_auth(self, line):
"""Set up secure control channel."""
self.push('234 AUTH TLS successful')
self.secure_connection()
def cmd_ccc(self, line):
self.push('220 Reverting back to clear-text')
self._ccc = True
self._do_ssl_shutdown()
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer.
For TLS/SSL the only valid value for the parameter is '0'.
Any other value is accepted but ignored.
"""
self.push('200 PBSZ=0 successful.')
def cmd_prot(self, line):
"""Setup un/secure data channel."""
arg = line.upper()
if arg == 'C':
self.push('200 Protection set to Clear')
self.secure_data_channel = False
elif arg == 'P':
self.push('200 Protection set to Private')
self.secure_data_channel = True
else:
self.push("502 Unrecognized PROT type (use C or P).")
class DummyTLS_FTPServer(DummyFTPServer):
handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def check_data(self, received, expected):
self.assertEqual(len(received), len(expected))
self.assertEqual(received, expected)
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r\n0')
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\n0')
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r0')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, OSError,
EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler_instance.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler_instance.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_cwd(self):
dir = self.client.cwd('/foo')
self.assertEqual(dir, '250 cwd ok')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_abort(self):
self.client.abort()
def test_retrbinary(self):
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.check_data(''.join(received), RETR_DATA)
def test_retrbinary_rest(self):
def callback(data):
received.append(data.decode('ascii'))
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', callback, rest=rest)
self.check_data(''.join(received), RETR_DATA[rest:])
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.check_data(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = io.BytesIO(RETR_DATA.encode('ascii'))
self.client.storbinary('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler_instance.rest, str(r))
def test_storlines(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
self.client.storlines('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
f = io.StringIO(RETR_DATA.replace('\r\n', '\n'))
# storlines() expects a binary file, not a text file
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises(TypeError, self.client.storlines, 'stor foo', f)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_mlsd(self):
list(self.client.mlsd())
list(self.client.mlsd(path='/'))
list(self.client.mlsd(path='/', facts=['size', 'type']))
ls = list(self.client.mlsd())
for name, facts in ls:
self.assertIsInstance(name, str)
self.assertIsInstance(facts, dict)
self.assertTrue(name)
self.assertIn('type', facts)
self.assertIn('perm', facts)
self.assertIn('unique', facts)
def set_data(data):
self.server.handler_instance.next_data = data
def test_entry(line, type=None, perm=None, unique=None, name=None):
type = 'type' if type is None else type
perm = 'perm' if perm is None else perm
unique = 'unique' if unique is None else unique
name = 'name' if name is None else name
set_data(line)
_name, facts = next(self.client.mlsd())
self.assertEqual(_name, name)
self.assertEqual(facts['type'], type)
self.assertEqual(facts['perm'], perm)
self.assertEqual(facts['unique'], unique)
# plain
test_entry('type=type;perm=perm;unique=unique; name\r\n')
# "=" in fact value
test_entry('type=ty=pe;perm=perm;unique=unique; name\r\n', type="ty=pe")
test_entry('type==type;perm=perm;unique=unique; name\r\n', type="=type")
test_entry('type=t=y=pe;perm=perm;unique=unique; name\r\n', type="t=y=pe")
test_entry('type=====;perm=perm;unique=unique; name\r\n', type="====")
# spaces in name
test_entry('type=type;perm=perm;unique=unique; na me\r\n', name="na me")
test_entry('type=type;perm=perm;unique=unique; name \r\n', name="name ")
test_entry('type=type;perm=perm;unique=unique; name\r\n', name=" name")
test_entry('type=type;perm=perm;unique=unique; n am e\r\n', name="n am e")
# ";" in name
test_entry('type=type;perm=perm;unique=unique; na;me\r\n', name="na;me")
test_entry('type=type;perm=perm;unique=unique; ;name\r\n', name=";name")
test_entry('type=type;perm=perm;unique=unique; ;name;\r\n', name=";name;")
test_entry('type=type;perm=perm;unique=unique; ;;;;\r\n', name=";;;;")
# case sensitiveness
set_data('Type=type;TyPe=perm;UNIQUE=unique; name\r\n')
_name, facts = next(self.client.mlsd())
for x in facts:
self.assertTrue(x.islower())
# no data (directory empty)
set_data('')
self.assertRaises(StopIteration, next, self.client.mlsd())
set_data('')
for x in self.client.mlsd():
self.fail("unexpected data %s" % x)
def test_makeport(self):
with self.client.makeport():
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd,
'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv')
def test_with_statement(self):
self.client.quit()
def is_client_connected():
if self.client.sock is None:
return False
try:
self.client.sendcmd('noop')
except (OSError, EOFError):
return False
return True
# base test
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.assertTrue(is_client_connected())
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# QUIT sent inside the with block
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.client.quit()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# force a wrong response code to be sent on QUIT: error_perm
# is expected and the connection is supposed to be closed
try:
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.server.handler_instance.next_response = '550 error on quit'
except ftplib.error_perm as err:
self.assertEqual(str(err), '550 error on quit')
else:
self.fail('Exception not raised')
# needed to give the threaded server some time to set the attribute
# which otherwise would still be == 'noop'
time.sleep(0.1)
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
def test_source_address(self):
self.client.quit()
port = support.find_unused_port()
try:
self.client.connect(self.server.host, self.server.port,
source_address=(HOST, port))
self.assertEqual(self.client.sock.getsockname()[1], port)
self.client.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_source_address_passive_connection(self):
port = support.find_unused_port()
self.client.source_address = (HOST, port)
try:
with self.client.transfercmd('list') as sock:
self.assertEqual(sock.getsockname()[1], port)
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_parse257(self):
self.assertEqual(ftplib.parse257('257 "/foo/bar"'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 "/foo/bar" created'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 ""'), '')
self.assertEqual(ftplib.parse257('257 "" created'), '')
self.assertRaises(ftplib.error_reply, ftplib.parse257, '250 "/foo/bar"')
# The 257 response is supposed to include the directory
# name and in case it contains embedded double-quotes
# they must be doubled (see RFC-959, chapter 7, appendix 2).
self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar')
self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'), '/foo/b"ar')
def test_line_too_long(self):
self.assertRaises(ftplib.Error, self.client.sendcmd,
'x' * self.client.maxline * 2)
def test_retrlines_too_long(self):
self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
received = []
self.assertRaises(ftplib.Error,
self.client.retrlines, 'retr', received.append)
def test_storlines_too_long(self):
f = io.BytesIO(b'x' * self.client.maxline * 2)
self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
@skipUnless(support.IPV6_ENABLED, "IPv6 not enabled")
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOSTv6, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
with self.client.makeport():
self.assertEqual(self.server.handler_instance.last_received_cmd,
'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(len(''.join(received)), len(RETR_DATA))
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClassMixin(TestFTPClass):
"""Repeat TestFTPClass tests starting the TLS layer for both control
and data connections first.
"""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
# enable TLS
self.client.auth()
self.client.prot_p()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClass(TestCase):
"""Specific TLS_FTP class tests."""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def test_control_connection(self):
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
def test_data_connection(self):
# clear text
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# secured, after PROT P
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# PROT C is issued, the connection must be in cleartext again
self.client.prot_c()
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
def test_login(self):
# login() is supposed to implicitly secure the control connection
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.login()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
# make sure that AUTH TLS doesn't get issued again
self.client.login()
def test_auth_issued_twice(self):
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
def test_context(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# TODO: fix TLSv1.3 support
ctx.options |= ssl.OP_NO_TLSv1_3
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
keyfile=CERTFILE, context=ctx)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIs(self.client.sock.context, ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIs(sock.context, ctx)
self.assertIsInstance(sock, ssl.SSLSocket)
def test_ccc(self):
self.assertRaises(ValueError, self.client.ccc)
self.client.login(secure=True)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.ccc()
self.assertRaises(ValueError, self.client.sock.unwrap)
@skipUnless(False, "FIXME: bpo-32706")
def test_check_hostname(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# TODO: fix TLSv1.3 support
ctx.options |= ssl.OP_NO_TLSv1_3
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.check_hostname, True)
ctx.load_verify_locations(CAFILE)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
# 127.0.0.1 doesn't match SAN
self.client.connect(self.server.host, self.server.port)
with self.assertRaises(ssl.CertificateError):
self.client.auth()
        # the exception closes the connection
self.client.connect(self.server.host, self.server.port)
self.client.prot_p()
with self.assertRaises(ssl.CertificateError):
with self.client.transfercmd("list") as sock:
pass
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.auth()
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.prot_p()
with self.client.transfercmd("list") as sock:
pass
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(20)
self.port = support.bind_port(self.sock)
self.server_thread = threading.Thread(target=self.server)
self.server_thread.start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
self.old_port = ftplib.FTP.port
ftplib.FTP.port = self.port
def tearDown(self):
ftplib.FTP.port = self.old_port
self.server_thread.join()
# Explicitly clear the attribute to prevent dangling thread
self.server_thread = None
def server(self):
        # This method sets the evt twice:
        # 1) when the connection is ready to be accepted.
        # 2) when it is safe for the caller to close the connection.
self.sock.listen()
# (1) Signal the caller that we are ready to accept the connection.
self.evt.set()
try:
conn, addr = self.sock.accept()
except socket.timeout:
pass
else:
conn.sendall(b"1 Hola mundo\n")
conn.shutdown(socket.SHUT_WR)
# (2) Signal the caller that it is safe to close the socket.
self.evt.set()
conn.close()
finally:
self.sock.close()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(ftp.sock.gettimeout())
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
class MiscTestCase(TestCase):
def test__all__(self):
blacklist = {'MSG_OOB', 'FTP_PORT', 'MAXLINE', 'CRLF', 'B_CRLF',
'Error', 'parse150', 'parse227', 'parse229', 'parse257',
'print_line', 'ftpcp', 'test'}
support.check__all__(self, ftplib, blacklist=blacklist)
def test_main():
tests = [TestFTPClass, TestTimeouts,
TestIPv6Environment,
TestTLS_FTPClassMixin, TestTLS_FTPClass,
MiscTestCase]
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
tenmoPg.py
|
#! /usr/bin/env nix-shell
#! nix-shell -i python3 -p "python3.withPackages(ps: [ps.numpy ps.psycopg2 ps.requests ps.websockets])"
import sys
import threading
from tenmoTypes import *
from tenmoGraph import universe_print_dot
import select
import time
import datetime
import pprint
import traceback
import io
import json
import psycopg2
import psycopg2.extensions
from psycopg2.extras import Json, DictCursor, RealDictCursor
def json_default(o):
    if isinstance(o, (datetime.date, datetime.datetime)):
        return o.isoformat()
    # Raise instead of implicitly returning None, so unsupported types fail
    # loudly rather than being silently serialized as JSON null.
    raise TypeError('Object of type %s is not JSON serializable' % type(o).__name__)
class PgJson(Json):
def dumps(self, o):
return json.dumps(o, default=json_default)
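# PgJson subclasses psycopg2's Json adapter so that datetimes inside an event
# payload are stored as ISO-8601 strings. A minimal usage sketch (the table
# and column names here are hypothetical):
#   cur.execute("INSERT INTO docs(doc) VALUES (%s)",
#               [PgJson({'seen_at': datetime.datetime.now()})])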
conn = None
def getPgConn(pgUri: str):
global conn
if conn is None:
conn = psycopg2.connect(pgUri, cursor_factory=RealDictCursor)
return conn
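# Note: getPgConn caches a single module-level connection, which is not safe
# to share across threads; the worker and cleaner threads below therefore
# open their own connections via psycopg2.connect.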
def send(events : Sequence[Event], pgUri: str):
conn = getPgConn(pgUri)
for e in events:
event_type = type(e).__name__
with conn:
with conn.cursor() as cur:
cur.execute("INSERT INTO events(ulid, created_at, event_type, payload) VALUES (%s, %s, %s, %s)",
[e.event_ulid, e.timestamp, event_type, PgJson(e._asdict())])
def listen(pgUri: str, cb):
conn = getPgConn(pgUri)
listenConn(conn, cb)
def listenConn(conn, cb):
curs = conn.cursor()
curs.execute("LISTEN events_changed;")
seconds_passed = 0
while True:
conn.commit()
if select.select([conn],[],[],5) == ([],[],[]):
seconds_passed += 5
print("{} seconds passed without a notification...".format(seconds_passed))
else:
seconds_passed = 0
conn.poll()
conn.commit()
while conn.notifies:
notify = conn.notifies.pop()
cb(notify, conn)
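# listenConn blocks in select() for up to five seconds waiting for the
# connection's socket to become readable; poll() then ingests any queued
# NOTIFY messages, each of which is handed to the callback. Usage sketch
# (the connection URI is hypothetical):
#   listen('postgresql://localhost/tenmo', print_notify)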
def print_notify(notify, conn):
print("Got NOTIFY:", datetime.datetime.now(), notify.pid, notify.channel, notify.payload)
def ensure_entity(conn, curs, event):
p = event['payload']
if 'entity_id' in p:
curs.execute("INSERT INTO entities (entity_id, description) VALUES (%s, %s) ON CONFLICT DO NOTHING",
[p['entity_id'], p.get('entity_description', '')])
def ensure_process(conn, curs, event):
p = event['payload']
if 'process_id' in p and p['process_id'] is not None:
curs.execute("INSERT INTO process (process_id) VALUES (%s) ON CONFLICT DO NOTHING",
[p['process_id']])
def ensure_incarnation(conn, curs, event):
p = event['payload']
creator_id = p['execution_id']
if p['type'] == 'r':
creator_id = None
curs.execute("""INSERT INTO incarnations AS old (incarnation_id, entity_id, parent_id, creator_id, description)
VALUES (%s, %s, %s, %s, %s) ON CONFLICT (incarnation_id)
DO UPDATE SET creator_id = COALESCE(old.creator_id, EXCLUDED.creator_id)""",
[p['incarnation_id'], p.get('entity_id', None), p.get('parent_id', None), creator_id, p.get('incarnation_description', None)])
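# The ON CONFLICT ... COALESCE upsert above makes event replay idempotent:
# re-processing the same event only fills in a creator_id that was previously
# NULL and never overwrites one that is already recorded.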
def insert_execution(conn, curs, event):
p = event['payload']
curs.execute("""INSERT INTO executions AS old
(execution_id, begin_timestamp, parent_id, creator_id, process_id, description)
VALUES (%s, %s, %s, %s, %s, %s)
ON CONFLICT DO NOTHING""",
[p['execution_id'],
p['timestamp'],
p.get('parent_id', None),
p.get('creator_id', None),
p.get('process_id', None),
p.get('description', '')])
def finish_execution(conn, curs, event):
p = event['payload']
with conn.cursor() as c:
c.execute("""UPDATE executions AS old
SET end_timestamp = %s
WHERE execution_id = %s
RETURNING execution_id""",
[p['timestamp'], p['execution_id']])
return c.rowcount == 1
def insert_operation(conn, curs, event):
p = event['payload']
curs.execute("""INSERT INTO operations AS old
( operation_id
, ts
, execution_id
, op_type
, entity_id
, incarnation_id
, entity_description
, incarnation_description)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
ON CONFLICT DO NOTHING""",
[p.get('operation_id', event['ulid'].strip()),
p.get('timestamp', event['created_at']),
p['execution_id'],
p['type'],
p.get('entity_id', None),
p['incarnation_id'],
p.get('entity_description', ''),
p.get('incarnation_description', '')])
def ensure_interaction(conn, curs, event):
p = event['payload']
curs.execute("""INSERT INTO interactions AS old
(interaction_id, ts, initiator_participant, responder_participant, description)
VALUES (%s, %s, %s, %s, %s) ON CONFLICT (interaction_id)
DO UPDATE
SET ts = COALESCE(old.ts, EXCLUDED.ts),
initiator_participant = COALESCE(old.initiator_participant, EXCLUDED.initiator_participant),
responder_participant = COALESCE(old.responder_participant, EXCLUDED.responder_participant),
description = COALESCE(old.description, EXCLUDED.description)""",
[p['interaction_id'],
p.get('timestamp', event['created_at']),
p['sender'],
p['target'],
p.get('interaction_description', None)])
def insert_message(conn, curs, event):
p = event['payload']
curs.execute("""INSERT INTO messages AS old
( message_id
, interaction_id
, ts
, sender
, target
, payload
, incarnations_ids)
VALUES (%s, %s, %s, %s, %s, %s, %s)
ON CONFLICT DO NOTHING""",
[p.get('message_id', event['ulid'].strip()),
p['interaction_id'],
p.get('timestamp', event['created_at']),
p['sender'],
p['target'],
p.get('payload', None),
p.get('incarnations_ids', None)])
def process_one_event(conn, curs, event):
print('process_one_event: ', event['ulid'])
pprint.pprint(event)
try:
if event['event_type'] == 'EventExecutionBegins':
ensure_process(conn, curs, event)
insert_execution(conn, curs, event)
return True
elif event['event_type'] == 'EventExecutionEnds':
return finish_execution(conn, curs, event)
elif event['event_type'] == 'EventOperation':
ensure_entity(conn, curs, event)
ensure_incarnation(conn, curs, event)
insert_operation(conn, curs, event)
return True
elif event['event_type'] == 'EventMessage':
ensure_interaction(conn, curs, event)
insert_message(conn, curs, event)
return True
except Exception as e:
pprint.pprint(e)
print(traceback.format_exc())
return False
def process_events_batch(pgUri, signal):
conn = psycopg2.connect(pgUri, cursor_factory=RealDictCursor)
n = 0
print('process_events_batch')
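    # Worker protocol: claim one idle ('i') event row with FOR UPDATE, mark it
    # claimed ('c') and commit so other workers skip it, apply it to the
    # normalized tables, then mark it processed ('p'). When a pass finds no
    # work, refresh the derived graph and wait on the signal for new
    # notifications.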
while True:
processed = 0
with conn.cursor() as curs:
print('select one to process')
curs.execute("SELECT * FROM events WHERE status = 'i' AND attempts < 50 LIMIT 1 FOR UPDATE")
for r in curs:
print('got', r['ulid'])
processed += 1
with conn.cursor() as c:
c.execute("UPDATE events SET status = 'c', attempts = attempts + 1 WHERE ulid = %s", [r['ulid']])
print('claimed', c.rowcount)
conn.commit()
with conn.cursor() as c:
if process_one_event(conn, c, r):
print('releasing')
c.execute("UPDATE events SET status = 'p' WHERE ulid = %s", [r['ulid']])
conn.commit()
                        print('committed')
if processed == 0:
with conn.cursor() as curs:
print('populating graph')
curs.execute('call populate_graph()')
conn.commit()
signal.wait(30)
signal.clear()
continue
def clean_events(pgUri: str):
"""
Repeatedly unclaims events which stay in claimed mode longer than 5 seconds.
"""
conn = psycopg2.connect(pgUri, cursor_factory=RealDictCursor)
while True:
processed = 0
with conn:
with conn.cursor() as curs:
curs.execute("UPDATE events SET status = 'i' WHERE status = 'c' AND (clock_timestamp() - modified) > interval '00:00:05'")
processed = curs.rowcount
if processed > 0:
print('Un-claimed %d rows' % processed)
continue
time.sleep(5)
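# clean_events complements process_events_batch: if a worker dies after
# claiming a row but before releasing it, the row returns to the idle pool
# once its 'modified' timestamp is more than five seconds old.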
def process_events_forever(pgUri: str):
conn = getPgConn(pgUri)
curs = conn.cursor()
curs.execute("LISTEN events_changed;")
conn.commit()
signal = threading.Event()
worker = threading.Thread(target=process_events_batch, args=(pgUri,signal,))
worker.start()
cleaner = threading.Thread(target=clean_events, args=(pgUri,))
cleaner.start()
seconds_passed = 0
while True:
conn.commit()
if select.select([conn],[],[],5) == ([],[],[]):
seconds_passed += 5
print("{} seconds passed without a notification...".format(seconds_passed))
else:
seconds_passed = 0
conn.poll()
conn.commit()
while conn.notifies:
print('Got notification')
notify = conn.notifies.pop()
signal.set()
# cb(notify, conn)
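    # Unreachable in practice: the notification loop above never exits; the
    # joins document the intended shutdown path if it ever does.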
worker.join()
cleaner.join()
def fromPgDict(r):
d = dict(r)
if 'stored_at' in d:
del d['stored_at']
return d
def entityFromPg(row):
ent = Entity(**fromPgDict(row))
return (row['entity_id'], ent._replace(incarnations = []))
def processFromPg(row):
return (row['process_id'], Process(**fromPgDict(row)))
def incarnationFromPg(row):
return (row['incarnation_id'], Incarnation(**fromPgDict(row)))
def operationFromPg(row):
return (row['operation_id'], Operation(**fromPgDict(row)))
def executionFromPg(row):
return (row['execution_id'], Execution(**fromPgDict(row)))
def interactionFromPg(row):
inter = Interaction(**fromPgDict(row))
return (row['interaction_id'], inter._replace(messages = []))
def messageFromPg(row):
return (row['message_id'], Message(**fromPgDict(row)))
def assertFromPg(row):
return Assert(**fromPgDict(row))
def load_universe(pgUri: str):
conn = getPgConn(pgUri)
with conn:
with conn.cursor() as c:
c.execute("SELECT * FROM executions")
executions = dict( executionFromPg(r) for r in c )
with conn.cursor() as c:
c.execute("SELECT * FROM incarnations")
incarnations = dict( incarnationFromPg(r) for r in c )
with conn.cursor() as c:
c.execute("SELECT * FROM operations")
operations = dict( operationFromPg(r) for r in c )
with conn.cursor() as c:
c.execute("SELECT * FROM processes")
processes = dict( processFromPg(r) for r in c )
with conn.cursor() as c:
c.execute("SELECT * FROM entities")
entities = dict( entityFromPg(r) for r in c )
with conn.cursor() as c:
c.execute("SELECT * FROM interactions")
interactions = dict( interactionFromPg(r) for r in c )
with conn.cursor() as c:
c.execute("SELECT * FROM messages")
messages = dict( messageFromPg(r) for r in c )
with conn.cursor() as c:
c.execute("SELECT * FROM asserts")
asserts = set( assertFromPg(r) for r in c )
for iid, i in incarnations.items():
entities[i.entity_id].incarnations.append(i.incarnation_id)
for mid, m in messages.items():
interactions[m.interaction_id].messages.append(m.message_id)
u = Universe(executions=executions, operations=operations, incarnations=incarnations, entities=entities, processes=processes, interactions=interactions, messages=messages, asserts=asserts)
# pprint.pprint(u)
return u
def serve(pgUri):
import tenmoServe
def serveUniverse(pgUri):
print('serving dot')
output = io.BytesIO()
u = load_universe(pgUri)
universe_print_dot(u, output)
return output.getvalue()
tenmoServe.serve(pgUri, '/dot', serveUniverse)
if __name__ == "__main__":
if sys.argv[2] == 'listen':
listen(sys.argv[1], print_notify)
elif sys.argv[2] == 'dot':
universe_print_dot(load_universe(sys.argv[1]))
elif sys.argv[2] == 'serve':
serve(sys.argv[1])
elif sys.argv[2] == 'process':
process_events_forever(sys.argv[1])
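# Example invocations (the connection URI is hypothetical):
#   ./tenmoPg.py postgresql://localhost/tenmo listen
#   ./tenmoPg.py postgresql://localhost/tenmo dot > universe.dot
#   ./tenmoPg.py postgresql://localhost/tenmo process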
|
monitored_session_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for monitored_session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import glob
import os
import threading
import time
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import debug_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import coordinator
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
class ScaffoldTest(test.TestCase):
"""Scaffold tests."""
def test_nothing_created_before_finalize(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
self.assertEqual(None, scaffold.init_op)
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertEqual(None, scaffold.ready_op)
self.assertEqual(None, scaffold.ready_for_local_init_op)
self.assertEqual(None, scaffold.local_init_op)
self.assertEqual(None, scaffold.saver)
def test_defaults_empty_graph(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
variables.Variable(1, name='my_var')
variables.Variable(
2, name='my_local_var', collections=[ops.GraphKeys.LOCAL_VARIABLES])
scaffold.finalize()
self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
with self.test_session() as sess:
self.assertItemsEqual([b'my_var', b'my_local_var'],
sess.run(scaffold.ready_op))
self.assertItemsEqual([b'my_var'],
sess.run(scaffold.ready_for_local_init_op))
sess.run(scaffold.init_op)
self.assertEqual(0, len(sess.run(scaffold.ready_for_local_init_op)))
sess.run(scaffold.local_init_op)
self.assertEqual(0, len(sess.run(scaffold.ready_op)))
def test_defaults_no_variables(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
constant_op.constant(1, name='my_const')
scaffold.finalize()
self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
def test_caches_values(self):
with ops.Graph().as_default():
variables.Variable([1])
scaffold1 = monitored_session.Scaffold()
scaffold1.finalize()
scaffold2 = monitored_session.Scaffold()
scaffold2.finalize()
self.assertEqual(scaffold1.init_op, scaffold2.init_op)
self.assertEqual(scaffold1.ready_op, scaffold2.ready_op)
self.assertEqual(scaffold1.ready_for_local_init_op,
scaffold2.ready_for_local_init_op)
self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op)
self.assertEqual(scaffold1.saver, scaffold2.saver)
def test_raise_error_if_more_than_one_cached_item(self):
with ops.Graph().as_default():
variables.Variable([1])
ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
with self.assertRaisesRegexp(RuntimeError, 'More than one item'):
monitored_session.Scaffold().finalize()
def test_uses_passed_values(self):
with ops.Graph().as_default():
variables.Variable([1])
saver = saver_lib.Saver()
scaffold = monitored_session.Scaffold(
init_op=2,
init_feed_dict=3,
init_fn=lambda scaffold, sess: 4,
ready_op=5,
ready_for_local_init_op=6,
local_init_op=7,
saver=saver)
scaffold.finalize()
self.assertEqual(2, scaffold.init_op)
self.assertEqual(3, scaffold.init_feed_dict)
self.assertTrue(callable(scaffold.init_fn))
self.assertEqual(5, scaffold.ready_op)
self.assertEqual(6, scaffold.ready_for_local_init_op)
self.assertEqual(7, scaffold.local_init_op)
self.assertEqual(saver, scaffold.saver)
def test_graph_is_finalized(self):
with ops.Graph().as_default():
variables.Variable([1])
monitored_session.Scaffold().finalize()
with self.assertRaisesRegexp(RuntimeError,
'Graph is finalized and cannot be modified'):
constant_op.constant([0])
def _test_dir(temp_dir, test_name):
"""Create an empty dir to use for tests.
Args:
temp_dir: Tmp directory path.
test_name: Name of the test.
Returns:
Absolute path to the test directory.
"""
test_dir = os.path.join(temp_dir, test_name)
if os.path.isdir(test_dir):
for f in glob.glob('%s/*' % test_dir):
os.remove(f)
else:
os.makedirs(test_dir)
return test_dir
class FakeHook(session_run_hook.SessionRunHook):
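  """A SessionRunHook that records every callback invocation for inspection."""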
def __init__(self):
self.should_stop = False
self.request = None
self.call_counter = collections.Counter()
self.last_run_context = None
self.last_run_values = None
def begin(self):
self.call_counter['begin'] += 1
def after_create_session(self, session, coord): # pylint: disable=unused-argument
self.call_counter['after_create_session'] += 1
def before_run(self, run_context):
self.call_counter['before_run'] += 1
self.last_run_context = run_context
return self.request
def after_run(self, run_context, run_values):
self.call_counter['after_run'] += 1
self.last_run_values = run_values
if self.should_stop:
run_context.request_stop()
def end(self, session):
self.call_counter['end'] += 1
class MonitoredTrainingSessionTest(test.TestCase):
"""Tests MonitoredTrainingSession."""
def test_saving_restoring_checkpoint(self):
logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(2, session.run(gstep))
def test_summaries_steps(self):
logdir = _test_dir(self.get_temp_dir(), 'test_summaries_steps')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
summary.scalar('my_summary_tag', new_gstep * 2)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_summaries_steps=100) as session:
for _ in range(101):
session.run(new_gstep)
summaries = util_test.latest_summaries(logdir)
tags = [s.summary.value[0].tag for s in summaries]
self.assertIn('my_summary_tag', tags)
self.assertIn('global_step/sec', tags)
def test_summaries_secs(self):
logdir = _test_dir(self.get_temp_dir(), 'test_summaries_secs')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
summary.scalar('my_summary_tag', new_gstep * 2)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_summaries_steps=None,
save_summaries_secs=0.1) as session:
session.run(new_gstep)
time.sleep(0.2)
for _ in range(101):
session.run(new_gstep)
summaries = util_test.latest_summaries(logdir)
tags = [s.summary.value[0].tag for s in summaries]
self.assertIn('my_summary_tag', tags)
self.assertIn('global_step/sec', tags)
def test_custom_saving(self):
logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
fake_hook = FakeHook()
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
chief_only_hooks=[fake_hook],
save_checkpoint_secs=0) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# Check whether custom hook called or not
self.assertEqual(1, fake_hook.call_counter['begin'])
# A restart will not find the checkpoint, since we didn't save.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(0, session.run(gstep))
class StopAtNSession(monitored_session._WrappedSession):
"""A wrapped session that stops at the N-th call to _check_stop."""
def __init__(self, sess, n):
super(StopAtNSession, self).__init__(sess)
self._count = n
def _check_stop(self):
if self._count == 0:
return True
self._count -= 1
return False
class WrappedSessionTest(test.TestCase):
"""_WrappedSession tests."""
def test_properties(self):
with self.test_session() as sess:
constant_op.constant(0.0)
wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertEqual(sess.graph, wrapped_sess.graph)
      self.assertEqual(sess.sess_str, wrapped_sess.sess_str)
def test_should_stop_on_close(self):
with self.test_session() as sess:
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertFalse(wrapped_sess.should_stop())
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
def test_should_stop_uses_check_stop(self):
with self.test_session() as sess:
wrapped_sess = StopAtNSession(sess, 3)
self.assertFalse(wrapped_sess.should_stop())
self.assertFalse(wrapped_sess.should_stop())
self.assertFalse(wrapped_sess.should_stop())
self.assertTrue(wrapped_sess.should_stop())
def test_should_stop_delegates_to_wrapped_session(self):
with self.test_session() as sess:
wrapped_sess0 = StopAtNSession(sess, 4)
wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertTrue(wrapped_sess1.should_stop())
def test_close_twice(self):
with self.test_session() as sess:
wrapped_sess = monitored_session._WrappedSession(sess)
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
def test_run(self):
with self.test_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
self.assertEqual(42, sess.run(v, feed_dict={c: 42}))
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))
def busy_wait_for_coord_stop(coord):
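  """Spins until the coordinator requests a stop; used by the thread tests."""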
while not coord.should_stop():
time.sleep(0.001)
class CoordinatedSessionTest(test.TestCase):
"""_CoordinatedSession tests."""
def test_properties(self):
with self.test_session() as sess:
constant_op.constant(0.0)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertEqual(sess.graph, coord_sess.graph)
      self.assertEqual(sess.sess_str, coord_sess.sess_str)
def test_run(self):
with self.test_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))
def test_should_stop_on_close(self):
with self.test_session() as sess:
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
coord_sess.close()
self.assertTrue(coord_sess.should_stop())
def test_should_stop_on_coord_stop(self):
with self.test_session() as sess:
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
coord.request_stop()
self.assertTrue(coord_sess.should_stop())
def test_dont_request_stop_on_exception_in_main_thread(self):
with self.test_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
self.assertEqual(0, coord_sess.run(c))
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
self.assertFalse(coord.should_stop())
self.assertFalse(coord_sess.should_stop())
def test_stop_threads_on_close_after_exception(self):
with self.test_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
threads = [
threading.Thread(
target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
]
for t in threads:
coord.register_thread(t)
t.start()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(0, coord_sess.run(c))
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
for t in threads:
self.assertTrue(t.is_alive())
with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
def test_stop_threads_on_close(self):
with self.test_session() as sess:
coord = coordinator.Coordinator()
threads = [
threading.Thread(
target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
]
for t in threads:
coord.register_thread(t)
t.start()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
class AbortAtNSession(object):
"""A mock sessionthat aborts at the N-th run call."""
def __init__(self, sess, n):
self._sess = sess
self._count = n
def close(self):
pass
def run(self, *args, **kwargs):
if self._count == 0:
raise errors_impl.AbortedError('Aborted at N', None, None)
self._count -= 1
return self._sess.run(*args, **kwargs)
class RecoverableSessionTest(test.TestCase):
"""_RecoverableSession tests."""
class _SessionReturner(object):
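    """A session creator that always returns the same pre-built session."""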
def __init__(self, sess):
self._sess = sess
def create_session(self):
return self._sess
def test_properties(self):
with self.test_session() as sess:
constant_op.constant(0.0)
recoverable_sess = monitored_session._RecoverableSession(
self._SessionReturner(sess))
      self.assertEqual(sess.graph, recoverable_sess.graph)
      self.assertEqual(sess.sess_str, recoverable_sess.sess_str)
def test_run(self):
with self.test_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
recoverable_sess = monitored_session._RecoverableSession(
self._SessionReturner(sess))
self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
def test_recovery(self):
with self.test_session() as sess:
class StackSessionCreator(object):
def __init__(self, sess):
self.sessions_to_use = [
AbortAtNSession(sess, x + 1) for x in range(3)
]
def create_session(self):
return self.sessions_to_use.pop(0)
c = constant_op.constant(0)
v = array_ops.identity(c)
session_creator = StackSessionCreator(sess)
# List of 3 sessions to use for recovery. The first one aborts
# after 1 run() call, the second after 2 run calls, the third
# after 3 run calls.
self.assertEqual(3, len(session_creator.sessions_to_use))
      # Make the recoverable session use these 3 sessions in sequence by
# passing a factory that pops from the session_to_use list.
recoverable_sess = monitored_session._RecoverableSession(session_creator)
self.assertEqual(
2, len(session_creator.sessions_to_use)) # One session popped.
# Using first session.
self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
self.assertEqual(
2, len(session_creator.sessions_to_use)) # Still 2 sessions available
# This will fail and recover by picking up the second session.
self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42}))
self.assertEqual(
1, len(session_creator.sessions_to_use)) # Still 1 session available
self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33}))
self.assertEqual(
1, len(session_creator.sessions_to_use)) # Still 1 session available
# This will fail and recover by picking up the last session.
self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24}))
self.assertEqual(
0, len(session_creator.sessions_to_use)) # All sessions used.
self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))
self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))
# This will fail and throw a real error as the pop() will fail.
with self.assertRaisesRegexp(IndexError, 'pop from empty list'):
recoverable_sess.run(v, feed_dict={c: -12})
class FakeSession(monitored_session._WrappedSession):
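  """A _WrappedSession that records the kwargs passed to run() for assertions."""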
def __init__(self, sess):
monitored_session._WrappedSession.__init__(self, sess)
self.args_called = {}
def run(self, fetches, **kwargs):
self.args_called = dict(kwargs)
# Call run only with fetches since we directly pass other arguments.
return monitored_session._WrappedSession.run(self, fetches)
class HookedSessionTest(test.TestCase):
"""Tests of _HookedSession."""
def testRunPassesAllArguments(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_run = FakeSession(sess)
mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[])
a_tensor = constant_op.constant([0], name='a_tensor')
sess.run(variables.global_variables_initializer())
output = mon_sess.run(fetches=a_tensor,
feed_dict='a_feed',
options='an_option',
run_metadata='a_metadata')
self.assertEqual(output, [0])
self.assertEqual(mock_run.args_called, {
'feed_dict': 'a_feed',
'options': 'an_option',
'run_metadata': 'a_metadata'
})
def testCallsHooksBeginEnd(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
sess.run(variables.global_variables_initializer())
mon_sess.run(a_tensor)
for hook in [mock_hook, mock_hook2]:
self.assertEqual(
hook.last_run_values,
session_run_hook.SessionRunValues(
results=None,
options=config_pb2.RunOptions(),
run_metadata=config_pb2.RunMetadata()))
self.assertEqual(hook.last_run_context.original_args,
session_run_hook.SessionRunArgs(a_tensor))
self.assertEqual(hook.last_run_context.session, sess)
self.assertEqual(hook.call_counter['begin'], 0)
self.assertEqual(hook.call_counter['after_create_session'], 0)
self.assertEqual(hook.call_counter['before_run'], 1)
self.assertEqual(hook.call_counter['after_run'], 1)
def testShouldStop(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
constant_op.constant([0], name='a_tensor')
sess.run(variables.global_variables_initializer())
mon_sess.run(fetches='a_tensor')
self.assertFalse(mon_sess.should_stop())
mock_hook.should_stop = True
mon_sess.run(fetches='a_tensor')
self.assertTrue(mon_sess.should_stop())
def testFetchesHookRequests(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
another_tensor = constant_op.constant([5], name='another_tensor')
third_tensor = constant_op.constant([10], name='third_tensor')
mock_hook.request = session_run_hook.SessionRunArgs([another_tensor])
mock_hook2.request = session_run_hook.SessionRunArgs([third_tensor])
sess.run(variables.global_variables_initializer())
output = mon_sess.run(fetches=a_tensor)
self.assertEqual(output, [0])
self.assertEqual(mock_hook.last_run_values.results, [5])
self.assertEqual(mock_hook2.last_run_values.results, [10])
def testOnlyHooksHaveFeeds(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
sess.run(variables.global_variables_initializer())
self.assertEqual(mon_sess.run(fetches=add_tensor), [15])
def testBothHooksAndUserHaveFeeds(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
c_tensor = constant_op.constant([0], name='c_tensor')
add_tensor = a_tensor + b_tensor + c_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
sess.run(variables.global_variables_initializer())
feed_dict = {c_tensor: [20]}
self.assertEqual(
mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35])
# User feed_dict should not be changed
self.assertEqual(len(feed_dict), 1)
def testHooksFeedConflicts(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [10]})
sess.run(variables.global_variables_initializer())
with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
mon_sess.run(fetches=add_tensor)
def testHooksAndUserFeedConflicts(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
sess.run(variables.global_variables_initializer())
with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})
class RaiseOnceAtCountN(session_run_hook.SessionRunHook):
"""Hook that raises an Exception at step N."""
def __init__(self, n, ex):
self.n = n
self.ex = ex
self.raised = False
def before_run(self, run_context):
# Raise the first time we reach step N.
self.n -= 1
if 0 == self.n and not self.raised:
self.raised = True
raise self.ex
return None
class RunOptionsMetadataHook(session_run_hook.SessionRunHook):
"""A hook that observes & optionally modifies RunOptions and RunMetadata."""
def __init__(self, trace_level, timeout_in_ms, output_partition_graphs,
debug_tensor_watch):
self._trace_level = trace_level
self._timeout_in_ms = timeout_in_ms
self._output_partition_graphs = output_partition_graphs
self._debug_tensor_watch = debug_tensor_watch
self.run_options_list = []
self.run_metadata_list = []
def before_run(self, run_context):
options = config_pb2.RunOptions(
trace_level=self._trace_level,
timeout_in_ms=self._timeout_in_ms,
output_partition_graphs=self._output_partition_graphs)
options.debug_options.debug_tensor_watch_opts.extend(
[self._debug_tensor_watch])
return session_run_hook.SessionRunArgs(None, None, options=options)
def after_run(self, run_context, run_values):
self.run_options_list.append(run_values.options)
self.run_metadata_list.append(run_values.run_metadata)
class MonitoredSessionTest(test.TestCase):
"""MonitoredSession tests."""
def test_defaults(self):
with ops.Graph().as_default():
a_var = variables.Variable(0)
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
def test_last_step(self):
logdir = _test_dir(self.get_temp_dir(), 'test_last_step')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
# Run till step 3 and save.
hooks = [basic_session_run_hooks.StopAtStepHook(last_step=3)]
scaffold = monitored_session.Scaffold().finalize()
with monitored_session.MonitoredSession(hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(3, session.run(do_step))
self.assertTrue(session.should_stop())
save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Run till step 5 and save.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = monitored_session.ChiefSessionCreator(
monitored_session.Scaffold(init_fn=load_ckpt))
hooks = [basic_session_run_hooks.StopAtStepHook(last_step=5)]
with monitored_session.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(3, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(5, session.run(do_step))
self.assertTrue(session.should_stop())
def test_num_steps(self):
logdir = _test_dir(self.get_temp_dir(), 'test_num_steps')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
# Do 3 steps and save.
hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=3)]
scaffold = monitored_session.Scaffold().finalize()
with monitored_session.MonitoredSession(hooks=hooks) as session:
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Restore and do 4 steps.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = monitored_session.ChiefSessionCreator(
scaffold=monitored_session.Scaffold(init_fn=load_ckpt))
hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=4)]
with monitored_session.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
  # This set of tests verifies the supervised session behavior when exceptions
# are raised next to the innermost session run() call.
def test_recovery(self):
logdir = _test_dir(self.get_temp_dir(), 'test_recovery')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
scaffold = monitored_session.Scaffold()
      # Use a hook to save the model after every step (save_steps=1). It also
      # saves it at the end.
hooks = [
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)
]
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir)) as session:
self.assertEqual(2, session.run(gstep))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold,
checkpoint_filename_with_path=saver_lib.latest_checkpoint(
logdir))) as session:
self.assertEqual(2, session.run(gstep))
def test_retry_initialization_on_aborted_error(self):
# Tests that we silently retry on abort during initialization.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
self.init_raised_aborted_error = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
if not self.init_raised_aborted_error:
self.init_raised_aborted_error = True
raise errors_impl.AbortedError(None, None, 'Abort')
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold=monitored_session.Scaffold(
init_fn=_init_fn))) as session:
self.assertFalse(session.should_stop())
self.assertEqual(0, session.run(gstep))
self.assertTrue(self.init_raised_aborted_error)
def test_retry_on_aborted_error(self):
# Tests that we silently retry on abort. Note that this does not test
# recovery as we do not use a CheckpointSaver in this test.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, errors_impl.AbortedError(None, None, 'Abort'))
with monitored_session.MonitoredSession(hooks=[hook]) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises AbortedError. The
# MonitoredSession automatically retries and restart from a freshly
# initialized session, so the step is back to 0 and running do_step
# moves it to 1.
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertTrue(hook.raised)
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
def test_recover_and_retry_on_aborted_error(self):
# Tests that we silently retry and recover on abort. This test uses
# a CheckpointSaver to have something to recover from.
logdir = _test_dir(self.get_temp_dir(),
'test_recover_and_retry_on_aborted_error')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
scaffold = monitored_session.Scaffold()
abort_hook = RaiseOnceAtCountN(
4, errors_impl.AbortedError(None, None, 'Abort'))
# Save after each step.
ckpt_hook = basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)
hooks = [abort_hook, ckpt_hook]
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises AbortedError. The
# MonitoredSession automatically restores and retries.
self.assertEqual(3, session.run(do_step))
self.assertTrue(abort_hook.raised)
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
def test_exit_cleanly_on_out_of_range_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
'EOI'))
session = monitored_session.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises OutOfRange. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_exit_cleanly_on_stop_iteration_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, StopIteration)
session = monitored_session.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises StopIteration. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_regular_exception_pass_through_run(self):
# Tests that regular exceptions just pass through a "with
# MonitoredSession" block and set the session in stop mode.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))
session = monitored_session.MonitoredSession(hooks=[hook])
with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
with session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# This triggers the hook and raises the exception
session.run(do_step)
# We should not hit this
self.assertFalse(True)
self.assertTrue(hook.raised)
self.assertTrue(session.should_stop())
def test_regular_exception_reported_to_coord_pass_through_run(self):
# Tests that regular exceptions reported to the coordinator from a thread
# passes through a "run()" call within a "with MonitoredSession" block and
# set the session in stop mode.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
session = monitored_session.MonitoredSession()
run_performed_without_error = False
with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
# Call run() which should perform normally.
self.assertEqual(0, session.run(gstep))
run_performed_without_error = True
self.assertTrue(run_performed_without_error)
def test_regular_exception_reported_to_coord_pass_through_return(self):
# Tests that regular exceptions reported to the coordinator from a thread
    # pass through returning from a "with MonitoredSession" block and
# set the session in stop mode.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
session = monitored_session.MonitoredSession()
with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
self.assertTrue(session.should_stop())
  # This set of tests verifies the session behavior when exceptions are raised
# from code inside a "with MonitoredSession:" context.
def test_stop_cleanly_when_no_exception_in_with_body(self):
    # Tests that the session stops and closes cleanly when the with-body
    # completes without raising.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.MonitoredSession()
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Should have closed.
self.assertTrue(session.should_stop())
self.assertTrue(session._is_closed())
def test_raises_regular_exceptions_in_with_body(self):
# Tests that regular exceptions in "with body" are seen outside.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.MonitoredSession()
# We should see that exception.
with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Will be visible outside the "with body".
raise RuntimeError('regular exception')
# Should have closed.
self.assertTrue(session.should_stop())
self.assertTrue(session._is_closed())
def test_graph(self):
with ops.Graph().as_default() as g:
with monitored_session.MonitoredSession() as session:
self.assertEqual(g, session.graph)
def test_graph_finalized_during_run_unfinalized_after_exit(self):
with ops.Graph().as_default() as g:
a_var = variables.Variable(0)
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
self.assertTrue(g.finalized)
self.assertFalse(g.finalized)
def test_keep_finalized_graph_as_finalized(self):
with ops.Graph().as_default() as g:
a_var = variables.Variable(0)
monitored_session.Scaffold().finalize()
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
self.assertTrue(g.finalized)
self.assertTrue(g.finalized)
def test_merge_run_options_from_hooks(self):
"""Test for rewriting RunOptions and observing RunMetadata with hooks."""
with ops.Graph().as_default():
my_const = constant_op.constant(42, name='my_const')
_ = constant_op.constant(24, name='my_const_2')
watch_a = debug_pb2.DebugTensorWatch(
node_name='my_const',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook_a = RunOptionsMetadataHook(2, 30000, False, watch_a)
watch_b = debug_pb2.DebugTensorWatch(
node_name='my_const_2',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook_b = RunOptionsMetadataHook(3, 60000, True, watch_b)
with monitored_session.MonitoredSession(
hooks=[hook_a, hook_b]) as session:
self.assertEqual(42, session.run(my_const))
# trace_level=3 should have overridden trace_level=2;
# timeout_in_ms=60000 should have overridden 30000;
# output_partition_graphs=True should have overridden False.
# The two debug tensor watches should have been merged.
self.assertEqual(
[
config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=60000,
output_partition_graphs=True,
debug_options=debug_pb2.DebugOptions(
debug_tensor_watch_opts=[watch_a, watch_b]))
],
hook_b.run_options_list)
self.assertEqual(1, len(hook_b.run_metadata_list))
self.assertTrue(
isinstance(hook_b.run_metadata_list[0], config_pb2.RunMetadata))
self.assertGreater(len(hook_b.run_metadata_list[0].partition_graphs), 0)
def test_merge_caller_and_hook_run_options(self):
"""Test that RunOptions from caller and hooks can be merged properly."""
with ops.Graph().as_default():
my_const = constant_op.constant(42, name='my_const')
_ = constant_op.constant(24, name='my_const_2')
hook_watch = debug_pb2.DebugTensorWatch(
node_name='my_const_2',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook = RunOptionsMetadataHook(2, 60000, False, hook_watch)
with monitored_session.MonitoredSession(hooks=[hook]) as session:
caller_watch = debug_pb2.DebugTensorWatch(
node_name='my_const',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
caller_options = config_pb2.RunOptions(
trace_level=3, timeout_in_ms=30000, output_partition_graphs=True)
caller_options.debug_options.debug_tensor_watch_opts.extend(
[caller_watch])
self.assertEqual(42, session.run(my_const, options=caller_options))
        # trace_level=3 from the caller should override 2 from the hook.
        # timeout_in_ms=60000 from the hook should override the caller's 30000.
        # output_partition_graphs=True from the caller should override False
        # from the hook.
        # The two debug watches from the caller and the hook should be merged,
        # in that order.
self.assertEqual(
[
config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=60000,
output_partition_graphs=True,
debug_options=debug_pb2.DebugOptions(
debug_tensor_watch_opts=[caller_watch, hook_watch]))
],
hook.run_options_list)
self.assertEqual(1, len(hook.run_metadata_list))
self.assertTrue(
isinstance(hook.run_metadata_list[0], config_pb2.RunMetadata))
self.assertGreater(len(hook.run_metadata_list[0].partition_graphs), 0)
class SingularMonitoredSessionTest(test.TestCase):
"""Tests SingularMonitoredSession."""
def test_handles_initialization(self):
with ops.Graph().as_default():
a_var = variables.Variable(0)
with monitored_session.SingularMonitoredSession() as session:
# If it's not initialized, the following statement raises an error.
self.assertEqual(0, session.run(a_var))
def test_do_not_handle_aborted_error(self):
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
class _RaiseAbortedHook(session_run_hook.SessionRunHook):
def before_run(self, run_context):
raise errors_impl.AbortedError(None, None, 'Abort')
with monitored_session.SingularMonitoredSession(
hooks=[_RaiseAbortedHook()]) as session:
with self.assertRaises(errors_impl.AbortedError):
self.assertEqual(0, session.run(gstep))
with self.assertRaises(errors_impl.AbortedError):
with monitored_session.SingularMonitoredSession(
hooks=[_RaiseAbortedHook()]) as session:
self.assertEqual(0, session.run(gstep))
def test_exit_cleanly_on_out_of_range_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
'EOI'))
session = monitored_session.SingularMonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises OutOfRange. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_regular_exception_reported_to_coord_pass_through_run(self):
# Tests that regular exceptions reported to the coordinator from a thread
# pass through a "run()" call within a "with MonitoredSession" block and
# put the session in stop mode.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
session = monitored_session.SingularMonitoredSession()
run_performed_without_error = False
with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
# Call run() which should perform normally.
self.assertEqual(0, session.run(gstep))
run_performed_without_error = True
self.assertTrue(run_performed_without_error)
def test_stop_cleanly_when_no_exception_in_with_body(self):
# Tests that the session closes cleanly when the with body raises no exception.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.SingularMonitoredSession()
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Should have closed.
self.assertTrue(session.should_stop())
self.assertEqual(None, session.raw_session())
def test_graph(self):
with ops.Graph().as_default() as g:
with monitored_session.SingularMonitoredSession() as session:
self.assertEqual(g, session.graph)
def test_raw_session(self):
with ops.Graph().as_default():
with monitored_session.SingularMonitoredSession() as session:
self.assertTrue(isinstance(session.raw_session(), session_lib.Session))
if __name__ == '__main__':
test.main()
|
server.py
|
import subprocess
import tempfile
import sys
from beat import playHeartBeat
from arduino import myHeartBeat
from threading import Thread
import datetime
import socket
import threading
#otherBeat = 68
otherDevice = "stormy-fortress-18687"
def playBeat():
while True:
playHeartBeat(68)
HOST = '138.16.161.123'
PORT = 65432
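# connect() runs an accept loop: each peer gets a listener thread that
# streams the local heart rate out, while the loop below plays back any
# rate received from the peer via playHeartBeat().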
def connect():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen()
th=[]
# conn, addr = s.accept()
# with conn:
# print('Connected by', addr)
while True:
conn, addr = s.accept()
# send over heartrate here
worker = Thread(target=listener, args=(conn, addr))
worker.start()
th.append(worker)  # Thread.start() returns None, so keep the Thread object itself
data = conn.recv(1024)
if not data:
continue
#conn.sendall(b'server says hi')
otherBeat = data.decode('utf-8')
otherBeat = otherBeat.split(" ")[0]  # keep the first token; the bare split() discarded its result
# print('other beat: ' + otherBeat)
if otherBeat != '':
otherBeat = int(otherBeat)
if otherBeat > 0:
playHeartBeat(otherBeat)
s.close()
# if not data:
# break
def listener(client, address):
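# Continuously push the local heart rate (read via myHeartBeat()) to the
# connected peer.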
while True:
# data = client.recv(1024)
# if not data:
# break
beat = str(myHeartBeat())
# print('my beat: ' + beat)
client.send(beat.encode())
connect()
#playBeat()
# x= threading.Thread(target=playBeat)
# x.start()
# y = threading.Thread(target=connect)
# y.start()
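# A minimal sketch of the peer side of this exchange (illustrative only;
# PEER_HOST and the literal rate are assumptions, not part of this project):
#
#   import socket
#   with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
#       s.connect((PEER_HOST, 65432))
#       s.sendall(b"72")      # our heart rate, read by the accept loop
#       beat = s.recv(1024)   # the rate streamed back by listener()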
|
gui - Copy.py
|
# -*- coding: utf-8 -*-
import sys
from threading import Thread
from pathlib import Path
import time
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from scipy.stats import lognorm, norm
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5 import Qt
# from .visualization import VtkWindow
# from .inout import *
# from .measure import *
# from .qslider import QSliceRange
from visualization import VtkWindow
from inout import *
from measure import *
from qslider import QSliceRange
class MainWindow(Qt.QMainWindow):
# Threading signals
calc_skel = pyqtSignal(np.ndarray)
calc_dist = pyqtSignal(np.ndarray)
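# Qt widgets may only be touched from the GUI thread; worker threads
# emit these signals so results are delivered back through the event loop.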
def __init__(self, parent = None):
Qt.QMainWindow.__init__(self, parent)
self.initUI()
# Steps
self.im = None
self.skel = None
self.dist = None
self.diam = None
# Connect threaded outputs to finishing function
self.calc_skel.connect(self.finish_skeletonize)
self.calc_dist.connect(self.finish_dist)
def initUI(self):
self.setGeometry(200, 200, 600, 600)
self.setWindowTitle("AQUAMI 3D")
self.statusBar().setStyleSheet("background-color:white")
# Menu frame
self.menuFrame = QFrame()  # instantiate; the original assigned the class itself
self.menuGrid = QGridLayout()
self.menuGrid.setSpacing(10)
self.menuGrid.setRowStretch(10,1)
load_button = QPushButton("Load", self)
load_button.setToolTip('Load a 3D dataset')
load_button.clicked.connect(self.load_click)
self.menuGrid.addWidget(load_button, 0,0)
skel_button = QPushButton("Skeletonize", self)
skel_button.setToolTip('Skeletonize image')
skel_button.clicked.connect(self.skel_click)
self.menuGrid.addWidget(skel_button, 1, 0)
dist_button = QPushButton("Dist Transform", self)
dist_button.setToolTip('Find the distance transform of the image')
dist_button.clicked.connect(self.dist_click)
self.menuGrid.addWidget(dist_button, 2, 0)
diam_button = QPushButton("Diameter", self)
diam_button.setToolTip('Calculate the diameter distribution')
diam_button.clicked.connect(self.diam_click)
self.menuGrid.addWidget(diam_button, 3, 0)
self.sliceRange = QSliceRange()
self.vtk = VtkWindow()
grid = QGridLayout()
grid.setSpacing(10)
grid.setRowStretch(0, 10)
grid.setRowMinimumHeight(0,500)
grid.setColumnStretch(1, 5)
grid.addLayout(self.menuGrid, 0, 0) # widget,row,column
grid.addWidget(self.vtk, 0,1)
grid.addWidget(self.sliceRange, 1,1)
centralWidget = QWidget()
centralWidget.setLayout(grid)
self.setCentralWidget(centralWidget)
self.show()
self.threadpool = QThreadPool()
@pyqtSlot()
def load_click(self):
path, _ = QFileDialog.getOpenFileName(self,"Select 3D image")
self.im = read_tiff_stack(path)
self.vtk.update(self.im, (0.05, 0.5))
self.sliceRange.set_range_maximums(self.im.shape)
try:
self.sliceRange.valueChanged.disconnect()
except TypeError:
# disconnect() raises TypeError when nothing is connected
pass
self.sliceRange.valueChanged.connect(self.load_update)
@pyqtSlot()
def skel_click(self):
if self.im is None:
QMessageBox.warning(self, "Warning", "Please load an image first",
QMessageBox.Ok)
elif self.skel is None:
self.statusBar().showMessage("Skeletonizing...")
Thread(target=self.start_skeletonize).start()
else:
self.vtk.update(self.skel, (0, 1))
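# start_* runs on a plain Python worker thread and only emits the result;
# the connected finish_* slot is queued back onto the GUI thread.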
def start_skeletonize(self):
self.calc_skel.emit(skeletonize(self.im))
def finish_skeletonize(self, skel):
self.skel = skel
self.statusBar().showMessage("Finding nodes...")
self.nodes = find_nodes(self.skel)
self.vtk.display(self.im, self.skel, self.nodes)
try:
self.sliceRange.valueChanged.disconnect()
except TypeError:
# disconnect() raises TypeError when nothing is connected
pass
self.sliceRange.valueChanged.connect(self.skel_update)
self.statusBar().showMessage("")
@pyqtSlot()
def dist_click(self):
if self.im is None:
QMessageBox.warning(self, "Warning", "Please load an image first",
QMessageBox.Ok)
elif self.dist is None:
self.statusBar().showMessage("Calculating distance tranform...")
Thread(target=self.start_dist).start()
else:
self.vtk.update(self.dist, (0, 1))
def start_dist(self):
self.calc_dist.emit(distance_transform(self.im))
def finish_dist(self, dist):
self.dist = dist
self.vtk.update(self.dist, (0, 0.5))
self.statusBar().showMessage("")
@pyqtSlot()
def diam_click(self):
if self.im is None:
QMessageBox.warning(self, "Warning", "Please load an image first",
QMessageBox.Ok)
else:
data = VolumeData(self.im)
# diams = calculate_diameter(self.skel, self.dist)
# n, bins, patches = plt.hist(diams, 20, edgecolor='black', normed=1)
# gfit = norm.fit(diams.flatten())
# gauss_plot = norm.pdf(bins, gfit[0], gfit[1])
# plt.plot(bins, gauss_plot, 'r--', linewidth=2)
# plt.show()
def load_update(self, i):
self.vtk.update(self.im[i[0]:i[1], i[2]:i[3], i[4]:i[5]], (0.05, 0.5))
def skel_update(self, i):
self.vtk.display(self.im[i[0]:i[1], i[2]:i[3], i[4]:i[5]],
self.skel[i[0]:i[1], i[2]:i[3], i[4]:i[5]],
self.nodes[i[0]:i[1], i[2]:i[3], i[4]:i[5]])
if __name__ == "__main__":
app = Qt.QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
|
test_dispatcher.py
|
from __future__ import print_function, division, absolute_import
import errno
import multiprocessing
import os
import platform
import shutil
import subprocess
import sys
import threading
import warnings
import inspect
import pickle
import weakref
from itertools import chain
try:
import jinja2
except ImportError:
jinja2 = None
try:
import pygments
except ImportError:
pygments = None
import numpy as np
from numba import unittest_support as unittest
from numba import utils, jit, generated_jit, types, typeof, errors
from numba import _dispatcher
from numba.compiler import compile_isolated
from numba.errors import NumbaWarning
from .support import (TestCase, tag, temp_directory, import_dynamic,
override_env_config, capture_cache_log, captured_stdout)
from numba.numpy_support import as_dtype
from numba.targets import codegen
from numba.caching import _UserWideCacheLocator
from numba.dispatcher import Dispatcher
from numba import parfor
from .test_linalg import needs_lapack
from .support import skip_parfors_unsupported
import llvmlite.binding as ll
_is_armv7l = platform.machine() == 'armv7l'
def dummy(x):
return x
def add(x, y):
return x + y
def addsub(x, y, z):
return x - y + z
def addsub_defaults(x, y=2, z=3):
return x - y + z
def star_defaults(x, y=2, *z):
return x, y, z
def generated_usecase(x, y=5):
if isinstance(x, types.Complex):
def impl(x, y):
return x + y
else:
def impl(x, y):
return x - y
return impl
def bad_generated_usecase(x, y=5):
if isinstance(x, types.Complex):
def impl(x):
return x
else:
def impl(x, y=6):
return x - y
return impl
def dtype_generated_usecase(a, b, dtype=None):
if isinstance(dtype, (types.misc.NoneType, types.misc.Omitted)):
out_dtype = np.result_type(*(np.dtype(ary.dtype.name)
for ary in (a, b)))
elif isinstance(dtype, (types.DType, types.NumberClass)):
out_dtype = as_dtype(dtype)
else:
raise TypeError("Unhandled Type %s" % type(dtype))
def _fn(a, b, dtype=None):
return np.ones(a.shape, dtype=out_dtype)
return _fn
class BaseTest(TestCase):
jit_args = dict(nopython=True)
def compile_func(self, pyfunc):
def check(*args, **kwargs):
expected = pyfunc(*args, **kwargs)
result = f(*args, **kwargs)
self.assertPreciseEqual(result, expected)
f = jit(**self.jit_args)(pyfunc)
return f, check
def check_access_is_preventable():
# This exists to check whether it is possible to prevent access to
# a file/directory through the use of `chmod 500`. If a user has
# elevated rights (e.g. root) then writes are likely to be possible
# anyway. Tests that require functioning access prevention are
# therefore skipped based on the result of this check.
tempdir = temp_directory('test_cache')
test_dir = (os.path.join(tempdir, 'writable_test'))
os.mkdir(test_dir)
# assume access prevention is not possible
ret = False
# check a write is possible
with open(os.path.join(test_dir, 'write_ok'), 'wt') as f:
f.write('check1')
# now forbid access
os.chmod(test_dir, 0o500)
try:
with open(os.path.join(test_dir, 'write_forbidden'), 'wt') as f:
f.write('check2')
except (OSError, IOError) as e:
# Check that the cause of the exception is due to access/permission
# as per https://github.com/conda/conda/blob/4.5.0/conda/gateways/disk/permissions.py#L35-L37
eno = getattr(e, 'errno', None)
if eno in (errno.EACCES, errno.EPERM):
# errno reports access/perm fail so access prevention via
# `chmod 500` works for this user.
ret = True
finally:
os.chmod(test_dir, 0o775)
shutil.rmtree(test_dir)
return ret
_access_preventable = check_access_is_preventable()
_access_msg = "Cannot create a directory to which writes are preventable"
skip_bad_access = unittest.skipUnless(_access_preventable, _access_msg)
class TestDispatcher(BaseTest):
def test_dyn_pyfunc(self):
@jit
def foo(x):
return x
foo(1)
[cr] = foo.overloads.values()
# __module__ must match that of foo
self.assertEqual(cr.entry_point.__module__, foo.py_func.__module__)
def test_no_argument(self):
@jit
def foo():
return 1
# Just make sure this doesn't crash
foo()
def test_coerce_input_types(self):
# Issue #486: do not allow unsafe conversions if we can still
# compile other specializations.
c_add = jit(nopython=True)(add)
self.assertPreciseEqual(c_add(123, 456), add(123, 456))
self.assertPreciseEqual(c_add(12.3, 45.6), add(12.3, 45.6))
self.assertPreciseEqual(c_add(12.3, 45.6j), add(12.3, 45.6j))
self.assertPreciseEqual(c_add(12300000000, 456), add(12300000000, 456))
# Now force compilation of only a single specialization
c_add = jit('(i4, i4)', nopython=True)(add)
self.assertPreciseEqual(c_add(123, 456), add(123, 456))
# Implicit (unsafe) conversion of float to int
self.assertPreciseEqual(c_add(12.3, 45.6), add(12, 45))
with self.assertRaises(TypeError):
# Implicit conversion of complex to int disallowed
c_add(12.3, 45.6j)
def test_ambiguous_new_version(self):
"""Test compiling new version in an ambiguous case
"""
@jit
def foo(a, b):
return a + b
INT = 1
FLT = 1.5
self.assertAlmostEqual(foo(INT, FLT), INT + FLT)
self.assertEqual(len(foo.overloads), 1)
self.assertAlmostEqual(foo(FLT, INT), FLT + INT)
self.assertEqual(len(foo.overloads), 2)
self.assertAlmostEqual(foo(FLT, FLT), FLT + FLT)
self.assertEqual(len(foo.overloads), 3)
# The following call is ambiguous because (int, int) can resolve
# to (float, int) or (int, float) with equal weight.
self.assertAlmostEqual(foo(1, 1), INT + INT)
self.assertEqual(len(foo.overloads), 4, "didn't compile a new "
"version")
def test_lock(self):
"""
Test that (lazy) compiling from several threads at once doesn't
produce errors (see issue #908).
"""
errors = []
@jit
def foo(x):
return x + 1
def wrapper():
try:
self.assertEqual(foo(1), 2)
except Exception as e:
errors.append(e)
threads = [threading.Thread(target=wrapper) for i in range(16)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertFalse(errors)
def test_explicit_signatures(self):
f = jit("(int64,int64)")(add)
# Approximate match (unsafe conversion)
self.assertPreciseEqual(f(1.5, 2.5), 3)
self.assertEqual(len(f.overloads), 1, f.overloads)
f = jit(["(int64,int64)", "(float64,float64)"])(add)
# Exact signature matches
self.assertPreciseEqual(f(1, 2), 3)
self.assertPreciseEqual(f(1.5, 2.5), 4.0)
# Approximate match (int32 -> float64 is a safe conversion)
self.assertPreciseEqual(f(np.int32(1), 2.5), 3.5)
# No conversion
with self.assertRaises(TypeError) as cm:
f(1j, 1j)
self.assertIn("No matching definition", str(cm.exception))
self.assertEqual(len(f.overloads), 2, f.overloads)
# A more interesting one...
f = jit(["(float32,float32)", "(float64,float64)"])(add)
self.assertPreciseEqual(f(np.float32(1), np.float32(2**-25)), 1.0)
self.assertPreciseEqual(f(1, 2**-25), 1.0000000298023224)
# Fail to resolve ambiguity between the two best overloads
f = jit(["(float32,float64)",
"(float64,float32)",
"(int64,int64)"])(add)
with self.assertRaises(TypeError) as cm:
f(1.0, 2.0)
# The two best matches are output in the error message, as well
# as the actual argument types.
self.assertRegexpMatches(
str(cm.exception),
r"Ambiguous overloading for <function add [^>]*> \(float64, float64\):\n"
r"\(float32, float64\) -> float64\n"
r"\(float64, float32\) -> float64"
)
# The integer signature is not part of the best matches
self.assertNotIn("int64", str(cm.exception))
def test_signature_mismatch(self):
tmpl = "Signature mismatch: %d argument types given, but function takes 2 arguments"
with self.assertRaises(TypeError) as cm:
jit("()")(add)
self.assertIn(tmpl % 0, str(cm.exception))
with self.assertRaises(TypeError) as cm:
jit("(intc,)")(add)
self.assertIn(tmpl % 1, str(cm.exception))
with self.assertRaises(TypeError) as cm:
jit("(intc,intc,intc)")(add)
self.assertIn(tmpl % 3, str(cm.exception))
# With forceobj=True, an empty tuple is accepted
jit("()", forceobj=True)(add)
with self.assertRaises(TypeError) as cm:
jit("(intc,)", forceobj=True)(add)
self.assertIn(tmpl % 1, str(cm.exception))
def test_matching_error_message(self):
f = jit("(intc,intc)")(add)
with self.assertRaises(TypeError) as cm:
f(1j, 1j)
self.assertEqual(str(cm.exception),
"No matching definition for argument type(s) "
"complex128, complex128")
def test_disabled_compilation(self):
@jit
def foo(a):
return a
foo.compile("(float32,)")
foo.disable_compile()
with self.assertRaises(RuntimeError) as raises:
foo.compile("(int32,)")
self.assertEqual(str(raises.exception), "compilation disabled")
self.assertEqual(len(foo.signatures), 1)
def test_disabled_compilation_through_list(self):
@jit(["(float32,)", "(int32,)"])
def foo(a):
return a
with self.assertRaises(RuntimeError) as raises:
foo.compile("(complex64,)")
self.assertEqual(str(raises.exception), "compilation disabled")
self.assertEqual(len(foo.signatures), 2)
def test_disabled_compilation_nested_call(self):
@jit(["(intp,)"])
def foo(a):
return a
@jit
def bar():
foo(1)
foo(np.ones(1)) # no matching definition
with self.assertRaises(TypeError) as raises:
bar()
m = "No matching definition for argument type(s) array(float64, 1d, C)"
self.assertEqual(str(raises.exception), m)
def test_fingerprint_failure(self):
"""
Failure in computing the fingerprint cannot affect a nopython=False
function. On the other hand, with nopython=True, a ValueError should
be raised to report the fingerprint failure.
"""
@jit
def foo(x):
return x
# Empty list will trigger failure in compile_fingerprint
errmsg = 'cannot compute fingerprint of empty list'
with self.assertRaises(ValueError) as raises:
_dispatcher.compute_fingerprint([])
self.assertIn(errmsg, str(raises.exception))
# It should work in fallback
self.assertEqual(foo([]), [])
# But, not in nopython=True
strict_foo = jit(nopython=True)(foo.py_func)
with self.assertRaises(ValueError) as raises:
strict_foo([])
self.assertIn(errmsg, str(raises.exception))
# Test in loop lifting context
@jit
def bar():
object() # force looplifting
x = []
for i in range(10):
x = foo(x)
return x
self.assertEqual(bar(), [])
# Make sure it was looplifted
[cr] = bar.overloads.values()
self.assertEqual(len(cr.lifted), 1)
def test_serialization(self):
"""
Test serialization of Dispatcher objects
"""
@jit(nopython=True)
def foo(x):
return x + 1
self.assertEqual(foo(1), 2)
# get serialization memo
memo = Dispatcher._memo
Dispatcher._recent.clear()
memo_size = len(memo)
# pickle foo and check memo size
serialized_foo = pickle.dumps(foo)
# increases the memo size
self.assertEqual(memo_size + 1, len(memo))
# unpickle
foo_rebuilt = pickle.loads(serialized_foo)
self.assertEqual(memo_size + 1, len(memo))
self.assertIs(foo, foo_rebuilt)
# do we get the same object even if we delete all the explicit references?
id_orig = id(foo_rebuilt)
del foo
del foo_rebuilt
self.assertEqual(memo_size + 1, len(memo))
new_foo = pickle.loads(serialized_foo)
self.assertEqual(id_orig, id(new_foo))
# now clear the recent cache
ref = weakref.ref(new_foo)
del new_foo
Dispatcher._recent.clear()
self.assertEqual(memo_size, len(memo))
# show that deserializing creates a new object
pickle.loads(serialized_foo)
self.assertIs(ref(), None)
@needs_lapack
@unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
def test_misaligned_array_dispatch(self):
# for context see issue #2937
def foo(a):
return np.linalg.matrix_power(a, 1)
jitfoo = jit(nopython=True)(foo)
n = 64
r = int(np.sqrt(n))
dt = np.int8
count = np.complex128().itemsize // dt().itemsize
tmp = np.arange(n * count + 1, dtype=dt)
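# one complex128 (16 bytes) spans `count` int8 elements; the extra element
# lets the tmp[1:] view start one byte off the 16-byte boundary, which is
# what makes the "misaligned" arrays below misaligned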
# create some arrays as Cartesian product of:
# [F/C] x [aligned/misaligned]
C_contig_aligned = tmp[:-1].view(np.complex128).reshape(r, r)
C_contig_misaligned = tmp[1:].view(np.complex128).reshape(r, r)
F_contig_aligned = C_contig_aligned.T
F_contig_misaligned = C_contig_misaligned.T
# checking routine
def check(name, a):
a[:, :] = np.arange(n, dtype=np.complex128).reshape(r, r)
expected = foo(a)
got = jitfoo(a)
np.testing.assert_allclose(expected, got)
# The checks must be run in this order to create the dispatch key
# sequence that causes invalid dispatch noted in #2937.
# The first two should hit the cache as they are aligned, supported
# order and under 5 dimensions. The second two should end up in the
# fallback path as they are misaligned.
check("C_contig_aligned", C_contig_aligned)
check("F_contig_aligned", F_contig_aligned)
check("C_contig_misaligned", C_contig_misaligned)
check("F_contig_misaligned", F_contig_misaligned)
@unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
def test_immutability_in_array_dispatch(self):
# RO operation in function
def foo(a):
return np.sum(a)
jitfoo = jit(nopython=True)(foo)
n = 64
r = int(np.sqrt(n))
dt = np.int8
count = np.complex128().itemsize // dt().itemsize
tmp = np.arange(n * count + 1, dtype=dt)
# create some arrays as Cartesian product of:
# [F/C] x [aligned/misaligned]
C_contig_aligned = tmp[:-1].view(np.complex128).reshape(r, r)
C_contig_misaligned = tmp[1:].view(np.complex128).reshape(r, r)
F_contig_aligned = C_contig_aligned.T
F_contig_misaligned = C_contig_misaligned.T
# checking routine
def check(name, a, disable_write_bit=False):
a[:, :] = np.arange(n, dtype=np.complex128).reshape(r, r)
if disable_write_bit:
a.flags.writeable = False
expected = foo(a)
got = jitfoo(a)
np.testing.assert_allclose(expected, got)
# all of these should end up in the fallback path as they have no write
# bit set
check("C_contig_aligned", C_contig_aligned, disable_write_bit=True)
check("F_contig_aligned", F_contig_aligned, disable_write_bit=True)
check("C_contig_misaligned", C_contig_misaligned,
disable_write_bit=True)
check("F_contig_misaligned", F_contig_misaligned,
disable_write_bit=True)
@needs_lapack
@unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
def test_misaligned_high_dimension_array_dispatch(self):
def foo(a):
return np.linalg.matrix_power(a[0, 0, 0, 0, :, :], 1)
jitfoo = jit(nopython=True)(foo)
def check_properties(arr, layout, aligned):
self.assertEqual(arr.flags.aligned, aligned)
if layout == "C":
self.assertEqual(arr.flags.c_contiguous, True)
if layout == "F":
self.assertEqual(arr.flags.f_contiguous, True)
n = 729
r = 3
dt = np.int8
count = np.complex128().itemsize // dt().itemsize
tmp = np.arange(n * count + 1, dtype=dt)
# create some arrays as Cartesian product of:
# [F/C] x [aligned/misaligned]
C_contig_aligned = tmp[:-1].view(np.complex128).\
reshape(r, r, r, r, r, r)
check_properties(C_contig_aligned, 'C', True)
C_contig_misaligned = tmp[1:].view(np.complex128).\
reshape(r, r, r, r, r, r)
check_properties(C_contig_misaligned, 'C', False)
F_contig_aligned = C_contig_aligned.T
check_properties(F_contig_aligned, 'F', True)
F_contig_misaligned = C_contig_misaligned.T
check_properties(F_contig_misaligned, 'F', False)
# checking routine
def check(name, a):
a[:, :] = np.arange(n, dtype=np.complex128).\
reshape(r, r, r, r, r, r)
expected = foo(a)
got = jitfoo(a)
np.testing.assert_allclose(expected, got)
# these should all hit the fallback path as the cache is only for up to
# 5 dimensions
check("F_contig_misaligned", F_contig_misaligned)
check("C_contig_aligned", C_contig_aligned)
check("F_contig_aligned", F_contig_aligned)
check("C_contig_misaligned", C_contig_misaligned)
def test_dispatch_recompiles_for_scalars(self):
# for context #3612, essentially, compiling a lambda x:x for a
# numerically wide type (everything can be converted to a complex128)
# and then calling again with e.g. an int32 would lead to the int32
# being converted to a complex128 whereas it ought to compile an int32
# specialization.
def foo(x):
return x
# jit and compile on dispatch for 3 scalar types, expect 3 signatures
jitfoo = jit(nopython=True)(foo)
jitfoo(np.complex128(1 + 2j))
jitfoo(np.int32(10))
jitfoo(np.bool_(False))
self.assertEqual(len(jitfoo.signatures), 3)
expected_sigs = [(types.complex128,), (types.int32,), (types.bool_,)]
self.assertEqual(jitfoo.signatures, expected_sigs)
# now jit with signatures so recompilation is forbidden
# expect 1 signature and type conversion
jitfoo = jit([(types.complex128,)], nopython=True)(foo)
jitfoo(np.complex128(1 + 2j))
jitfoo(np.int32(10))
jitfoo(np.bool_(False))
self.assertEqual(len(jitfoo.signatures), 1)
expected_sigs = [(types.complex128,)]
self.assertEqual(jitfoo.signatures, expected_sigs)
class TestSignatureHandling(BaseTest):
"""
Test support for various parameter passing styles.
"""
@tag('important')
def test_named_args(self):
"""
Test passing named arguments to a dispatcher.
"""
f, check = self.compile_func(addsub)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# All calls above fall under the same specialization
self.assertEqual(len(f.overloads), 1)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected 3, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6)
self.assertIn("missing argument 'z'", str(cm.exception))
def test_default_args(self):
"""
Test omitting arguments with a default value.
"""
f, check = self.compile_func(addsub_defaults)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# Now omitting some values
check(3, z=10)
check(3, 4)
check(x=3, y=4)
check(3)
check(x=3)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected at least 1, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(y=6, z=7)
self.assertIn("missing argument 'x'", str(cm.exception))
def test_star_args(self):
"""
Test a compiled function with starargs in the signature.
"""
f, check = self.compile_func(star_defaults)
check(4)
check(4, 5)
check(4, 5, 6)
check(4, 5, 6, 7)
check(4, 5, 6, 7, 8)
check(x=4)
check(x=4, y=5)
check(4, y=5)
with self.assertRaises(TypeError) as cm:
f(4, 5, y=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, 5, z=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, x=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
class TestSignatureHandlingObjectMode(TestSignatureHandling):
"""
Same as TestSignatureHandling, but in object mode.
"""
jit_args = dict(forceobj=True)
class TestGeneratedDispatcher(TestCase):
"""
Tests for @generated_jit.
"""
@tag('important')
def test_generated(self):
f = generated_jit(nopython=True)(generated_usecase)
self.assertEqual(f(8), 8 - 5)
self.assertEqual(f(x=8), 8 - 5)
self.assertEqual(f(x=8, y=4), 8 - 4)
self.assertEqual(f(1j), 5 + 1j)
self.assertEqual(f(1j, 42), 42 + 1j)
self.assertEqual(f(x=1j, y=7), 7 + 1j)
@tag('important')
def test_generated_dtype(self):
f = generated_jit(nopython=True)(dtype_generated_usecase)
a = np.ones((10,), dtype=np.float32)
b = np.ones((10,), dtype=np.float64)
self.assertEqual(f(a, b).dtype, np.float64)
self.assertEqual(f(a, b, dtype=np.dtype('int32')).dtype, np.int32)
self.assertEqual(f(a, b, dtype=np.int32).dtype, np.int32)
def test_signature_errors(self):
"""
Check error reporting when implementation signature doesn't match
generating function signature.
"""
f = generated_jit(nopython=True)(bad_generated_usecase)
# Mismatching # of arguments
with self.assertRaises(TypeError) as raises:
f(1j)
self.assertIn("should be compatible with signature '(x, y=5)', but has signature '(x)'",
str(raises.exception))
# Mismatching defaults
with self.assertRaises(TypeError) as raises:
f(1)
self.assertIn("should be compatible with signature '(x, y=5)', but has signature '(x, y=6)'",
str(raises.exception))
class TestDispatcherMethods(TestCase):
def test_recompile(self):
closure = 1
@jit
def foo(x):
return x + closure
self.assertPreciseEqual(foo(1), 2)
self.assertPreciseEqual(foo(1.5), 2.5)
self.assertEqual(len(foo.signatures), 2)
closure = 2
self.assertPreciseEqual(foo(1), 2)
# Recompiling takes the new closure into account.
foo.recompile()
# Everything was recompiled
self.assertEqual(len(foo.signatures), 2)
self.assertPreciseEqual(foo(1), 3)
self.assertPreciseEqual(foo(1.5), 3.5)
def test_recompile_signatures(self):
# Same as above, but with an explicit signature on @jit.
closure = 1
@jit("int32(int32)")
def foo(x):
return x + closure
self.assertPreciseEqual(foo(1), 2)
self.assertPreciseEqual(foo(1.5), 2)
closure = 2
self.assertPreciseEqual(foo(1), 2)
# Recompiling takes the new closure into account.
foo.recompile()
self.assertPreciseEqual(foo(1), 3)
self.assertPreciseEqual(foo(1.5), 3)
@tag('important')
def test_inspect_llvm(self):
# Create a jited function
@jit
def foo(explicit_arg1, explicit_arg2):
return explicit_arg1 + explicit_arg2
# Call it in a way to create 3 signatures
foo(1, 1)
foo(1.0, 1)
foo(1.0, 1.0)
# base call to get all llvm in a dict
llvms = foo.inspect_llvm()
self.assertEqual(len(llvms), 3)
# make sure the function name shows up in the llvm
for llvm_bc in llvms.values():
# Look for the function name
self.assertIn("foo", llvm_bc)
# Look for the argument names
self.assertIn("explicit_arg1", llvm_bc)
self.assertIn("explicit_arg2", llvm_bc)
def test_inspect_asm(self):
# Create a jited function
@jit
def foo(explicit_arg1, explicit_arg2):
return explicit_arg1 + explicit_arg2
# Call it in a way to create 3 signatures
foo(1, 1)
foo(1.0, 1)
foo(1.0, 1.0)
# base call to get all asm in a dict
asms = foo.inspect_asm()
self.assertEqual(len(asms), 3)
# make sure the function name shows up in the asm
for asm in asms.values():
# Look for the function name
self.assertTrue("foo" in asm)
def _check_cfg_display(self, cfg, wrapper=''):
# simple stringify test
if wrapper:
wrapper = "{}{}".format(len(wrapper), wrapper)
module_name = __name__.split('.', 1)[0]
module_len = len(module_name)
prefix = r'^digraph "CFG for \'_ZN{}{}{}'.format(wrapper, module_len, module_name)
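# Numba mangles symbols Itanium-style: "_ZN" followed by length-prefixed
# name components, hence the wrapper/module lengths baked into the regex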
self.assertRegexpMatches(str(cfg), prefix)
# .display() requires an optional dependency on `graphviz`.
# just test for the attribute without running it.
self.assertTrue(callable(cfg.display))
def test_inspect_cfg(self):
# Exercise the .inspect_cfg(). These are minimal tests and do not fully
# check the correctness of the function.
@jit
def foo(the_array):
return the_array.sum()
# Generate 3 overloads
a1 = np.ones(1)
a2 = np.ones((1, 1))
a3 = np.ones((1, 1, 1))
foo(a1)
foo(a2)
foo(a3)
# Call inspect_cfg() without arguments
cfgs = foo.inspect_cfg()
# Correct count of overloads
self.assertEqual(len(cfgs), 3)
# Makes sure all the signatures are correct
[s1, s2, s3] = cfgs.keys()
self.assertEqual(set([s1, s2, s3]),
set(map(lambda x: (typeof(x),), [a1, a2, a3])))
for cfg in cfgs.values():
self._check_cfg_display(cfg)
self.assertEqual(len(list(cfgs.values())), 3)
# Call inspect_cfg(signature)
cfg = foo.inspect_cfg(signature=foo.signatures[0])
self._check_cfg_display(cfg)
def test_inspect_cfg_with_python_wrapper(self):
# Exercise the .inspect_cfg() including the python wrapper.
# These are minimal tests and do not fully check the correctness of
# the function.
@jit
def foo(the_array):
return the_array.sum()
# Generate 3 overloads
a1 = np.ones(1)
a2 = np.ones((1, 1))
a3 = np.ones((1, 1, 1))
foo(a1)
foo(a2)
foo(a3)
# Call inspect_cfg(signature, show_wrapper="python")
cfg = foo.inspect_cfg(signature=foo.signatures[0],
show_wrapper="python")
self._check_cfg_display(cfg, wrapper='cpython')
def test_inspect_types(self):
@jit
def foo(a, b):
return a + b
foo(1, 2)
# Exercise the method
foo.inspect_types(utils.StringIO())
# Test output
expected = str(foo.overloads[foo.signatures[0]].type_annotation)
with captured_stdout() as out:
foo.inspect_types()
assert expected in out.getvalue()
def test_inspect_types_with_signature(self):
@jit
def foo(a):
return a + 1
foo(1)
foo(1.0)
# Inspect all signatures
with captured_stdout() as total:
foo.inspect_types()
# Inspect first signature
with captured_stdout() as first:
foo.inspect_types(signature=foo.signatures[0])
# Inspect second signature
with captured_stdout() as second:
foo.inspect_types(signature=foo.signatures[1])
self.assertEqual(total.getvalue(), first.getvalue() + second.getvalue())
@unittest.skipIf(jinja2 is None, "please install the 'jinja2' package")
@unittest.skipIf(pygments is None, "please install the 'pygments' package")
def test_inspect_types_pretty(self):
@jit
def foo(a, b):
return a + b
foo(1, 2)
# Exercise the method, dump the output
with captured_stdout():
ann = foo.inspect_types(pretty=True)
# ensure HTML <span> is found in the annotation output
for k, v in ann.ann.items():
span_found = False
for line in v['pygments_lines']:
if 'span' in line[2]:
span_found = True
self.assertTrue(span_found)
# check that file+pretty kwarg combo raises
with self.assertRaises(ValueError) as raises:
foo.inspect_types(file=utils.StringIO(), pretty=True)
self.assertIn("`file` must be None if `pretty=True`",
str(raises.exception))
def test_get_annotation_info(self):
@jit
def foo(a):
return a + 1
foo(1)
foo(1.3)
expected = dict(chain.from_iterable(foo.get_annotation_info(i).items()
for i in foo.signatures))
result = foo.get_annotation_info()
self.assertEqual(expected, result)
def test_issue_with_array_layout_conflict(self):
"""
This tests an issue with the dispatcher when an array that is both
C and F contiguous is supplied as the first signature.
The dispatcher checks for F contiguous first but the compiler checks
for C contiguous first. This results in C-contiguous code being
inserted as the F-contiguous function.
"""
def pyfunc(A, i, j):
return A[i, j]
cfunc = jit(pyfunc)
ary_c_and_f = np.array([[1.]])
ary_c = np.array([[0., 1.], [2., 3.]], order='C')
ary_f = np.array([[0., 1.], [2., 3.]], order='F')
exp_c = pyfunc(ary_c, 1, 0)
exp_f = pyfunc(ary_f, 1, 0)
self.assertEqual(1., cfunc(ary_c_and_f, 0, 0))
got_c = cfunc(ary_c, 1, 0)
got_f = cfunc(ary_f, 1, 0)
self.assertEqual(exp_c, got_c)
self.assertEqual(exp_f, got_f)
class BaseCacheTest(TestCase):
# This class is also used in test_cfunc.py.
# The source file that will be copied
usecases_file = None
# Make sure this doesn't conflict with another module
modname = None
def setUp(self):
self.tempdir = temp_directory('test_cache')
sys.path.insert(0, self.tempdir)
self.modfile = os.path.join(self.tempdir, self.modname + ".py")
self.cache_dir = os.path.join(self.tempdir, "__pycache__")
shutil.copy(self.usecases_file, self.modfile)
self.maxDiff = None
def tearDown(self):
sys.modules.pop(self.modname, None)
sys.path.remove(self.tempdir)
def import_module(self):
# Import a fresh version of the test module. All jitted functions
# in the test module will start anew and load overloads from
# the on-disk cache if possible.
old = sys.modules.pop(self.modname, None)
if old is not None:
# Make sure cached bytecode is removed
if sys.version_info >= (3,):
cached = [old.__cached__]
else:
if old.__file__.endswith(('.pyc', '.pyo')):
cached = [old.__file__]
else:
cached = [old.__file__ + 'c', old.__file__ + 'o']
for fn in cached:
try:
os.unlink(fn)
except OSError as e:
if e.errno != errno.ENOENT:
raise
mod = import_dynamic(self.modname)
self.assertEqual(mod.__file__.rstrip('co'), self.modfile)
return mod
def cache_contents(self):
try:
return [fn for fn in os.listdir(self.cache_dir)
if not fn.endswith(('.pyc', ".pyo"))]
except OSError as e:
if e.errno != errno.ENOENT:
raise
return []
def get_cache_mtimes(self):
return dict((fn, os.path.getmtime(os.path.join(self.cache_dir, fn)))
for fn in sorted(self.cache_contents()))
def check_pycache(self, n):
c = self.cache_contents()
self.assertEqual(len(c), n, c)
def dummy_test(self):
pass
class BaseCacheUsecasesTest(BaseCacheTest):
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "dispatcher_caching_test_fodder"
def run_in_separate_process(self):
# Cached functions can be run from a distinct process.
# Also stresses issue #1603: uncached function calling cached function
# shouldn't fail compiling.
code = """if 1:
import sys
sys.path.insert(0, %(tempdir)r)
mod = __import__(%(modname)r)
mod.self_test()
""" % dict(tempdir=self.tempdir, modname=self.modname)
popen = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError("process failed with code %s: stderr follows\n%s\n"
% (popen.returncode, err.decode()))
def check_module(self, mod):
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(3) # 1 index, 2 data
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(5) # 2 index, 3 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(6) # 2 index, 4 data
mod.self_test()
def check_hits(self, func, hits, misses=None):
st = func.stats
self.assertEqual(sum(st.cache_hits.values()), hits, st.cache_hits)
if misses is not None:
self.assertEqual(sum(st.cache_misses.values()), misses,
st.cache_misses)
class TestCache(BaseCacheUsecasesTest):
@tag('important')
def test_caching(self):
self.check_pycache(0)
mod = self.import_module()
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(3) # 1 index, 2 data
self.check_hits(f, 0, 2)
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(5) # 2 index, 3 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(6) # 2 index, 4 data
self.check_hits(f, 0, 2)
f = mod.record_return
rec = f(mod.aligned_arr, 1)
self.assertPreciseEqual(tuple(rec), (2, 43.5))
rec = f(mod.packed_arr, 1)
self.assertPreciseEqual(tuple(rec), (2, 43.5))
self.check_pycache(9) # 3 index, 6 data
self.check_hits(f, 0, 2)
f = mod.generated_usecase
self.assertPreciseEqual(f(3, 2), 1)
self.assertPreciseEqual(f(3j, 2), 2 + 3j)
# Check the code runs ok from another process
self.run_in_separate_process()
@tag('important')
def test_caching_nrt_pruned(self):
self.check_pycache(0)
mod = self.import_module()
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
# NRT pruning may affect cache
self.assertPreciseEqual(f(2, np.arange(3)), 2 + np.arange(3) + 1)
self.check_pycache(3) # 1 index, 2 data
self.check_hits(f, 0, 2)
def test_inner_then_outer(self):
# Caching inner then outer function is ok
mod = self.import_module()
self.assertPreciseEqual(mod.inner(3, 2), 6)
self.check_pycache(2) # 1 index, 1 data
# Uncached outer function shouldn't fail (issue #1603)
f = mod.outer_uncached
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(2) # 1 index, 1 data
mod = self.import_module()
f = mod.outer_uncached
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(2) # 1 index, 1 data
# Cached outer will create new cache entries
f = mod.outer
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(4) # 2 index, 2 data
self.assertPreciseEqual(f(3.5, 2), 2.5)
self.check_pycache(6) # 2 index, 4 data
def test_outer_then_inner(self):
# Caching outer then inner function is ok
mod = self.import_module()
self.assertPreciseEqual(mod.outer(3, 2), 2)
self.check_pycache(4) # 2 index, 2 data
self.assertPreciseEqual(mod.outer_uncached(3, 2), 2)
self.check_pycache(4) # same
mod = self.import_module()
f = mod.inner
self.assertPreciseEqual(f(3, 2), 6)
self.check_pycache(4) # same
self.assertPreciseEqual(f(3.5, 2), 6.5)
self.check_pycache(5) # 2 index, 3 data
def test_no_caching(self):
mod = self.import_module()
f = mod.add_nocache_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(0)
def test_looplifted(self):
# Loop-lifted functions can't be cached and raise a warning
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.looplifted
self.assertPreciseEqual(f(4), 6)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn('Cannot cache compiled function "looplifted" '
'as it uses lifted loops', str(w[0].message))
def test_big_array(self):
# Code that references big-array globals cannot be cached
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.use_big_array
np.testing.assert_equal(f(), mod.biggie)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn('Cannot cache compiled function "use_big_array" '
'as it uses dynamic globals', str(w[0].message))
def test_ctypes(self):
# Functions using a ctypes pointer can't be cached and raise
# a warning.
mod = self.import_module()
for f in [mod.use_c_sin, mod.use_c_sin_nest1, mod.use_c_sin_nest2]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
self.assertPreciseEqual(f(0.0), 0.0)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn(
'Cannot cache compiled function "{}"'.format(f.__name__),
str(w[0].message),
)
def test_closure(self):
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.closure1
self.assertPreciseEqual(f(3), 6)
f = mod.closure2
self.assertPreciseEqual(f(3), 8)
self.check_pycache(0)
self.assertEqual(len(w), 2)
for item in w:
self.assertIn('Cannot cache compiled function "closure"',
str(item.message))
def test_cache_reuse(self):
mod = self.import_module()
mod.add_usecase(2, 3)
mod.add_usecase(2.5, 3.5)
mod.add_objmode_usecase(2, 3)
mod.outer_uncached(2, 3)
mod.outer(2, 3)
mod.record_return(mod.packed_arr, 0)
mod.record_return(mod.aligned_arr, 1)
mod.generated_usecase(2, 3)
mtimes = self.get_cache_mtimes()
# Two signatures compiled
self.check_hits(mod.add_usecase, 0, 2)
mod2 = self.import_module()
self.assertIsNot(mod, mod2)
f = mod2.add_usecase
f(2, 3)
self.check_hits(f, 1, 0)
f(2.5, 3.5)
self.check_hits(f, 2, 0)
f = mod2.add_objmode_usecase
f(2, 3)
self.check_hits(f, 1, 0)
# The files haven't changed
self.assertEqual(self.get_cache_mtimes(), mtimes)
self.run_in_separate_process()
self.assertEqual(self.get_cache_mtimes(), mtimes)
def test_cache_invalidate(self):
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
# This should change the functions' results
with open(self.modfile, "a") as f:
f.write("\nZ = 10\n")
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 15)
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 15)
def test_recompile(self):
# Explicit call to recompile() should overwrite the cache
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
mod = self.import_module()
f = mod.add_usecase
mod.Z = 10
self.assertPreciseEqual(f(2, 3), 6)
f.recompile()
self.assertPreciseEqual(f(2, 3), 15)
# Freshly recompiled version is re-used from other imports
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 15)
def test_same_names(self):
# Functions with the same name should still be disambiguated
mod = self.import_module()
f = mod.renamed_function1
self.assertPreciseEqual(f(2), 4)
f = mod.renamed_function2
self.assertPreciseEqual(f(2), 8)
def test_frozen(self):
from .dummy_module import function
old_code = function.__code__
code_obj = compile('pass', 'tests/dummy_module.py', 'exec')
try:
function.__code__ = code_obj
source = inspect.getfile(function)
# doesn't return anything, since it cannot find the module
# fails unless the executable is frozen
locator = _UserWideCacheLocator.from_function(function, source)
self.assertIsNone(locator)
sys.frozen = True
# returns a cache locator object, only works when executable is frozen
locator = _UserWideCacheLocator.from_function(function, source)
self.assertIsInstance(locator, _UserWideCacheLocator)
finally:
function.__code__ = old_code
del sys.frozen
def _test_pycache_fallback(self):
"""
With a disabled __pycache__, test there is a working fallback
(e.g. on the user-wide cache dir)
"""
mod = self.import_module()
f = mod.add_usecase
# Remove this function's cache files at the end, to avoid accumulation
# across test calls.
self.addCleanup(shutil.rmtree, f.stats.cache_path, ignore_errors=True)
self.assertPreciseEqual(f(2, 3), 6)
# It's a cache miss since the file was copied to a new temp location
self.check_hits(f, 0, 1)
# Test re-use
mod2 = self.import_module()
f = mod2.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_hits(f, 1, 0)
# The __pycache__ is empty (otherwise the test's preconditions
# wouldn't be met)
self.check_pycache(0)
@skip_bad_access
@unittest.skipIf(os.name == "nt",
"cannot easily make a directory read-only on Windows")
def test_non_creatable_pycache(self):
# Make it impossible to create the __pycache__ directory
old_perms = os.stat(self.tempdir).st_mode
os.chmod(self.tempdir, 0o500)
self.addCleanup(os.chmod, self.tempdir, old_perms)
self._test_pycache_fallback()
@skip_bad_access
@unittest.skipIf(os.name == "nt",
"cannot easily make a directory read-only on Windows")
def test_non_writable_pycache(self):
# Make it impossible to write to the __pycache__ directory
pycache = os.path.join(self.tempdir, '__pycache__')
os.mkdir(pycache)
old_perms = os.stat(pycache).st_mode
os.chmod(pycache, 0o500)
self.addCleanup(os.chmod, pycache, old_perms)
self._test_pycache_fallback()
def test_ipython(self):
# Test caching in an IPython session
base_cmd = [sys.executable, '-m', 'IPython']
base_cmd += ['--quiet', '--quick', '--no-banner', '--colors=NoColor']
try:
ver = subprocess.check_output(base_cmd + ['--version'])
except subprocess.CalledProcessError as e:
self.skipTest("ipython not available: return code %d"
% e.returncode)
ver = ver.strip().decode()
print("ipython version:", ver)
# Create test input
inputfn = os.path.join(self.tempdir, "ipython_cache_usecase.txt")
with open(inputfn, "w") as f:
f.write(r"""
import os
import sys
from numba import jit
# IPython 5 does not support multiline input if stdin isn't
# a tty (https://github.com/ipython/ipython/issues/9752)
f = jit(cache=True)(lambda: 42)
res = f()
# IPython writes on stdout, so use stderr instead
sys.stderr.write(u"cache hits = %d\n" % f.stats.cache_hits[()])
# IPython hijacks sys.exit(), bypass it
sys.stdout.flush()
sys.stderr.flush()
os._exit(res)
""")
def execute_with_input():
# Feed the test input as stdin, to execute it in REPL context
with open(inputfn, "rb") as stdin:
p = subprocess.Popen(base_cmd, stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
if p.returncode != 42:
self.fail("unexpected return code %d\n"
"-- stdout:\n%s\n"
"-- stderr:\n%s\n"
% (p.returncode, out, err))
return err
execute_with_input()
# Run a second time and check caching
err = execute_with_input()
self.assertIn("cache hits = 1", err.strip())
@skip_parfors_unsupported
class TestSequentialParForsCache(BaseCacheUsecasesTest):
def setUp(self):
super(TestSequentialParForsCache, self).setUp()
# Turn on sequential parfor lowering
parfor.sequential_parfor_lowering = True
def tearDown(self):
super(TestSequentialParForsCache, self).tearDown()
# Turn off sequential parfor lowering
parfor.sequential_parfor_lowering = False
def test_caching(self):
mod = self.import_module()
self.check_pycache(0)
f = mod.parfor_usecase
ary = np.ones(10)
self.assertPreciseEqual(f(ary), ary * ary + ary)
dynamic_globals = [cres.library.has_dynamic_globals
for cres in f.overloads.values()]
self.assertEqual(dynamic_globals, [False])
self.check_pycache(2) # 1 index, 1 data
class TestCacheWithCpuSetting(BaseCacheUsecasesTest):
# Disable parallel testing due to envvars modification
_numba_parallel_test_ = False
def check_later_mtimes(self, mtimes_old):
match_count = 0
for k, v in self.get_cache_mtimes().items():
if k in mtimes_old:
self.assertGreaterEqual(v, mtimes_old[k])
match_count += 1
self.assertGreater(match_count, 0,
msg='nothing to compare')
def test_user_set_cpu_name(self):
self.check_pycache(0)
mod = self.import_module()
mod.self_test()
cache_size = len(self.cache_contents())
mtimes = self.get_cache_mtimes()
# Change CPU name to generic
with override_env_config('NUMBA_CPU_NAME', 'generic'):
self.run_in_separate_process()
self.check_later_mtimes(mtimes)
self.assertGreater(len(self.cache_contents()), cache_size)
# Check cache index
cache = mod.add_usecase._cache
cache_file = cache._cache_file
cache_index = cache_file._load_index()
self.assertEqual(len(cache_index), 2)
[key_a, key_b] = cache_index.keys()
if key_a[1][1] == ll.get_host_cpu_name():
key_host, key_generic = key_a, key_b
else:
key_host, key_generic = key_b, key_a
self.assertEqual(key_host[1][1], ll.get_host_cpu_name())
self.assertEqual(key_host[1][2], codegen.get_host_cpu_features())
self.assertEqual(key_generic[1][1], 'generic')
self.assertEqual(key_generic[1][2], '')
def test_user_set_cpu_features(self):
self.check_pycache(0)
mod = self.import_module()
mod.self_test()
cache_size = len(self.cache_contents())
mtimes = self.get_cache_mtimes()
# Change CPU feature
my_cpu_features = '-sse;-avx'
system_features = codegen.get_host_cpu_features()
self.assertNotEqual(system_features, my_cpu_features)
with override_env_config('NUMBA_CPU_FEATURES', my_cpu_features):
self.run_in_separate_process()
self.check_later_mtimes(mtimes)
self.assertGreater(len(self.cache_contents()), cache_size)
# Check cache index
cache = mod.add_usecase._cache
cache_file = cache._cache_file
cache_index = cache_file._load_index()
self.assertEqual(len(cache_index), 2)
[key_a, key_b] = cache_index.keys()
if key_a[1][2] == system_features:
key_host, key_generic = key_a, key_b
else:
key_host, key_generic = key_b, key_a
self.assertEqual(key_host[1][1], ll.get_host_cpu_name())
self.assertEqual(key_host[1][2], system_features)
self.assertEqual(key_generic[1][1], ll.get_host_cpu_name())
self.assertEqual(key_generic[1][2], my_cpu_features)
class TestMultiprocessCache(BaseCacheTest):
# Nested multiprocessing.Pool raises AssertionError:
# "daemonic processes are not allowed to have children"
_numba_parallel_test_ = False
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "dispatcher_caching_test_fodder"
def test_multiprocessing(self):
# Check caching works from multiple processes at once (#2028)
mod = self.import_module()
# Calling a pure Python caller of the JIT-compiled function is
# necessary to reproduce the issue.
f = mod.simple_usecase_caller
n = 3
try:
ctx = multiprocessing.get_context('spawn')
except AttributeError:
ctx = multiprocessing
pool = ctx.Pool(n)
try:
res = sum(pool.imap(f, range(n)))
finally:
pool.close()
self.assertEqual(res, n * (n - 1) // 2)
class TestCacheFileCollision(unittest.TestCase):
_numba_parallel_test_ = False
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "caching_file_loc_fodder"
source_text_1 = """
from numba import njit
@njit(cache=True)
def bar():
return 123
"""
source_text_2 = """
from numba import njit
@njit(cache=True)
def bar():
return 321
"""
def setUp(self):
self.tempdir = temp_directory('test_cache_file_loc')
sys.path.insert(0, self.tempdir)
self.modname = 'module_name_that_is_unlikely'
self.assertNotIn(self.modname, sys.modules)
self.modname_bar1 = self.modname
self.modname_bar2 = '.'.join([self.modname, 'foo'])
foomod = os.path.join(self.tempdir, self.modname)
os.mkdir(foomod)
with open(os.path.join(foomod, '__init__.py'), 'w') as fout:
print(self.source_text_1, file=fout)
with open(os.path.join(foomod, 'foo.py'), 'w') as fout:
print(self.source_text_2, file=fout)
def tearDown(self):
sys.modules.pop(self.modname_bar1, None)
sys.modules.pop(self.modname_bar2, None)
sys.path.remove(self.tempdir)
def import_bar1(self):
return import_dynamic(self.modname_bar1).bar
def import_bar2(self):
return import_dynamic(self.modname_bar2).bar
def test_file_location(self):
bar1 = self.import_bar1()
bar2 = self.import_bar2()
# Check that the cache file is named correctly
idxname1 = bar1._cache._cache_file._index_name
idxname2 = bar2._cache._cache_file._index_name
self.assertNotEqual(idxname1, idxname2)
self.assertTrue(idxname1.startswith("__init__.bar-3.py"))
self.assertTrue(idxname2.startswith("foo.bar-3.py"))
@unittest.skipUnless(hasattr(multiprocessing, 'get_context'),
'Test requires multiprocessing.get_context')
def test_no_collision(self):
bar1 = self.import_bar1()
bar2 = self.import_bar2()
with capture_cache_log() as buf:
res1 = bar1()
cachelog = buf.getvalue()
# bar1 should save new index and data
self.assertEqual(cachelog.count('index saved'), 1)
self.assertEqual(cachelog.count('data saved'), 1)
self.assertEqual(cachelog.count('index loaded'), 0)
self.assertEqual(cachelog.count('data loaded'), 0)
with capture_cache_log() as buf:
res2 = bar2()
cachelog = buf.getvalue()
# bar2 should save new index and data
self.assertEqual(cachelog.count('index saved'), 1)
self.assertEqual(cachelog.count('data saved'), 1)
self.assertEqual(cachelog.count('index loaded'), 0)
self.assertEqual(cachelog.count('data loaded'), 0)
self.assertNotEqual(res1, res2)
try:
# Make sure we can spawn new process without inheriting
# the parent context.
mp = multiprocessing.get_context('spawn')
except ValueError:
print("missing spawn context")
q = mp.Queue()
# Start new process that calls `cache_file_collision_tester`
proc = mp.Process(target=cache_file_collision_tester,
args=(q, self.tempdir,
self.modname_bar1,
self.modname_bar2))
proc.start()
# Get results from the process
log1 = q.get()
got1 = q.get()
log2 = q.get()
got2 = q.get()
proc.join()
# The remote execution result of bar1() and bar2() should match
# the one executed locally.
self.assertEqual(got1, res1)
self.assertEqual(got2, res2)
# The remote should have loaded bar1 from cache
self.assertEqual(log1.count('index saved'), 0)
self.assertEqual(log1.count('data saved'), 0)
self.assertEqual(log1.count('index loaded'), 1)
self.assertEqual(log1.count('data loaded'), 1)
# The remote should have loaded bar2 from cache
self.assertEqual(log2.count('index saved'), 0)
self.assertEqual(log2.count('data saved'), 0)
self.assertEqual(log2.count('index loaded'), 1)
self.assertEqual(log2.count('data loaded'), 1)
def cache_file_collision_tester(q, tempdir, modname_bar1, modname_bar2):
sys.path.insert(0, tempdir)
bar1 = import_dynamic(modname_bar1).bar
bar2 = import_dynamic(modname_bar2).bar
with capture_cache_log() as buf:
r1 = bar1()
q.put(buf.getvalue())
q.put(r1)
with capture_cache_log() as buf:
r2 = bar2()
q.put(buf.getvalue())
q.put(r2)
class TestCacheMultipleFilesWithSignature(unittest.TestCase):
# Regression test for https://github.com/numba/numba/issues/3658
_numba_parallel_test_ = False
source_text_file1 = """
from file2 import function2
"""
source_text_file2 = """
from numba import njit
@njit('float64(float64)', cache=True)
def function1(x):
return x
@njit('float64(float64)', cache=True)
def function2(x):
return x
"""
def setUp(self):
self.tempdir = temp_directory('test_cache_file_loc')
self.file1 = os.path.join(self.tempdir, 'file1.py')
with open(self.file1, 'w') as fout:
print(self.source_text_file1, file=fout)
self.file2 = os.path.join(self.tempdir, 'file2.py')
with open(self.file2, 'w') as fout:
print(self.source_text_file2, file=fout)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_caching_multiple_files_with_signature(self):
# Execute file1.py
popen = subprocess.Popen([sys.executable, self.file1],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = popen.communicate()
self.assertEqual(popen.returncode, 0)
# Execute file2.py
popen = subprocess.Popen([sys.executable, self.file2],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = popen.communicate()
self.assertEqual(popen.returncode, 0)
class TestDispatcherFunctionBoundaries(TestCase):
def test_pass_dispatcher_as_arg(self):
        # Test that a Dispatcher object can be passed as an argument
@jit(nopython=True)
def add1(x):
return x + 1
@jit(nopython=True)
def bar(fn, x):
return fn(x)
@jit(nopython=True)
def foo(x):
return bar(add1, x)
# Check dispatcher as argument inside NPM
inputs = [1, 11.1, np.arange(10)]
expected_results = [x + 1 for x in inputs]
for arg, expect in zip(inputs, expected_results):
self.assertPreciseEqual(foo(arg), expect)
# Check dispatcher as argument from python
for arg, expect in zip(inputs, expected_results):
self.assertPreciseEqual(bar(add1, arg), expect)
def test_dispatcher_as_arg_usecase(self):
@jit(nopython=True)
def maximum(seq, cmpfn):
tmp = seq[0]
for each in seq[1:]:
cmpval = cmpfn(tmp, each)
if cmpval < 0:
tmp = each
return tmp
got = maximum([1, 2, 3, 4], cmpfn=jit(lambda x, y: x - y))
self.assertEqual(got, 4)
got = maximum(list(zip(range(5), range(5)[::-1])),
cmpfn=jit(lambda x, y: x[0] - y[0]))
self.assertEqual(got, (4, 0))
got = maximum(list(zip(range(5), range(5)[::-1])),
cmpfn=jit(lambda x, y: x[1] - y[1]))
self.assertEqual(got, (0, 4))
def test_dispatcher_cannot_return_to_python(self):
@jit(nopython=True)
def foo(fn):
return fn
fn = jit(lambda x: x)
with self.assertRaises(TypeError) as raises:
foo(fn)
self.assertRegexpMatches(str(raises.exception),
"cannot convert native .* to Python object")
def test_dispatcher_in_sequence_arg(self):
@jit(nopython=True)
def one(x):
return x + 1
@jit(nopython=True)
def two(x):
return one(one(x))
@jit(nopython=True)
def three(x):
return one(one(one(x)))
@jit(nopython=True)
def choose(fns, x):
return fns[0](x), fns[1](x), fns[2](x)
# Tuple case
self.assertEqual(choose((one, two, three), 1), (2, 3, 4))
# List case
self.assertEqual(choose([one, one, one], 1), (2, 2, 2))
class TestBoxingDefaultError(unittest.TestCase):
# Testing default error at boxing/unboxing
def test_unbox_runtime_error(self):
# Dummy type has no unbox support
def foo(x):
pass
cres = compile_isolated(foo, (types.Dummy("dummy_type"),))
with self.assertRaises(TypeError) as raises:
# Can pass in whatever and the unbox logic will always raise
# without checking the input value.
cres.entry_point(None)
self.assertEqual(str(raises.exception), "can't unbox dummy_type type")
def test_box_runtime_error(self):
def foo():
return unittest # Module type has no boxing logic
cres = compile_isolated(foo, ())
with self.assertRaises(TypeError) as raises:
            # The module return value has no boxing logic, so the boxing
            # step always raises regardless of the computed value.
cres.entry_point()
pat = "cannot convert native Module.* to Python object"
self.assertRegexpMatches(str(raises.exception), pat)
class TestNoRetryFailedSignature(unittest.TestCase):
"""Test that failed-to-compile signatures are not recompiled.
"""
def run_test(self, func):
fcom = func._compiler
self.assertEqual(len(fcom._failed_cache), 0)
# expected failure because `int` has no `__getitem__`
with self.assertRaises(errors.TypingError):
func(1)
self.assertEqual(len(fcom._failed_cache), 1)
# retry
with self.assertRaises(errors.TypingError):
func(1)
self.assertEqual(len(fcom._failed_cache), 1)
# retry with double
with self.assertRaises(errors.TypingError):
func(1.0)
self.assertEqual(len(fcom._failed_cache), 2)
def test_direct_call(self):
@jit(nopython=True)
def foo(x):
return x[0]
self.run_test(foo)
def test_nested_call(self):
@jit(nopython=True)
def bar(x):
return x[0]
@jit(nopython=True)
def foobar(x):
bar(x)
@jit(nopython=True)
def foo(x):
return bar(x) + foobar(x)
self.run_test(foo)
def test_error_count(self):
def check(field, would_fail):
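            # `field` is unused; it is kept from the issue #4117 reproducer
            # referenced below.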
# Slightly modified from the reproducer in issue #4117.
# Before the patch, the compilation time of the failing case is
            # much longer than that of the successful case. This can be detected
# by the number of times `trigger()` is visited.
k = 10
counter = {'c': 0}
@generated_jit
def trigger(x):
# Keep track of every visit
counter['c'] += 1
if would_fail:
raise errors.TypingError("invoke_failed")
return lambda x: x
@jit(nopython=True)
def ident(out, x):
pass
def chain_assign(fs, inner=ident):
tab_head, tab_tail = fs[-1], fs[:-1]
@jit(nopython=True)
def assign(out, x):
inner(out, x)
out[0] += tab_head(x)
if tab_tail:
return chain_assign(tab_tail, assign)
else:
return assign
chain = chain_assign((trigger,) * k)
out = np.ones(2)
if would_fail:
with self.assertRaises(errors.TypingError) as raises:
chain(out, 1)
self.assertIn('invoke_failed', str(raises.exception))
else:
chain(out, 1)
# Returns the visit counts
return counter['c']
ct_ok = check('a', False)
ct_bad = check('c', True)
# `trigger()` is visited exactly once for both successful and failed
# compilation.
self.assertEqual(ct_ok, 1)
self.assertEqual(ct_bad, 1)
if __name__ == '__main__':
unittest.main()
|
weakaudio.py
|
#
# get at sound cards on both Mac and FreeBSD,
# using pyaudio / portaudio.
#
import sys
import numpy
import time
import threading
import multiprocessing
import os
import weakutil
import sdrip
import sdriq
import eb200
import sdrplay
import fmdemod
# desc is [ "6", "0" ] for a sound card -- sixth card, channel 0 (left).
# desc is [ "sdrip", "192.168.1.2" ] for RFSpace SDR-IP.
def new(desc, rate):
# sound card?
if desc[0].isdigit():
return Stream(int(desc[0]), int(desc[1]), rate)
if desc[0] == "sdrip":
return SDRIP(desc[1], rate)
if desc[0] == "sdriq":
return SDRIQ(desc[1], rate)
if desc[0] == "eb200":
return EB200(desc[1], rate)
if desc[0] == "sdrplay":
return SDRplay(desc[1], rate)
sys.stderr.write("weakaudio: cannot understand card %s\n" % (desc[0]))
usage()
sys.exit(1)
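# A hypothetical usage sketch (the card index and address are assumptions):
#   s = new(["6", "0"], 11025)               # sound card 6, channel 0 (left)
#   s = new(["sdrip", "192.168.1.2"], 11025) # RFSpace SDR-IP at that address
#   [ buf, t_end ] = s.read()                # resampled samples + UNIX end time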
# need a single one of these even if multiple streams.
global_pya = None
def pya():
global global_pya
import pyaudio
    if global_pya is None:
# suppress Jack and ALSA error messages on Linux.
#nullfd = os.open("/dev/null", 1)
#oerr = os.dup(2)
#os.dup2(nullfd, 2)
global_pya = pyaudio.PyAudio()
#os.dup2(oerr, 2)
#os.close(oerr)
#os.close(nullfd)
return global_pya
# find the lowest supported input rate >= rate.
# needed on Linux but not the Mac (which converts as needed).
def x_pya_input_rate(card, rate):
import pyaudio
rates = [ rate, 8000, 11025, 12000, 16000, 22050, 44100, 48000 ]
for r in rates:
if r >= rate:
ok = False
try:
ok = pya().is_format_supported(r,
input_device=card,
input_format=pyaudio.paInt16,
input_channels=1)
            except ValueError:
                # is_format_supported() raises ValueError when unsupported.
                pass
if ok:
return r
sys.stderr.write("weakaudio: no input rate >= %d\n" % (rate))
sys.exit(1)
# sub-process to avoid initializing pyaudio in main
# process, since that makes subsequent forks and
# multiprocessing not work.
def pya_input_rate(card, rate):
rpipe, wpipe = multiprocessing.Pipe(False)
pid = os.fork()
if pid == 0:
rpipe.close()
x = x_pya_input_rate(card, rate)
wpipe.send(x)
os._exit(0)
wpipe.close()
x = rpipe.recv()
os.waitpid(pid, 0)
rpipe.close()
return x
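# The fork/pipe dance in pya_input_rate() generalizes; a minimal sketch of the
# same pattern, assuming fn and its result are picklable (this helper is
# illustrative only and unused elsewhere in this file):
def _run_in_child(fn, *args):
    rpipe, wpipe = multiprocessing.Pipe(False)
    pid = os.fork()
    if pid == 0:
        # child: compute, ship the result back, exit without cleanup.
        rpipe.close()
        wpipe.send(fn(*args))
        os._exit(0)
    wpipe.close()
    x = rpipe.recv()
    os.waitpid(pid, 0)
    rpipe.close()
    return x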
def x_pya_output_rate(card, rate):
import pyaudio
rates = [ rate, 8000, 11025, 12000, 16000, 22050, 44100, 48000 ]
for r in rates:
if r >= rate:
ok = False
try:
ok = pya().is_format_supported(r,
output_device=card,
output_format=pyaudio.paInt16,
output_channels=1)
            except ValueError:
                # is_format_supported() raises ValueError when unsupported.
                pass
if ok:
return r
sys.stderr.write("weakaudio: no output rate >= %d\n" % (rate))
sys.exit(1)
def pya_output_rate(card, rate):
rpipe, wpipe = multiprocessing.Pipe(False)
pid = os.fork()
if pid == 0:
rpipe.close()
x = x_pya_output_rate(card, rate)
wpipe.send(x)
os._exit(0)
wpipe.close()
x = rpipe.recv()
os.waitpid(pid, 0)
rpipe.close()
return x
class Stream:
def __init__(self, card, chan, rate):
self.use_oss = False
#self.use_oss = ("freebsd" in sys.platform)
self.card = card
self.chan = chan
# UNIX time of audio stream time zero.
self.t0 = None
        if rate is None:
rate = pya_input_rate(card, 8000)
self.rate = rate # the sample rate the app wants.
self.cardrate = rate # the rate at which the card is running.
self.cardbufs = [ ]
self.cardlock = threading.Lock()
self.last_adc_end = None
self.last_end_time = None
if self.use_oss:
self.oss_open()
else:
self.pya_open()
self.resampler = weakutil.Resampler(self.cardrate, self.rate)
# rate at which len(self.raw_read()) increases.
self.rawrate = self.cardrate
# returns [ buf, tm ]
# where tm is UNIX seconds of the last sample.
# non-blocking.
# reads from a pipe from pya_dev2pipe in the pya sub-process.
# XXX won't work for oss.
def read(self):
[ buf1, tm ] = self.raw_read()
buf2 = self.postprocess(buf1)
return [ buf2, tm ]
def raw_read(self):
bufs = [ ]
end_time = self.last_end_time
while self.rpipe.poll():
e = self.rpipe.recv()
# e is [ pcm, unix_end_time ]
bufs.append(e[0])
end_time = e[1]
if len(bufs) > 0:
buf = numpy.concatenate(bufs)
else:
buf = numpy.array([])
self.last_end_time = end_time
return [ buf, end_time ]
def postprocess(self, buf):
if len(buf) > 0:
buf = self.resampler.resample(buf)
return buf
def junklog(self, msg):
msg1 = "[%d, %d] %s\n" % (self.card, self.chan, msg)
sys.stderr.write(msg1)
f = open("ft8-junk.txt", "a")
f.write(msg1)
f.close()
# PyAudio calls this in a separate thread.
def pya_callback(self, in_data, frame_count, time_info, status):
import pyaudio
if status != 0:
self.junklog("pya_callback status %d\n" % (status))
        # numpy.fromstring() is deprecated for binary input; frombuffer() is equivalent.
        pcm = numpy.frombuffer(in_data, dtype=numpy.int16)
pcm = pcm[self.chan::self.chans]
assert frame_count == len(pcm)
# time of first sample in pcm[], in seconds since start.
adc_time = time_info['input_buffer_adc_time']
# time of last sample
adc_end = adc_time + (len(pcm) / float(self.cardrate))
        if self.last_adc_end is not None:
if adc_end < self.last_adc_end or adc_end > self.last_adc_end + 5:
self.junklog("pya last_adc_end %s adc_end %s" % (self.last_adc_end, adc_end))
expected = (adc_end - self.last_adc_end) * float(self.cardrate)
expected = int(round(expected))
shortfall = expected - len(pcm)
if abs(shortfall) > 20:
self.junklog("pya expected %d got %d" % (expected, len(pcm)))
#if shortfall > 100:
# pcm = numpy.append(numpy.zeros(shortfall, dtype=pcm.dtype), pcm)
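            # Worked example: at cardrate 11025, a 0.5 s gap between callbacks
            # gives expected = round(0.5 * 11025) = 5512 samples; if only 5300
            # arrived, shortfall = 212 and the junklog line above fires.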
self.last_adc_end = adc_end
# set up to convert from stream time to UNIX time.
# pya_strm.get_time() returns the UNIX time corresponding
# to the current audio stream time. it's PortAudio's Pa_GetStreamTime().
        if self.t0 is None:
            if self.pya_strm is None:
return ( None, pyaudio.paContinue )
ut = time.time()
st = self.pya_strm.get_time()
self.t0 = ut - st
# translate time of last sample to UNIX time.
unix_end = adc_end + self.t0
self.cardlock.acquire()
self.cardbufs.append([ pcm, unix_end ])
self.cardlock.release()
return ( None, pyaudio.paContinue )
def pya_open(self):
self.cardrate = pya_input_rate(self.card, self.rate)
# read from sound card in a separate process, since Python
# scheduler seems sometimes not to run the py audio thread
# often enough.
sys.stdout.flush()
rpipe, wpipe = multiprocessing.Pipe(False)
proc = multiprocessing.Process(target=self.pya_dev2pipe, args=[rpipe,wpipe])
proc.start()
wpipe.close()
self.rpipe = rpipe
# executes in a sub-process.
def pya_dev2pipe(self, rpipe, wpipe):
import pyaudio
rpipe.close()
if "freebsd" in sys.platform:
# always ask for 2 channels, since on FreeBSD if you
# open left with chans=1 and right with chans=2 you
# get mixing.
self.chans = 2
else:
# but needs to be 1 for RigBlaster on Linux.
self.chans = 1
assert self.chan < self.chans
# perhaps this controls how often the callback is called.
# too big and ft8.py's read() is delayed long enough to
# cut into FT8 decoding time. too small and apparently the
# callback thread can't keep up.
bufsize = int(self.cardrate / 8) # was 4
# pya.open in this sub-process so that pya starts the callback thread
# here too.
xpya = pya()
self.pya_strm = None
self.pya_strm = xpya.open(format=pyaudio.paInt16,
input_device_index=self.card,
channels=self.chans,
rate=self.cardrate,
frames_per_buffer=bufsize,
stream_callback=self.pya_callback,
output=False,
input=True)
# copy buffers from self.cardbufs, where pya_callback left them,
# to the pipe to the parent process. can't do this in the callback
# because the pipe write might block.
# each object on the pipe is [ pcm, unix_end ].
while True:
self.cardlock.acquire()
bufs = self.cardbufs
self.cardbufs = [ ]
self.cardlock.release()
if len(bufs) > 0:
for e in bufs:
try:
wpipe.send(e)
except:
os._exit(1)
else:
time.sleep(0.05)
    def oss_open(self):
        import ossaudiodev
        self.oss = ossaudiodev.open("/dev/dsp" + str(self.card) + ".0", "r")
        self.oss.setfmt(ossaudiodev.AFMT_S16_LE)
        self.oss.channels(2)
        # oss_thread() de-interleaves with self.chans, so record it here too.
        self.chans = 2
        assert self.oss.speed(self.rate) == self.rate
self.th = threading.Thread(target=lambda : self.oss_thread())
self.th.daemon = True
self.th.start()
# dedicating reading thread because oss's buffering seems
# to be pretty limited, and wspr.py spends 50 seconds in
# process() while not calling read().
def oss_thread(self):
# XXX the card probably doesn't read the first sample at this
# exact point, and probably doesn't read at exactly self.rate
# samples per second.
self.cardtime = time.time()
while True:
# the read() blocks.
buf = self.oss.read(8192)
assert len(buf) > 0
            both = numpy.frombuffer(buf, dtype=numpy.int16)
got = both[self.chan::self.chans]
self.cardlock.acquire()
self.cardbufs.append(got)
self.cardtime += len(got) / float(self.rate)
self.cardlock.release()
# print levels, to help me adjust volume control.
def levels(self):
while True:
time.sleep(1)
[ buf, junk ] = self.read()
if len(buf) > 0:
print("avg=%.0f max=%.0f" % (numpy.mean(abs(buf)), numpy.max(buf)))
class SDRIP:
def __init__(self, ip, rate):
        if rate is None:
rate = 11025
self.ip = ip
self.rate = rate
self.sdrrate = 32000
self.fm = fmdemod.FMDemod(self.sdrrate)
self.resampler = weakutil.Resampler(self.sdrrate, self.rate)
self.sdr = sdrip.open(ip)
self.sdr.setrate(self.sdrrate)
#self.sdr.setgain(-10)
# now weakcat.SDRIP.read() calls setrun().
#self.sdr.setrun()
self.starttime = None # for faking a sample clock
self.cardcount = 0 # for faking a sample clock
self.bufbuf = [ ]
self.cardlock = threading.Lock()
self.th = threading.Thread(target=lambda : self.sdr_thread())
self.th.daemon = True
self.th.start()
# rate at which len(self.raw_read()) increases.
self.rawrate = self.sdrrate
def junklog(self, msg):
msg1 = "[%s] %s\n" % (self.ip, msg)
#sys.stderr.write(msg1)
f = open("ft8-junk.txt", "a")
f.write(msg1)
f.close()
# returns [ buf, tm ]
# where tm is UNIX seconds of the last sample.
def read(self):
[ buf1, tm ] = self.raw_read()
buf2 = self.postprocess(buf1)
return [ buf2, tm ]
def raw_read(self):
# delay setrun() until the last moment, so that
# all other parameters have likely been set.
        if not self.sdr.running:
self.sdr.setrun()
self.cardlock.acquire()
bufbuf = self.bufbuf
cardcount = self.cardcount
self.bufbuf = [ ]
self.cardlock.release()
        if self.starttime is not None:
buf_time = self.starttime + cardcount / float(self.sdrrate)
else:
buf_time = time.time() # XXX
if len(bufbuf) == 0:
return [ numpy.array([]), buf_time ]
buf1 = numpy.concatenate(bufbuf)
return [ buf1, buf_time ]
def postprocess(self, buf1):
if len(buf1) == 0:
return numpy.array([])
if self.sdr.mode == "usb":
buf2 = weakutil.iq2usb(buf1) # I/Q -> USB
elif self.sdr.mode == "fm":
[ buf2, junk ] = self.fm.demod(buf1) # I/Q -> FM
else:
sys.stderr.write("weakaudio: SDRIP unknown mode %s\n" % (self.sdr.mode))
sys.exit(1)
buf3 = self.resampler.resample(buf2)
return buf3
def sdr_thread(self):
while True:
# read pipe from sub-process.
got = self.sdr.readiq()
self.cardlock.acquire()
self.bufbuf.append(got)
self.cardcount += len(got)
            if self.starttime is None:
self.starttime = time.time()
self.cardlock.release()
# print levels, to help me adjust volume control.
def levels(self):
while True:
time.sleep(1)
[ buf, junk ] = self.read()
if len(buf) > 0:
print("avg=%.0f max=%.0f" % (numpy.mean(abs(buf)), numpy.max(buf)))
class SDRIQ:
def __init__(self, ip, rate):
        if rate is None:
rate = 11025
self.rate = rate
self.sdrrate = 8138
self.bufbuf = [ ]
self.starttime = time.time() # for faking a sample clock
self.cardcount = 0 # for faking a sample clock
self.cardlock = threading.Lock()
self.resampler = weakutil.Resampler(self.sdrrate, self.rate)
self.sdr = sdriq.open(ip)
self.sdr.setrate(self.sdrrate)
self.sdr.setgain(0)
self.sdr.setifgain(18) # I don't know how to set this!
self.th = threading.Thread(target=lambda : self.sdr_thread())
self.th.daemon = True
self.th.start()
self.rawrate = self.sdrrate
# returns [ buf, tm ]
# where tm is UNIX seconds of the last sample.
def read(self):
[ buf1, tm ] = self.raw_read()
buf2 = self.postprocess(buf1)
return [ buf2, tm ]
def raw_read(self):
        if not self.sdr.running:
self.sdr.setrun(True)
self.cardlock.acquire()
bufbuf = self.bufbuf
cardcount = self.cardcount
self.bufbuf = [ ]
self.cardlock.release()
buf_time = self.starttime + cardcount / float(self.sdrrate)
if len(bufbuf) == 0:
return [ numpy.array([]), buf_time ]
buf = numpy.concatenate(bufbuf)
return [ buf, buf_time ]
def postprocess(self, buf1):
if len(buf1) == 0:
return numpy.array([])
buf = weakutil.iq2usb(buf1) # I/Q -> USB
buf = self.resampler.resample(buf)
# no matter how I set its RF or IF gain,
# the SDR-IQ generates peaks around 145000,
# or I and Q values of 65535. cut this down
# so application doesn't think the SDR-IQ is clipping.
buf = buf / 10.0
return buf
def sdr_thread(self):
self.starttime = time.time()
while True:
# read i/q blocks, float64, to reduce CPU time in
# this thread, which drains the UDP socket.
got = self.sdr.readiq()
self.cardlock.acquire()
self.bufbuf.append(got)
self.cardcount += len(got)
self.cardlock.release()
# print levels, to help me adjust volume control.
def levels(self):
while True:
time.sleep(1)
[ buf, junk ] = self.read()
if len(buf) > 0:
print("avg=%.0f max=%.0f" % (numpy.mean(abs(buf)), numpy.max(buf)))
class EB200:
def __init__(self, ip, rate):
        if rate is None:
rate = 8000
self.rate = rate
self.time_mu = threading.Lock()
self.cardtime = time.time() # UNIX time just after last sample in bufbuf
self.sdr = eb200.open(ip)
self.sdrrate = self.sdr.getrate()
self.resampler = weakutil.Resampler(self.sdrrate, self.rate)
# returns [ buf, tm ]
# where tm is UNIX seconds of the last sample.
# blocks until input is available.
def read(self):
buf = self.sdr.readaudio()
self.time_mu.acquire()
self.cardtime += len(buf) / float(self.sdrrate)
buf_time = self.cardtime
self.time_mu.release()
buf = self.resampler.resample(buf)
return [ buf, buf_time ]
# print levels, to help me adjust volume control.
def levels(self):
while True:
time.sleep(1)
[ buf, junk ] = self.read()
if len(buf) > 0:
print("avg=%.0f max=%.0f" % (numpy.mean(abs(buf)), numpy.max(buf)))
class SDRplay:
def __init__(self, dev, rate):
        if rate is None:
rate = 11025
self.rate = rate
self.sdr = sdrplay.open(dev)
self.sdrrate = self.sdr.getrate()
self.resampler = weakutil.Resampler(self.sdrrate, self.rate)
# returns [ buf, tm ]
# where tm is UNIX seconds of the last sample.
def read(self):
[ buf, buf_time ] = self.sdr.readiq()
buf = weakutil.iq2usb(buf) # I/Q -> USB
buf = self.resampler.resample(buf)
return [ buf, buf_time ]
# print levels, to help me adjust volume control.
def levels(self):
while True:
time.sleep(1)
[ buf, junk ] = self.read()
if len(buf) > 0:
print("avg=%.0f max=%.0f" % (numpy.mean(abs(buf)), numpy.max(buf)))
#
# for Usage(), print out a list of audio cards
# and associated number (for the "card" argument).
#
def usage():
import pyaudio
ndev = pya().get_device_count()
sys.stderr.write("sound card numbers for -card and -out:\n")
for i in range(0, ndev):
info = pya().get_device_info_by_index(i)
sys.stderr.write(" %d: %s, channels=%d" % (i,
info['name'],
info['maxInputChannels']))
        if info['maxInputChannels'] > 0:
rates = [ 11025, 12000, 16000, 22050, 44100, 48000 ]
for rate in rates:
try:
ok = pya().is_format_supported(rate,
input_device=i,
input_format=pyaudio.paInt16,
input_channels=1)
except:
ok = False
if ok:
sys.stderr.write(" %d" % (rate))
sys.stderr.write("\n")
sys.stderr.write(" or -card sdrip IPADDR\n")
sys.stderr.write(" or -card sdriq /dev/SERIALPORT\n")
sys.stderr.write(" or -card eb200 IPADDR\n")
sys.stderr.write(" or -card sdrplay sdrplay\n")
# implement -levels.
# print sound card avg/peak once per second, to adjust level.
# never returns.
def levels(card):
if card == None:
sys.stderr.write("-levels requires -card\n")
sys.exit(1)
c = new(card, 11025)
c.levels()
sys.exit(0)
|
repository.py
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import functools
import logging
import os
import re
import shutil
import subprocess
from argparse import ArgumentParser, _SubParsersAction
from contextlib import contextmanager
from textwrap import dedent
from threading import Thread
from pex import dist_metadata
from pex.commands.command import JsonMixin, OutputMixin
from pex.common import (
DETERMINISTIC_DATETIME_TIMESTAMP,
pluralize,
safe_mkdir,
safe_mkdtemp,
safe_open,
)
from pex.compatibility import Queue
from pex.environment import PEXEnvironment
from pex.interpreter import PythonIdentity, PythonInterpreter, spawn_python_job
from pex.jobs import Retain, SpawnedJob, execute_parallel
from pex.pex import PEX
from pex.result import Error, Ok, Result
from pex.third_party.pkg_resources import Distribution
from pex.tools.command import PEXCommand
from pex.typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
from typing import IO, Callable, Iterable, Iterator, Text, Tuple
import attr # vendor:skip
RepositoryFunc = Callable[["Repository", PEX], Result]
else:
from pex.third_party import attr
logger = logging.getLogger(__name__)
@attr.s(frozen=True)
class FindLinksRepo(object):
@classmethod
def serve(
cls,
interpreter, # type: PythonInterpreter
port, # type: int
directory, # type: str
):
# type: (...) -> FindLinksRepo
http_server_module = "SimpleHTTPServer" if interpreter.version[0] == 2 else "http.server"
cmd, http_server_process = interpreter.open_process(
# N.B.: Running Python in unbuffered mode here is critical to being able to read stdout.
args=["-u", "-m", http_server_module, str(port)],
cwd=directory,
stdout=subprocess.PIPE,
)
real_port = Queue() # type: Queue[int]
def read_data():
try:
data = http_server_process.stdout.readline()
match = re.match(br"^Serving HTTP on [^\s]+ port (?P<port>\d+)[^\d]", data)
real_port.put(int(match.group("port")))
finally:
real_port.task_done()
reader = Thread(target=read_data)
reader.daemon = True
reader.start()
real_port.join()
reader.join()
return cls(cmd=cmd, port=real_port.get(), server_process=http_server_process)
cmd = attr.ib() # type: Iterable[str]
port = attr.ib() # type: int
_server_process = attr.ib() # type: subprocess.Popen
@property
def pid(self):
# type: () -> int
return self._server_process.pid
def join(self):
# type: () -> int
return self._server_process.wait()
def kill(self):
# type: () -> None
self._server_process.kill()
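# A hypothetical usage sketch (the interpreter, port and directory arguments
# are assumptions):
#   repo = FindLinksRepo.serve(
#       interpreter=PythonInterpreter.get(), port=0, directory="/tmp/wheels"
#   )
#   print(repo.port)  # OS-assigned port parsed from the server banner
#   repo.kill()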
class Repository(JsonMixin, OutputMixin, PEXCommand):
"""Interact with the Python distribution repository contained in a PEX file."""
@classmethod
def _add_info_arguments(cls, subparsers):
# type: (_SubParsersAction) -> ArgumentParser
info_parser = cast(
ArgumentParser,
subparsers.add_parser(
name="info", help="Print information about the distributions in a PEX file."
),
)
info_parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="Print the distributions requirements in addition to its name version and path.",
)
cls.add_json_options(info_parser, entity="verbose output")
cls.register_global_arguments(info_parser, include_verbosity=False)
return info_parser
@classmethod
def _add_extract_arguments(cls, subparsers):
# type: (_SubParsersAction) -> ArgumentParser
extract_parser = cast(
ArgumentParser,
subparsers.add_parser(
name="extract", help="Extract all distributions from a PEX file."
),
)
extract_parser.add_argument(
"-f",
"--dest-dir",
"--find-links",
"--repo",
metavar="PATH",
help="The path to extract distribution as wheels to.",
)
extract_parser.add_argument(
"-D",
"--sources",
action="store_true",
help="Also extract a wheel for the PEX file sources.",
)
extract_parser.add_argument(
"--use-system-time",
dest="use_system_time",
default=False,
action="store_true",
help=(
"Use the current system time to generate timestamps for the extracted "
"distributions. Otherwise, Pex will use midnight on January 1, 1980. By using "
"system time, the extracted distributions will not be reproducible, meaning that "
"if you were to re-run extraction against the same PEX file then the newly "
"extracted distributions would not be byte-for-byte identical distributions "
"extracted in prior runs."
),
)
extract_parser.add_argument(
"--serve",
action="store_true",
help="Serve the --find-links repo.",
)
extract_parser.add_argument(
"--port",
type=int,
default=0,
metavar="PORT",
help="The port to serve the --find-links repo on.",
)
extract_parser.add_argument(
"--pid-file",
metavar="PATH",
help="The path of a file to write the <pid>:<port> of the find links server to.",
)
cls.register_global_arguments(extract_parser)
return extract_parser
@classmethod
def add_arguments(cls, parser):
# type: (ArgumentParser) -> None
cls.add_output_option(parser, entity="distribution information")
parser.set_defaults(repository_func=functools.partial(cls.show_help, parser))
subparsers = parser.add_subparsers(
description=(
"A PEX distribution repository can be operated on using any of the following "
"subcommands."
)
)
cls._add_info_arguments(subparsers).set_defaults(repository_func=cls._info)
cls._add_extract_arguments(subparsers).set_defaults(repository_func=cls._extract)
def run(self, pex):
# type: (PEX) -> Result
repository_func = cast("RepositoryFunc", self.options.repository_func)
return repository_func(self, pex)
@contextmanager
def _distributions_output(self, pex):
# type: (PEX) -> Iterator[Tuple[Iterable[Distribution], IO]]
with self.output(self.options) as out:
yield tuple(pex.resolve()), out
def _info(self, pex):
# type: (PEX) -> Result
with self._distributions_output(pex) as (distributions, output):
for distribution in distributions:
if self.options.verbose:
requires_python = dist_metadata.requires_python(distribution)
requires_dists = list(dist_metadata.requires_dists(distribution))
self.dump_json(
self.options,
dict(
project_name=distribution.project_name,
version=distribution.version,
requires_python=str(requires_python) if requires_python else None,
requires_dists=[str(dist) for dist in requires_dists],
location=distribution.location,
),
output,
)
else:
output.write(
"{project_name} {version} {location}".format(
project_name=distribution.project_name,
version=distribution.version,
location=distribution.location,
)
)
output.write("\n")
return Ok()
def _extract(self, pex):
# type: (PEX) -> Result
if not self.options.serve and not self.options.dest_dir:
return Error("Specify a --find-links directory to extract wheels to.")
dest_dir = (
os.path.abspath(os.path.expanduser(self.options.dest_dir))
if self.options.dest_dir
else safe_mkdtemp()
)
safe_mkdir(dest_dir)
if self.options.sources:
self._extract_sdist(pex, dest_dir)
def spawn_extract(distribution):
# type: (Distribution) -> SpawnedJob[Text]
env = os.environ.copy()
if not self.options.use_system_time:
# N.B.: The `SOURCE_DATE_EPOCH` env var is semi-standard magic for controlling
# build tools. Wheel has supported this since 2016.
# See:
# + https://reproducible-builds.org/docs/source-date-epoch/
# + https://github.com/pypa/wheel/blob/1b879e53fed1f179897ed47e55a68bc51df188db/wheel/archive.py#L36-L39
env.update(SOURCE_DATE_EPOCH=str(int(DETERMINISTIC_DATETIME_TIMESTAMP)))
job = spawn_python_job(
args=["-m", "wheel", "pack", "--dest-dir", dest_dir, distribution.location],
interpreter=pex.interpreter,
expose=["wheel"],
stdout=subprocess.PIPE,
env=env,
)
return SpawnedJob.stdout(
job, result_func=lambda out: "{}: {}".format(distribution, out.decode())
)
with self._distributions_output(pex) as (distributions, output):
errors = []
for result in execute_parallel(distributions, spawn_extract, error_handler=Retain()):
if isinstance(result, tuple):
distribution, error = result
errors.append(distribution)
output.write(
"Failed to build a wheel for {distribution}: {error}\n".format(
distribution=distribution, error=error
)
)
else:
output.write(result)
if errors:
return Error(
"Failed to build wheels for {count} {distributions}.".format(
count=len(errors), distributions=pluralize(errors, "distribution")
)
)
if not self.options.serve:
return Ok()
repo = FindLinksRepo.serve(
interpreter=pex.interpreter, port=self.options.port, directory=dest_dir
)
output.write(
"Serving find-links repo of {pex} via {find_links} at http://localhost:{port}\n".format(
pex=os.path.normpath(pex.path()), find_links=dest_dir, port=repo.port
)
)
if self.options.pid_file:
with safe_open(self.options.pid_file, "w") as fp:
fp.write("{}:{}".format(repo.pid, repo.port))
try:
return Result(exit_code=repo.join(), message=" ".join(repo.cmd))
except KeyboardInterrupt:
repo.kill()
return Ok("Shut down server for find links repo at {}.".format(dest_dir))
@staticmethod
def _extract_sdist(
pex, # type: PEX
dest_dir, # type: str
):
# type: (...) -> None
pex_info = pex.pex_info()
chroot = safe_mkdtemp()
pex_path = pex.path()
src = os.path.join(chroot, "src")
excludes = ["__main__.py", pex_info.PATH, pex_info.bootstrap, pex_info.internal_cache]
shutil.copytree(
PEXEnvironment.mount(pex_path).path, src, ignore=lambda _dir, _names: excludes
)
name, _ = os.path.splitext(os.path.basename(pex_path))
version = "0.0.0+{}".format(pex_info.code_hash)
zip_safe = False # Since PEX files never require code to be zip safe, assume it isn't.
py_modules = [os.path.splitext(f)[0] for f in os.listdir(src) if f.endswith(".py")]
packages = [
os.path.relpath(os.path.join(root, d), src).replace(os.sep, ".")
for root, dirs, _ in os.walk(src)
for d in dirs
]
install_requires = [str(req) for req in pex_info.requirements]
python_requires = None
if len(pex_info.interpreter_constraints) == 1:
python_requires = str(
PythonIdentity.parse_requirement(pex_info.interpreter_constraints[0]).specifier
)
elif pex_info.interpreter_constraints:
logger.warning(
"Omitting `python_requires` for {name} sdist since {pex} has multiple "
"interpreter constraints:\n{interpreter_constraints}".format(
name=name,
pex=os.path.normpath(pex_path),
interpreter_constraints="\n".join(
"{index}.) {constraint}".format(index=index, constraint=constraint)
for index, constraint in enumerate(
pex_info.interpreter_constraints, start=1
)
),
)
)
entry_points = []
if pex_info.entry_point and ":" in pex_info.entry_point:
entry_points = [(name, pex_info.entry_point)]
with open(os.path.join(chroot, "setup.cfg"), "w") as fp:
fp.write(
dedent(
"""\
[metadata]
name = {name}
version = {version}
[options]
zip_safe = {zip_safe}
{py_modules}
{packages}
package_dir =
=src
include_package_data = True
{python_requires}
{install_requires}
[options.entry_points]
{entry_points}
"""
).format(
name=name,
version=version,
zip_safe=zip_safe,
py_modules=(
"py_modules =\n {}".format("\n ".join(py_modules)) if py_modules else ""
),
packages=(
"packages = \n {}".format("\n ".join(packages)) if packages else ""
),
install_requires=(
"install_requires =\n {}".format("\n ".join(install_requires))
if install_requires
else ""
),
python_requires=(
"python_requires = {}".format(python_requires) if python_requires else ""
),
entry_points=(
"console_scripts =\n {}".format(
"\n ".join(
"{} = {}".format(name, entry_point)
for name, entry_point in entry_points
)
)
if entry_points
else ""
),
)
)
with open(os.path.join(chroot, "MANIFEST.in"), "w") as fp:
fp.write("recursive-include src *")
with open(os.path.join(chroot, "setup.py"), "w") as fp:
fp.write("import setuptools; setuptools.setup()")
spawn_python_job(
args=["setup.py", "sdist", "--dist-dir", dest_dir],
interpreter=pex.interpreter,
expose=["setuptools"],
cwd=chroot,
).wait()
|
__init__.py
|
# coding=utf-8
"""
© 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
from collections import defaultdict
import ConfigParser
import errno
import logging
import os
import threading
import naarad.utils
import naarad.naarad_constants as CONSTANTS
from naarad_imports import metric_classes
from naarad_imports import aggregate_metric_classes
from naarad_imports import graphing_modules
from naarad_imports import reporting_modules
from naarad.reporting.diff import Diff
from naarad.reporting.diff import NaaradReport
logger = logging.getLogger('naarad')
class _Analysis(object):
"""
Class that saves state for analysis to be conducted
"""
def __init__(self, ts_start, config, test_id=None):
self.ts_start = ts_start
self.ts_end = None
self.test_id = test_id
self.config = config
self.description = ''
self.input_directory = None
self.output_directory = None
self.resource_path = 'resources'
self.status = CONSTANTS.OK
self.sla_data = {}
self.stats_data = {}
self.variables = None
class Naarad(object):
"""
Naarad base class that will let the caller run multiple naarad analysis
"""
def __init__(self):
self._default_test_id = -1
self._analyses = {}
self._resource_path = 'resources'
self._input_directory = None
self._output_directory = None
self.return_exit_code = False
self.skip_plots = False
self.available_graphing_modules = graphing_modules
logger.info('Available graphing modules: %s ', ','.join(self.available_graphing_modules.keys()))
naarad.metrics.metric.Metric.graphing_modules = self.available_graphing_modules
naarad.reporting.diff.Diff.graphing_modules = self.available_graphing_modules
naarad.metrics.metric.Metric.device_types = CONSTANTS.device_type_metrics
def signal_start(self, config, test_id=None, **kwargs):
"""
Initialize an analysis object and set ts_start for the analysis represented by test_id
:param test_id: integer that represents the analysis
:param config: config can be a ConfigParser.ConfigParser object or a string specifying local or http(s) location
for config
:return: test_id
"""
if not test_id:
self._default_test_id += 1
test_id = self._default_test_id
self._analyses[test_id] = _Analysis(naarad.utils.get_standardized_timestamp('now', None), config,
test_id=test_id)
if kwargs:
if 'description' in kwargs.keys():
self._analyses[test_id].description = kwargs['description']
if 'input_directory' in kwargs.keys():
self._analyses[test_id].input_directory = kwargs['input_directory']
if 'output_directory' in kwargs.keys():
self._analyses[test_id].output_directory = kwargs['output_directory']
return test_id
def signal_stop(self, test_id=None):
"""
Set ts_end for the analysis represented by test_id
:param test_id: integer that represents the analysis
:return: test_id
"""
if test_id is None:
test_id = self._default_test_id
if self._analyses[test_id].ts_end:
return CONSTANTS.OK
self._analyses[test_id].ts_end = naarad.utils.get_standardized_timestamp('now', None)
return CONSTANTS.OK
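  # A hypothetical usage sketch (config path and directories are assumptions):
  #   n = Naarad()
  #   tid = n.signal_start('naarad.cfg', description='load test run 1')
  #   ... execute the workload under test ...
  #   n.signal_stop(tid)
  #   n.analyze('/var/logs/run1', '/var/reports/run1')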
def get_failed_analyses(self):
"""
Returns a list of test_id for which naarad analysis failed
:return: list of test_ids
"""
failed_analyses = []
for test_id in self._analyses.keys():
if self._analyses[test_id].status != CONSTANTS.OK:
failed_analyses.append(test_id)
return failed_analyses
def get_sla_data(self, test_id):
"""
Returns sla data for all the metrics associated with a test_id
:return: dict of form { metric.label:metric.sla_map}
"""
return self._analyses[test_id].sla_data
def _set_sla_data(self, test_id, metrics):
"""
Get sla data from each metric and set it in the _Analysis object specified by test_id to make it available
for retrieval
    :return: currently always returns CONSTANTS.OK. May be enhanced in the future to return additional status.
"""
for metric in metrics:
self._analyses[test_id].sla_data[metric.label] = metric.sla_map
return CONSTANTS.OK
def get_stats_data(self, test_id):
"""
Returns summary stats data for all the metrics associated with a test_id
:return: dict of form { metric.label:metric.summary_stats}
"""
return self._analyses[test_id].stats_data
def _set_stats_data(self, test_id, metrics):
"""
Get summary stats data from each metric and set it in the _Analysis object specified by test_id to make it available
for retrieval
    :return: currently always returns CONSTANTS.OK. May be enhanced in the future to return additional status.
"""
for metric in metrics:
self._analyses[test_id].stats_data[metric.label] = metric.summary_stats
return CONSTANTS.OK
def _create_output_directories(self, analysis):
"""
Create the necessary output and resource directories for the specified analysis
:param: analysis: analysis associated with a given test_id
"""
try:
os.makedirs(analysis.output_directory)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
try:
resource_directory = os.path.join(analysis.output_directory, analysis.resource_path)
os.makedirs(resource_directory)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def _run_pre(self, analysis, run_steps):
"""
If Naarad is run in CLI mode, execute any pre run steps specified in the config. ts_start/ts_end are set based on
workload run steps if any.
:param: analysis: The analysis object being processed
    :param: run_steps: list of pre run steps
"""
workload_run_steps = []
for run_step in sorted(run_steps, key=lambda step: step.run_rank):
run_step.run()
if run_step.run_type == CONSTANTS.RUN_TYPE_WORKLOAD:
workload_run_steps.append(run_step)
# Get analysis time period from workload run steps
if len(workload_run_steps) > 0:
analysis.ts_start, analysis.ts_end = naarad.utils.get_run_time_period(workload_run_steps)
return CONSTANTS.OK
def _run_post(self, run_steps):
"""
If Naarad is run in CLI mode, execute any post run steps specified in the config
:param: run_steps: list of post run steps
"""
for run_step in sorted(run_steps, key=lambda step: step.run_rank):
run_step.run()
return CONSTANTS.OK
def _process_args(self, analysis, args):
"""
    When Naarad is run in CLI mode, get the command-line arguments and update the analysis
:param: analysis: The analysis being processed
:param: args: Command Line Arguments received by naarad
"""
if args.exit_code:
self.return_exit_code = args.exit_code
if args.no_plots:
self.skip_plots = args.no_plots
if args.start:
analysis.ts_start = naarad.utils.get_standardized_timestamp(args.start, None)
if args.end:
analysis.ts_end = naarad.utils.get_standardized_timestamp(args.end, None)
if args.variables:
analysis.variables = naarad.utils.get_variables(args)
return CONSTANTS.OK
def analyze(self, input_directory, output_directory, **kwargs):
"""
Run all the analysis saved in self._analyses, sorted by test_id
:param: input_directory: location of log files
:param: output_directory: root directory for analysis output
:param: **kwargs: Optional keyword args
:return: int: status code.
"""
is_api_call = True
if len(self._analyses) == 0:
if 'config' not in kwargs.keys():
return CONSTANTS.ERROR
self._analyses[0] = _Analysis(None, kwargs['config'], test_id=0)
if 'args' in kwargs:
self._process_args(self._analyses[0], kwargs['args'])
is_api_call = False
error_count = 0
self._input_directory = input_directory
self._output_directory = output_directory
for test_id in sorted(self._analyses.keys()):
if not self._analyses[test_id].input_directory:
self._analyses[test_id].input_directory = input_directory
if not self._analyses[test_id].output_directory:
if len(self._analyses) > 1:
self._analyses[test_id].output_directory = os.path.join(output_directory, str(test_id))
else:
self._analyses[test_id].output_directory = output_directory
      if ('config' in kwargs) and not self._analyses[test_id].config:
self._analyses[test_id].config = kwargs['config']
self._create_output_directories(self._analyses[test_id])
self._analyses[test_id].status = self.run(self._analyses[test_id], is_api_call, **kwargs)
if self._analyses[test_id].status != CONSTANTS.OK:
error_count += 1
if error_count > 0:
return CONSTANTS.ERROR
else:
return CONSTANTS.OK
def run(self, analysis, is_api_call, **kwargs):
"""
    :param analysis: the analysis object to run naarad analysis on
    :param **kwargs: Additional keyword args can be passed in here for future enhancements
    :return: int: status code
"""
threads = []
crossplots = []
report_args = {}
metrics = defaultdict()
run_steps = defaultdict(list)
discovery_mode = False
graph_timezone = None
graphing_library = None
if isinstance(analysis.config, str):
if not naarad.utils.is_valid_file(analysis.config):
return CONSTANTS.INVALID_CONFIG
config_object = ConfigParser.ConfigParser(analysis.variables)
config_object.optionxform = str
config_object.read(analysis.config)
elif isinstance(analysis.config, ConfigParser.ConfigParser):
config_object = analysis.config
else:
if is_api_call:
return CONSTANTS.INVALID_CONFIG
else:
metrics['metrics'] = naarad.utils.discover_by_name(analysis.input_directory, analysis.output_directory)
if len(metrics['metrics']) == 0:
logger.warning('Unable to auto detect metrics in the specified input directory: %s', analysis.input_directory)
return CONSTANTS.ERROR
else:
discovery_mode = True
metrics['aggregate_metrics'] = []
if not discovery_mode:
metrics, run_steps, crossplots, report_args, graph_timezone, graphing_library = self._process_naarad_config(config_object, analysis)
if graphing_library is None:
graphing_library = CONSTANTS.DEFAULT_GRAPHING_LIBRARY
# If graphing libraries are not installed, skip static images
    if graphing_library not in self.available_graphing_modules:
logger.error("Naarad cannot import graphing library %s on your system. Will not generate static charts", graphing_library)
self.skip_plots = True
if not is_api_call:
self._run_pre(analysis, run_steps['pre'])
for metric in metrics['metrics']:
if analysis.ts_start:
metric.ts_start = analysis.ts_start
if analysis.ts_end:
metric.ts_end = analysis.ts_end
thread = threading.Thread(target=naarad.utils.parse_and_plot_single_metrics, args=(metric, graph_timezone, analysis.output_directory, analysis.input_directory, graphing_library, self.skip_plots))
thread.start()
threads.append(thread)
for t in threads:
t.join()
for metric in metrics['aggregate_metrics']:
thread = threading.Thread(target=naarad.utils.parse_and_plot_single_metrics, args=(metric, graph_timezone, analysis.output_directory, analysis.input_directory, graphing_library, self.skip_plots))
thread.start()
threads.append(thread)
for t in threads:
t.join()
self._set_sla_data(analysis.test_id, metrics['metrics'] + metrics['aggregate_metrics'])
self._set_stats_data(analysis.test_id, metrics['metrics'] + metrics['aggregate_metrics'])
if len(crossplots) > 0 and not self.skip_plots:
correlated_plots = naarad.utils.nway_plotting(crossplots, metrics['metrics'] + metrics['aggregate_metrics'],
os.path.join(analysis.output_directory, analysis.resource_path),
analysis.resource_path, graphing_library)
else:
correlated_plots = []
rpt = reporting_modules['report'](None, analysis.output_directory, os.path.join(analysis.output_directory, analysis.resource_path), analysis.resource_path, metrics['metrics'] + metrics['aggregate_metrics'], correlated_plots=correlated_plots, **report_args)
rpt.generate()
if not is_api_call:
self._run_post(run_steps['post'])
if self.return_exit_code:
for metric in metrics['metrics'] + metrics['aggregate_metrics']:
if metric.status == CONSTANTS.SLA_FAILED:
return CONSTANTS.SLA_FAILURE
return CONSTANTS.OK
def diff(self, test_id_1, test_id_2, config=None, **kwargs):
"""
Create a diff report using test_id_1 as a baseline
:param: test_id_1: test id to be used as baseline
:param: test_id_2: test id to compare against baseline
:param: config file for diff (optional)
:param: **kwargs: keyword arguments
"""
    output_directory = os.path.join(self._output_directory, 'diff_' + str(test_id_1) + '_' + str(test_id_2))
if kwargs:
if 'output_directory' in kwargs.keys():
output_directory = kwargs['output_directory']
diff_report = Diff([NaaradReport(self._analyses[test_id_1].output_directory, None),
NaaradReport(self._analyses[test_id_2].output_directory, None)],
'diff', output_directory, os.path.join(output_directory, self._resource_path),
self._resource_path)
if config:
naarad.utils.extract_diff_sla_from_config_file(diff_report, config)
diff_report.generate()
if diff_report.sla_failures > 0:
return CONSTANTS.SLA_FAILURE
if diff_report.status != 'OK':
return CONSTANTS.ERROR
return CONSTANTS.OK
def diff_reports_by_location(self, report1_location, report2_location, output_directory, config=None, **kwargs):
"""
Create a diff report using report1 as a baseline
:param: report1_location: report to be used as baseline
:param: report2_location: report to compare against baseline
:param: config file for diff (optional)
:param: **kwargs: keyword arguments
"""
if kwargs:
if 'output_directory' in kwargs.keys():
output_directory = kwargs['output_directory']
diff_report = Diff([NaaradReport(report1_location, None), NaaradReport(report2_location, None)], 'diff',
output_directory, os.path.join(output_directory, self._resource_path), self._resource_path)
if config:
naarad.utils.extract_diff_sla_from_config_file(diff_report, config)
diff_report.generate()
if diff_report.sla_failures > 0:
return CONSTANTS.SLA_FAILURE
if diff_report.status != 'OK':
return CONSTANTS.ERROR
return CONSTANTS.OK
def _process_naarad_config(self, config, analysis):
"""
Process the config file associated with a particular analysis and return metrics, run_steps and crossplots.
    Also sets output directory and resource_path for an analysis
"""
graph_timezone = None
output_directory = analysis.output_directory
resource_path = analysis.resource_path
run_steps = defaultdict(list)
metrics = defaultdict(list)
indir_default = ''
crossplots = []
report_args = {}
graphing_library = None
if config.has_section('GLOBAL'):
ts_start, ts_end = naarad.utils.parse_global_section(config, 'GLOBAL')
if config.has_option('GLOBAL', 'user_defined_metrics'):
naarad.utils.parse_user_defined_metric_classes(config, metric_classes)
config.remove_section('GLOBAL')
if config.has_section('REPORT'):
report_args = naarad.utils.parse_report_section(config, 'REPORT')
config.remove_section('REPORT')
for section in config.sections():
# GRAPH section is optional
if section == 'GRAPH':
graphing_library, crossplots, outdir_default, indir_default, graph_timezone = \
naarad.utils.parse_graph_section(config, section, output_directory, indir_default)
elif section.startswith('RUN-STEP'):
run_step = naarad.utils.parse_run_step_section(config, section)
if not run_step:
logger.error('Ignoring section %s, could not parse it correctly', section)
continue
if run_step.run_order == CONSTANTS.PRE_ANALYSIS_RUN:
run_steps['pre'].append(run_step)
# DURING_ANALYSIS_RUN not supported yet
elif run_step.run_order == CONSTANTS.DURING_ANALYSIS_RUN:
run_steps['in'].append(run_step)
elif run_step.run_order == CONSTANTS.POST_ANALYSIS_RUN:
run_steps['post'].append(run_step)
else:
logger.error('Unknown RUN-STEP run_order specified')
else:
# section name is used to create sub-directories, so enforce it.
if not naarad.utils.is_valid_metric_name(section):
logger.critical('Section name %s is invalid! Only letters, digits, dot(.), dash(-), underscore(_) are allowed'
% section)
return CONSTANTS.CRITICAL_FAILURE
if section == 'SAR-*':
hostname, infile, label, ts_start, ts_end, precision, kwargs, rule_strings = \
naarad.utils.parse_basic_metric_options(config, section)
sar_metrics = naarad.utils.get_all_sar_objects(metrics, infile, hostname, output_directory, label, ts_start,
ts_end, None)
metrics['metrics'].extend(sar_metrics)
else:
new_metric = naarad.utils.parse_metric_section(config, section, metric_classes, metrics['metrics'],
aggregate_metric_classes, output_directory, resource_path)
new_metric.bin_path = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))),'bin'))
metric_type = section.split('-')[0]
if metric_type in aggregate_metric_classes:
metrics['aggregate_metrics'].append(new_metric)
else:
metrics['metrics'].append(new_metric)
return metrics, run_steps, crossplots, report_args, graph_timezone, graphing_library
|
joystick.py
|
# Copyright (c) 2021 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel@uavcan.org>
from __future__ import annotations
from typing import Iterable, Tuple, Callable, Dict, Optional, List
import sys
import functools
import threading
import yakut
from . import Controller, Sample, ControllerError, ControllerNotFoundError
try:
import sdl2 # type: ignore
except ImportError as ex:
raise ImportError("Please install libsdl2: http://www.libsdl.org/download-2.0.php") from ex
class JoystickController(Controller):
"""
Interface for standard HID joystick.
"""
def __init__(self, index: int) -> None:
self._handle = sdl2.joystick.SDL_JoystickOpen(index)
if not self._handle:
raise ControllerNotFoundError(f"Cannot open joystick {index}")
self._id = sdl2.joystick.SDL_JoystickInstanceID(self._handle)
self._name = str(sdl2.joystick.SDL_JoystickNameForIndex(index).decode())
self._update_hook: Callable[[], None] = lambda: None
n_axes = sdl2.joystick.SDL_JoystickNumAxes(self._handle)
n_hats = sdl2.joystick.SDL_JoystickNumHats(self._handle)
n_buttons = sdl2.joystick.SDL_JoystickNumButtons(self._handle)
self._axes: List[float] = [
JoystickController._scale_axis(sdl2.joystick.SDL_JoystickGetAxis(self._handle, i)) for i in range(n_axes)
]
self._hats: List[Tuple[int, int]] = [
JoystickController._split_hat(sdl2.joystick.SDL_JoystickGetHat(self._handle, i)) for i in range(n_hats)
]
        self._buttons: List[bool] = [
            bool(sdl2.joystick.SDL_JoystickGetButton(self._handle, i)) for i in range(n_buttons)
        ]
self._counters: List[int] = [0 for _ in self._buttons]
_registry[self._id] = self._callback
_logger.info(
"%s: Joystick %r initial state: axes=%s hats=%s buttons=%s",
self,
index,
self._axes,
self._hats,
self._buttons,
)
@property
def name(self) -> str:
return self._name
def sample(self) -> Sample:
with _lock:
if _exception:
raise ControllerError("Worker thread failed") from _exception
axes_and_hats = self._axes.copy()
for x, y in self._hats:
axes_and_hats.append(float(x))
axes_and_hats.append(float(y))
return Sample(
axis=dict(enumerate(axes_and_hats)),
button=dict(enumerate(self._buttons)),
toggle={k: v % 2 != 0 for k, v in enumerate(self._counters)},
)
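    # Example: a button pressed and released twice leaves its counter at 2, so
    # its toggle reads False; pressed once (or currently held), the counter is
    # odd and the toggle reads True.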
def set_update_hook(self, hook: Callable[[], None]) -> None:
self._update_hook = hook
def close(self) -> None:
with _lock:
_registry.pop(self._id, None)
sdl2.joystick.SDL_JoystickClose(self._handle)
def _callback(self, event: sdl2.SDL_Event) -> None:
if event.type == sdl2.SDL_JOYAXISMOTION:
self._axes[event.jaxis.axis] = JoystickController._scale_axis(event.jaxis.value)
elif event.type in (sdl2.SDL_JOYBUTTONDOWN, sdl2.SDL_JOYBUTTONUP):
if event.jbutton.state == sdl2.SDL_PRESSED:
self._buttons[event.jbutton.button] = True
self._counters[event.jbutton.button] += 1
else:
self._buttons[event.jbutton.button] = False
elif event.type == sdl2.SDL_JOYHATMOTION:
self._hats[event.jhat.hat] = JoystickController._split_hat(event.jhat.value)
else:
_logger.debug("%s: Event dropped: %r", self, event)
self._update_hook()
@staticmethod
def _scale_axis(raw: int) -> float:
if raw >= 0:
return raw / 32767.0
return raw / 32768.0
@staticmethod
def _split_hat(value: int) -> Tuple[int, int]:
return (
(bool(value & sdl2.SDL_HAT_RIGHT) - bool(value & sdl2.SDL_HAT_LEFT)),
(bool(value & sdl2.SDL_HAT_UP) - bool(value & sdl2.SDL_HAT_DOWN)),
)
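    # Examples: _scale_axis(32767) -> 1.0 and _scale_axis(-32768) -> -1.0;
    # _split_hat(sdl2.SDL_HAT_RIGHTUP) -> (1, 1) and
    # _split_hat(sdl2.SDL_HAT_CENTERED) -> (0, 0).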
@staticmethod
def list_controllers() -> Iterable[Tuple[str, Callable[[], Controller]]]:
def construct(index: int) -> Controller:
with _lock:
return JoystickController(index)
if not _init_done.wait(10.0): # pragma: no cover
raise _exception or ControllerError("The worker thread has failed to initialize")
with _lock:
num_joys = sdl2.joystick.SDL_NumJoysticks()
for idx in range(num_joys):
name = sdl2.joystick.SDL_JoystickNameForIndex(idx).decode()
yield name, functools.partial(construct, idx)
_exception: Optional[Exception] = None
_lock = threading.RLock()
_init_done = threading.Event()
_registry: Dict[sdl2.SDL_JoystickID, Callable[[sdl2.SDL_Event], None]] = {}
def _dispatch_joy(joystick: sdl2.SDL_JoystickID, event: sdl2.SDL_Event) -> None:
with _lock:
try:
_registry[joystick](event)
except KeyError:
_logger.debug("No handler for joystick %r; dropping event %r", joystick, event)
def _run_sdl2() -> None:
    # Should we ever require SDL2 somewhere else in this app, this logic will have to be extracted into a shared component.
global _exception # pylint: disable=global-statement
try:
import ctypes
# Initialization and event processing should be done in the same thread.
init_subsystems = sdl2.SDL_INIT_JOYSTICK
if sys.platform.startswith("win"): # pragma: no cover
            # I don't understand why this is necessary, but the joystick does not work on Windows without the video subsystem
init_subsystems |= sdl2.SDL_INIT_VIDEO
err = sdl2.SDL_Init(init_subsystems)
if err != 0:
raise ControllerError(f"Could not initialize SDL2: {sdl2.SDL_GetError()!r}")
sdl2.SDL_JoystickEventState(sdl2.SDL_ENABLE)
sdl2.SDL_SetHint(sdl2.SDL_HINT_JOYSTICK_ALLOW_BACKGROUND_EVENTS, b"1")
_logger.debug("SDL2 initialized successfully, entering the event loop")
_init_done.set()
event = sdl2.SDL_Event()
while True:
if sdl2.SDL_WaitEvent(ctypes.byref(event)) != 1:
raise ControllerError(f"Could not poll event: {sdl2.SDL_GetError()!r}")
if event.type == sdl2.SDL_JOYAXISMOTION:
_dispatch_joy(event.jaxis.which, event)
elif event.type == sdl2.SDL_JOYBALLMOTION:
_dispatch_joy(event.jball.which, event)
elif event.type in (sdl2.SDL_JOYBUTTONDOWN, sdl2.SDL_JOYBUTTONUP):
_dispatch_joy(event.jbutton.which, event)
elif event.type == sdl2.SDL_JOYHATMOTION:
_dispatch_joy(event.jhat.which, event)
else:
_logger.debug("Event dropped: %r", event)
except Exception as ex: # pylint: disable=broad-except
_exception = ex
_logger.exception("SDL2 worker thread failed: %s", ex)
_logger = yakut.get_logger(__name__)
threading.Thread(target=_run_sdl2, name="sdl2_worker", daemon=True).start()
|
processing1.py
|
#!/usr/bin/env python
from processing import Process, Queue
import time
def f(q):
    x = q.get()
    print "Process number %s, sleeps for %s seconds" % (x, x)
    time.sleep(x)
    print "Process number %s finished" % x
q = Queue()
procs = []
for i in range(10):
    q.put(i)
    # don't shadow the loop variable; keep every worker so all can be joined
    p = Process(target=f, args=[q])
    p.start()
    procs.append(p)
print "main process joins on the workers"
for p in procs:
    p.join()
print "Main Program finished"
|
test_channel.py
|
import io
import unittest
import pytest
class TestHTTPChannel(unittest.TestCase):
def _makeOne(self, sock, addr, adj, map=None):
from waitress.channel import HTTPChannel
server = DummyServer()
return HTTPChannel(server, sock, addr, adj=adj, map=map)
def _makeOneWithMap(self, adj=None):
if adj is None:
adj = DummyAdjustments()
sock = DummySock()
map = {}
inst = self._makeOne(sock, "127.0.0.1", adj, map=map)
inst.outbuf_lock = DummyLock()
return inst, sock, map
def test_ctor(self):
inst, _, map = self._makeOneWithMap()
self.assertEqual(inst.addr, "127.0.0.1")
self.assertEqual(inst.sendbuf_len, 2048)
self.assertEqual(map[100], inst)
def test_total_outbufs_len_an_outbuf_size_gt_sys_maxint(self):
from waitress.compat import MAXINT
inst, _, map = self._makeOneWithMap()
class DummyBuffer:
chunks = []
def append(self, data):
self.chunks.append(data)
class DummyData:
def __len__(self):
return MAXINT
inst.total_outbufs_len = 1
inst.outbufs = [DummyBuffer()]
inst.write_soon(DummyData())
# we are testing that this method does not raise an OverflowError
# (see https://github.com/Pylons/waitress/issues/47)
self.assertEqual(inst.total_outbufs_len, MAXINT + 1)
def test_writable_something_in_outbuf(self):
inst, sock, map = self._makeOneWithMap()
inst.total_outbufs_len = 3
self.assertTrue(inst.writable())
def test_writable_nothing_in_outbuf(self):
inst, sock, map = self._makeOneWithMap()
self.assertFalse(inst.writable())
def test_writable_nothing_in_outbuf_will_close(self):
inst, sock, map = self._makeOneWithMap()
inst.will_close = True
self.assertTrue(inst.writable())
def test_handle_write_not_connected(self):
inst, sock, map = self._makeOneWithMap()
inst.connected = False
self.assertFalse(inst.handle_write())
def test_handle_write_with_requests(self):
inst, sock, map = self._makeOneWithMap()
inst.requests = True
inst.last_activity = 0
result = inst.handle_write()
self.assertEqual(result, None)
self.assertEqual(inst.last_activity, 0)
def test_handle_write_no_request_with_outbuf(self):
inst, sock, map = self._makeOneWithMap()
inst.requests = []
inst.outbufs = [DummyBuffer(b"abc")]
inst.total_outbufs_len = len(inst.outbufs[0])
inst.last_activity = 0
result = inst.handle_write()
self.assertEqual(result, None)
self.assertNotEqual(inst.last_activity, 0)
self.assertEqual(sock.sent, b"abc")
def test_handle_write_outbuf_raises_socketerror(self):
import socket
inst, sock, map = self._makeOneWithMap()
inst.requests = []
outbuf = DummyBuffer(b"abc", socket.error)
inst.outbufs = [outbuf]
inst.total_outbufs_len = len(outbuf)
inst.last_activity = 0
inst.logger = DummyLogger()
result = inst.handle_write()
self.assertEqual(result, None)
self.assertEqual(inst.last_activity, 0)
self.assertEqual(sock.sent, b"")
self.assertEqual(len(inst.logger.exceptions), 1)
self.assertTrue(outbuf.closed)
def test_handle_write_outbuf_raises_othererror(self):
inst, sock, map = self._makeOneWithMap()
inst.requests = []
outbuf = DummyBuffer(b"abc", IOError)
inst.outbufs = [outbuf]
inst.total_outbufs_len = len(outbuf)
inst.last_activity = 0
inst.logger = DummyLogger()
result = inst.handle_write()
self.assertEqual(result, None)
self.assertEqual(inst.last_activity, 0)
self.assertEqual(sock.sent, b"")
self.assertEqual(len(inst.logger.exceptions), 1)
self.assertTrue(outbuf.closed)
def test_handle_write_no_requests_no_outbuf_will_close(self):
inst, sock, map = self._makeOneWithMap()
inst.requests = []
outbuf = DummyBuffer(b"")
inst.outbufs = [outbuf]
inst.will_close = True
inst.last_activity = 0
result = inst.handle_write()
self.assertEqual(result, None)
self.assertEqual(inst.connected, False)
self.assertEqual(sock.closed, True)
self.assertEqual(inst.last_activity, 0)
self.assertTrue(outbuf.closed)
def test_handle_write_no_requests_outbuf_gt_send_bytes(self):
inst, sock, map = self._makeOneWithMap()
inst.requests = [True]
inst.outbufs = [DummyBuffer(b"abc")]
inst.total_outbufs_len = len(inst.outbufs[0])
inst.adj.send_bytes = 2
inst.will_close = False
inst.last_activity = 0
result = inst.handle_write()
self.assertEqual(result, None)
self.assertEqual(inst.will_close, False)
self.assertTrue(inst.outbuf_lock.acquired)
self.assertEqual(sock.sent, b"abc")
def test_handle_write_close_when_flushed(self):
inst, sock, map = self._makeOneWithMap()
outbuf = DummyBuffer(b"abc")
inst.outbufs = [outbuf]
inst.total_outbufs_len = len(outbuf)
inst.will_close = False
inst.close_when_flushed = True
inst.last_activity = 0
result = inst.handle_write()
self.assertEqual(result, None)
self.assertEqual(inst.will_close, True)
self.assertEqual(inst.close_when_flushed, False)
self.assertEqual(sock.sent, b"abc")
self.assertTrue(outbuf.closed)
def test_readable_no_requests_not_will_close(self):
inst, sock, map = self._makeOneWithMap()
inst.requests = []
inst.will_close = False
self.assertEqual(inst.readable(), True)
def test_readable_no_requests_will_close(self):
inst, sock, map = self._makeOneWithMap()
inst.requests = []
inst.will_close = True
self.assertEqual(inst.readable(), False)
def test_readable_with_requests(self):
inst, sock, map = self._makeOneWithMap()
inst.requests = [True]
self.assertEqual(inst.readable(), False)
def test_handle_read_no_error(self):
inst, sock, map = self._makeOneWithMap()
inst.will_close = False
inst.recv = lambda *arg: b"abc"
inst.last_activity = 0
L = []
inst.received = lambda x: L.append(x)
result = inst.handle_read()
self.assertEqual(result, None)
self.assertNotEqual(inst.last_activity, 0)
self.assertEqual(L, [b"abc"])
def test_handle_read_error(self):
inst, sock, map = self._makeOneWithMap()
inst.will_close = False
def recv(b):
raise OSError
inst.recv = recv
inst.last_activity = 0
inst.logger = DummyLogger()
result = inst.handle_read()
self.assertEqual(result, None)
self.assertEqual(inst.last_activity, 0)
self.assertEqual(len(inst.logger.exceptions), 1)
def test_write_soon_empty_byte(self):
inst, sock, map = self._makeOneWithMap()
wrote = inst.write_soon(b"")
self.assertEqual(wrote, 0)
self.assertEqual(len(inst.outbufs[0]), 0)
def test_write_soon_nonempty_byte(self):
inst, sock, map = self._makeOneWithMap()
# _flush_some will no longer flush
def send(_):
return 0
sock.send = send
wrote = inst.write_soon(b"a")
self.assertEqual(wrote, 1)
self.assertEqual(len(inst.outbufs[0]), 1)
def test_write_soon_filewrapper(self):
from waitress.buffers import ReadOnlyFileBasedBuffer
f = io.BytesIO(b"abc")
wrapper = ReadOnlyFileBasedBuffer(f, 8192)
wrapper.prepare()
inst, sock, map = self._makeOneWithMap()
# _flush_some will no longer flush
def send(_):
return 0
sock.send = send
outbufs = inst.outbufs
wrote = inst.write_soon(wrapper)
self.assertEqual(wrote, 3)
self.assertEqual(len(outbufs), 2)
self.assertEqual(outbufs[0], wrapper)
self.assertEqual(outbufs[1].__class__.__name__, "OverflowableBuffer")
def test_write_soon_disconnected(self):
from waitress.channel import ClientDisconnected
inst, sock, map = self._makeOneWithMap()
inst.connected = False
self.assertRaises(ClientDisconnected, lambda: inst.write_soon(b"stuff"))
def test_write_soon_disconnected_while_over_watermark(self):
from waitress.channel import ClientDisconnected
inst, sock, map = self._makeOneWithMap()
def dummy_flush():
inst.connected = False
inst._flush_outbufs_below_high_watermark = dummy_flush
self.assertRaises(ClientDisconnected, lambda: inst.write_soon(b"stuff"))
def test_write_soon_rotates_outbuf_on_overflow(self):
inst, sock, map = self._makeOneWithMap()
# _flush_some will no longer flush
def send(_):
return 0
sock.send = send
inst.adj.outbuf_high_watermark = 3
inst.current_outbuf_count = 4
wrote = inst.write_soon(b"xyz")
self.assertEqual(wrote, 3)
self.assertEqual(len(inst.outbufs), 1)
self.assertEqual(inst.outbufs[0].get(), b"xyz")
def test_write_soon_waits_on_backpressure(self):
inst, sock, map = self._makeOneWithMap()
# _flush_some will no longer flush
def send(_):
return 0
sock.send = send
inst.adj.outbuf_high_watermark = 3
inst.total_outbufs_len = 4
inst.current_outbuf_count = 4
class Lock(DummyLock):
def wait(self):
inst.total_outbufs_len = 0
super().wait()
inst.outbuf_lock = Lock()
wrote = inst.write_soon(b"xyz")
self.assertEqual(wrote, 3)
self.assertEqual(len(inst.outbufs), 1)
self.assertEqual(inst.outbufs[0].get(), b"xyz")
self.assertTrue(inst.outbuf_lock.waited)
def test_write_soon_attempts_flush_high_water_and_exception(self):
from waitress.channel import ClientDisconnected
inst, sock, map = self._makeOneWithMap()
# _flush_some will no longer flush, it will raise Exception, which
# disconnects the remote end
def send(_):
inst.connected = False
raise Exception()
sock.send = send
inst.adj.outbuf_high_watermark = 3
inst.total_outbufs_len = 4
inst.current_outbuf_count = 4
inst.outbufs[0].append(b"test")
class Lock(DummyLock):
def wait(self):
inst.total_outbufs_len = 0
super().wait()
inst.outbuf_lock = Lock()
self.assertRaises(ClientDisconnected, lambda: inst.write_soon(b"xyz"))
# Validate we woke up the main thread to deal with the exception of
# trying to send
self.assertTrue(inst.outbuf_lock.waited)
self.assertTrue(inst.server.trigger_pulled)
def test_write_soon_flush_and_exception(self):
inst, sock, map = self._makeOneWithMap()
# _flush_some will no longer flush, it will raise Exception, which
# disconnects the remote end
def send(_):
inst.connected = False
raise Exception()
sock.send = send
wrote = inst.write_soon(b"xyz")
self.assertEqual(wrote, 3)
# Validate we woke up the main thread to deal with the exception of
# trying to send
self.assertTrue(inst.server.trigger_pulled)
def test_handle_write_notify_after_flush(self):
inst, sock, map = self._makeOneWithMap()
inst.requests = [True]
inst.outbufs = [DummyBuffer(b"abc")]
inst.total_outbufs_len = len(inst.outbufs[0])
inst.adj.send_bytes = 1
inst.adj.outbuf_high_watermark = 5
inst.will_close = False
inst.last_activity = 0
result = inst.handle_write()
self.assertEqual(result, None)
self.assertEqual(inst.will_close, False)
self.assertTrue(inst.outbuf_lock.acquired)
self.assertTrue(inst.outbuf_lock.notified)
self.assertEqual(sock.sent, b"abc")
def test_handle_write_no_notify_after_flush(self):
inst, sock, map = self._makeOneWithMap()
inst.requests = [True]
inst.outbufs = [DummyBuffer(b"abc")]
inst.total_outbufs_len = len(inst.outbufs[0])
inst.adj.send_bytes = 1
inst.adj.outbuf_high_watermark = 2
sock.send = lambda x: False
inst.will_close = False
inst.last_activity = 0
result = inst.handle_write()
self.assertEqual(result, None)
self.assertEqual(inst.will_close, False)
self.assertTrue(inst.outbuf_lock.acquired)
self.assertFalse(inst.outbuf_lock.notified)
self.assertEqual(sock.sent, b"")
def test__flush_some_empty_outbuf(self):
inst, sock, map = self._makeOneWithMap()
result = inst._flush_some()
self.assertEqual(result, False)
def test__flush_some_full_outbuf_socket_returns_nonzero(self):
inst, sock, map = self._makeOneWithMap()
inst.outbufs[0].append(b"abc")
inst.total_outbufs_len = sum(len(x) for x in inst.outbufs)
result = inst._flush_some()
self.assertEqual(result, True)
def test__flush_some_full_outbuf_socket_returns_zero(self):
inst, sock, map = self._makeOneWithMap()
sock.send = lambda x: False
inst.outbufs[0].append(b"abc")
inst.total_outbufs_len = sum(len(x) for x in inst.outbufs)
result = inst._flush_some()
self.assertEqual(result, False)
def test_flush_some_multiple_buffers_first_empty(self):
inst, sock, map = self._makeOneWithMap()
sock.send = lambda x: len(x)
buffer = DummyBuffer(b"abc")
inst.outbufs.append(buffer)
inst.total_outbufs_len = sum(len(x) for x in inst.outbufs)
result = inst._flush_some()
self.assertEqual(result, True)
self.assertEqual(buffer.skipped, 3)
self.assertEqual(inst.outbufs, [buffer])
def test_flush_some_multiple_buffers_close_raises(self):
inst, sock, map = self._makeOneWithMap()
sock.send = lambda x: len(x)
buffer = DummyBuffer(b"abc")
inst.outbufs.append(buffer)
inst.total_outbufs_len = sum(len(x) for x in inst.outbufs)
inst.logger = DummyLogger()
def doraise():
raise NotImplementedError
inst.outbufs[0].close = doraise
result = inst._flush_some()
self.assertEqual(result, True)
self.assertEqual(buffer.skipped, 3)
self.assertEqual(inst.outbufs, [buffer])
self.assertEqual(len(inst.logger.exceptions), 1)
def test__flush_some_outbuf_len_gt_sys_maxint(self):
from waitress.compat import MAXINT
inst, sock, map = self._makeOneWithMap()
class DummyHugeOutbuffer:
def __init__(self):
self.length = MAXINT + 1
def __len__(self):
return self.length
def get(self, numbytes):
self.length = 0
return b"123"
buf = DummyHugeOutbuffer()
inst.outbufs = [buf]
inst.send = lambda *arg: 0
result = inst._flush_some()
# we are testing that _flush_some doesn't raise an OverflowError
# when one of its outbufs has a __len__ that returns gt sys.maxint
self.assertEqual(result, False)
def test_handle_close(self):
inst, sock, map = self._makeOneWithMap()
inst.handle_close()
self.assertEqual(inst.connected, False)
self.assertEqual(sock.closed, True)
def test_handle_close_outbuf_raises_on_close(self):
inst, sock, map = self._makeOneWithMap()
def doraise():
raise NotImplementedError
inst.outbufs[0].close = doraise
inst.logger = DummyLogger()
inst.handle_close()
self.assertEqual(inst.connected, False)
self.assertEqual(sock.closed, True)
self.assertEqual(len(inst.logger.exceptions), 1)
def test_add_channel(self):
inst, sock, map = self._makeOneWithMap()
fileno = inst._fileno
inst.add_channel(map)
self.assertEqual(map[fileno], inst)
self.assertEqual(inst.server.active_channels[fileno], inst)
def test_del_channel(self):
inst, sock, map = self._makeOneWithMap()
fileno = inst._fileno
inst.server.active_channels[fileno] = True
inst.del_channel(map)
self.assertEqual(map.get(fileno), None)
self.assertEqual(inst.server.active_channels.get(fileno), None)
def test_received(self):
inst, sock, map = self._makeOneWithMap()
inst.server = DummyServer()
inst.received(b"GET / HTTP/1.1\r\n\r\n")
self.assertEqual(inst.server.tasks, [inst])
self.assertTrue(inst.requests)
def test_received_no_chunk(self):
inst, sock, map = self._makeOneWithMap()
self.assertEqual(inst.received(b""), False)
def test_received_preq_not_completed(self):
inst, sock, map = self._makeOneWithMap()
inst.server = DummyServer()
preq = DummyParser()
inst.request = preq
preq.completed = False
preq.empty = True
inst.received(b"GET / HTTP/1.1\r\n\r\n")
self.assertEqual(inst.requests, [])
self.assertEqual(inst.server.tasks, [])
def test_received_preq_completed_empty(self):
inst, sock, map = self._makeOneWithMap()
inst.server = DummyServer()
preq = DummyParser()
inst.request = preq
preq.completed = True
preq.empty = True
inst.received(b"GET / HTTP/1.1\r\n\r\n")
self.assertEqual(inst.request, None)
self.assertEqual(inst.server.tasks, [])
def test_received_preq_error(self):
inst, sock, map = self._makeOneWithMap()
inst.server = DummyServer()
preq = DummyParser()
inst.request = preq
preq.completed = True
preq.error = True
inst.received(b"GET / HTTP/1.1\r\n\r\n")
self.assertEqual(inst.request, None)
self.assertEqual(len(inst.server.tasks), 1)
self.assertTrue(inst.requests)
def test_received_preq_completed_connection_close(self):
inst, sock, map = self._makeOneWithMap()
inst.server = DummyServer()
preq = DummyParser()
inst.request = preq
preq.completed = True
preq.empty = True
preq.connection_close = True
inst.received(b"GET / HTTP/1.1\r\n\r\n" + b"a" * 50000)
self.assertEqual(inst.request, None)
self.assertEqual(inst.server.tasks, [])
def test_received_headers_finished_expect_continue_false(self):
inst, sock, map = self._makeOneWithMap()
inst.server = DummyServer()
preq = DummyParser()
inst.request = preq
preq.expect_continue = False
preq.headers_finished = True
preq.completed = False
preq.empty = False
preq.retval = 1
inst.received(b"GET / HTTP/1.1\r\n\r\n")
self.assertEqual(inst.request, preq)
self.assertEqual(inst.server.tasks, [])
self.assertEqual(inst.outbufs[0].get(100), b"")
def test_received_headers_finished_expect_continue_true(self):
inst, sock, map = self._makeOneWithMap()
inst.server = DummyServer()
preq = DummyParser()
inst.request = preq
preq.expect_continue = True
preq.headers_finished = True
preq.completed = False
preq.empty = False
inst.received(b"GET / HTTP/1.1\r\n\r\n")
self.assertEqual(inst.request, preq)
self.assertEqual(inst.server.tasks, [])
self.assertEqual(sock.sent, b"HTTP/1.1 100 Continue\r\n\r\n")
self.assertEqual(inst.sent_continue, True)
self.assertEqual(preq.completed, False)
def test_received_headers_finished_expect_continue_true_sent_true(self):
inst, sock, map = self._makeOneWithMap()
inst.server = DummyServer()
preq = DummyParser()
inst.request = preq
preq.expect_continue = True
preq.headers_finished = True
preq.completed = False
preq.empty = False
inst.sent_continue = True
inst.received(b"GET / HTTP/1.1\r\n\r\n")
self.assertEqual(inst.request, preq)
self.assertEqual(inst.server.tasks, [])
self.assertEqual(sock.sent, b"")
self.assertEqual(inst.sent_continue, True)
self.assertEqual(preq.completed, False)
def test_service_with_one_request(self):
inst, sock, map = self._makeOneWithMap()
request = DummyRequest()
inst.task_class = DummyTaskClass()
inst.requests = [request]
inst.service()
self.assertEqual(inst.requests, [])
self.assertTrue(request.serviced)
self.assertTrue(request.closed)
def test_service_with_one_error_request(self):
inst, sock, map = self._makeOneWithMap()
request = DummyRequest()
request.error = DummyError()
inst.error_task_class = DummyTaskClass()
inst.requests = [request]
inst.service()
self.assertEqual(inst.requests, [])
self.assertTrue(request.serviced)
self.assertTrue(request.closed)
def test_service_with_multiple_requests(self):
inst, sock, map = self._makeOneWithMap()
request1 = DummyRequest()
request2 = DummyRequest()
inst.task_class = DummyTaskClass()
inst.requests = [request1, request2]
inst.service()
inst.service()
self.assertEqual(inst.requests, [])
self.assertTrue(request1.serviced)
self.assertTrue(request2.serviced)
self.assertTrue(request1.closed)
self.assertTrue(request2.closed)
def test_service_with_request_raises(self):
inst, sock, map = self._makeOneWithMap()
inst.adj.expose_tracebacks = False
inst.server = DummyServer()
request = DummyRequest()
inst.requests = [request]
inst.task_class = DummyTaskClass(ValueError)
inst.task_class.wrote_header = False
inst.error_task_class = DummyTaskClass()
inst.logger = DummyLogger()
inst.service()
self.assertTrue(request.serviced)
self.assertEqual(inst.requests, [])
self.assertEqual(len(inst.logger.exceptions), 1)
self.assertTrue(inst.server.trigger_pulled)
self.assertTrue(inst.last_activity)
self.assertFalse(inst.will_close)
self.assertEqual(inst.error_task_class.serviced, True)
self.assertTrue(request.closed)
def test_service_with_requests_raises_already_wrote_header(self):
inst, sock, map = self._makeOneWithMap()
inst.adj.expose_tracebacks = False
inst.server = DummyServer()
request = DummyRequest()
inst.requests = [request]
inst.task_class = DummyTaskClass(ValueError)
inst.error_task_class = DummyTaskClass()
inst.logger = DummyLogger()
inst.service()
self.assertTrue(request.serviced)
self.assertEqual(inst.requests, [])
self.assertEqual(len(inst.logger.exceptions), 1)
self.assertTrue(inst.server.trigger_pulled)
self.assertTrue(inst.last_activity)
self.assertTrue(inst.close_when_flushed)
self.assertEqual(inst.error_task_class.serviced, False)
self.assertTrue(request.closed)
def test_service_with_requests_raises_didnt_write_header_expose_tbs(self):
inst, sock, map = self._makeOneWithMap()
inst.adj.expose_tracebacks = True
inst.server = DummyServer()
request = DummyRequest()
inst.requests = [request]
inst.task_class = DummyTaskClass(ValueError)
inst.task_class.wrote_header = False
inst.error_task_class = DummyTaskClass()
inst.logger = DummyLogger()
inst.service()
self.assertTrue(request.serviced)
self.assertFalse(inst.will_close)
self.assertEqual(inst.requests, [])
self.assertEqual(len(inst.logger.exceptions), 1)
self.assertTrue(inst.server.trigger_pulled)
self.assertTrue(inst.last_activity)
self.assertEqual(inst.error_task_class.serviced, True)
self.assertTrue(request.closed)
def test_service_with_requests_raises_didnt_write_header(self):
inst, sock, map = self._makeOneWithMap()
inst.adj.expose_tracebacks = False
inst.server = DummyServer()
request = DummyRequest()
inst.requests = [request]
inst.task_class = DummyTaskClass(ValueError)
inst.task_class.wrote_header = False
inst.logger = DummyLogger()
inst.service()
self.assertTrue(request.serviced)
self.assertEqual(inst.requests, [])
self.assertEqual(len(inst.logger.exceptions), 1)
self.assertTrue(inst.server.trigger_pulled)
self.assertTrue(inst.last_activity)
self.assertTrue(inst.close_when_flushed)
self.assertTrue(request.closed)
def test_service_with_request_raises_disconnect(self):
from waitress.channel import ClientDisconnected
inst, sock, map = self._makeOneWithMap()
inst.adj.expose_tracebacks = False
inst.server = DummyServer()
request = DummyRequest()
inst.requests = [request]
inst.task_class = DummyTaskClass(ClientDisconnected)
inst.error_task_class = DummyTaskClass()
inst.logger = DummyLogger()
inst.service()
self.assertTrue(request.serviced)
self.assertEqual(inst.requests, [])
self.assertEqual(len(inst.logger.infos), 1)
self.assertTrue(inst.server.trigger_pulled)
self.assertTrue(inst.last_activity)
self.assertFalse(inst.will_close)
self.assertEqual(inst.error_task_class.serviced, False)
self.assertTrue(request.closed)
def test_service_with_request_error_raises_disconnect(self):
from waitress.channel import ClientDisconnected
inst, sock, map = self._makeOneWithMap()
inst.adj.expose_tracebacks = False
inst.server = DummyServer()
request = DummyRequest()
err_request = DummyRequest()
inst.requests = [request]
inst.parser_class = lambda x: err_request
inst.task_class = DummyTaskClass(RuntimeError)
inst.task_class.wrote_header = False
inst.error_task_class = DummyTaskClass(ClientDisconnected)
inst.logger = DummyLogger()
inst.service()
self.assertTrue(request.serviced)
self.assertTrue(err_request.serviced)
self.assertEqual(inst.requests, [])
self.assertEqual(len(inst.logger.exceptions), 1)
self.assertEqual(len(inst.logger.infos), 0)
self.assertTrue(inst.server.trigger_pulled)
self.assertTrue(inst.last_activity)
self.assertFalse(inst.will_close)
self.assertEqual(inst.task_class.serviced, True)
self.assertEqual(inst.error_task_class.serviced, True)
self.assertTrue(request.closed)
def test_cancel_no_requests(self):
inst, sock, map = self._makeOneWithMap()
inst.requests = ()
inst.cancel()
self.assertEqual(inst.requests, [])
def test_cancel_with_requests(self):
inst, sock, map = self._makeOneWithMap()
inst.requests = [None]
inst.cancel()
self.assertEqual(inst.requests, [])
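# Inherits and reruns every TestHTTPChannel test; the additional tests below
# enable channel_request_lookahead to exercise client-disconnect detection.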
class TestHTTPChannelLookahead(TestHTTPChannel):
def app_check_disconnect(self, environ, start_response):
"""
Application that checks for client disconnection every
second for up to two seconds.
"""
import time
if hasattr(self, "app_started"):
self.app_started.set()
try:
request_body_size = int(environ.get("CONTENT_LENGTH", 0))
except ValueError:
request_body_size = 0
self.request_body = environ["wsgi.input"].read(request_body_size)
self.disconnect_detected = False
check = environ["waitress.client_disconnected"]
if environ["PATH_INFO"] == "/sleep":
for i in range(3):
if i != 0:
time.sleep(1)
if check():
self.disconnect_detected = True
break
body = b"finished"
cl = str(len(body))
start_response(
"200 OK", [("Content-Length", cl), ("Content-Type", "text/plain")]
)
return [body]
def _make_app_with_lookahead(self):
"""
Setup a channel with lookahead and store it and the socket in self
"""
adj = DummyAdjustments()
adj.channel_request_lookahead = 5
channel, sock, map = self._makeOneWithMap(adj=adj)
channel.server.application = self.app_check_disconnect
self.channel = channel
self.sock = sock
def _send(self, *lines):
"""
Send lines through the socket with correct line endings
"""
self.sock.send("".join(line + "\r\n" for line in lines).encode("ascii"))
def test_client_disconnect(self, close_before_start=False):
"""Disconnect the socket after starting the task."""
import threading
self._make_app_with_lookahead()
self._send(
"GET /sleep HTTP/1.1",
"Host: localhost:8080",
"",
)
self.assertTrue(self.channel.readable())
self.channel.handle_read()
self.assertEqual(len(self.channel.server.tasks), 1)
self.app_started = threading.Event()
self.disconnect_detected = False
thread = threading.Thread(target=self.channel.server.tasks[0].service)
if not close_before_start:
thread.start()
self.assertTrue(self.app_started.wait(timeout=5))
# Close the socket, check that the channel is still readable due to the
# lookahead and read it, which marks the channel as closed.
self.sock.close()
self.assertTrue(self.channel.readable())
self.channel.handle_read()
if close_before_start:
thread.start()
thread.join()
if close_before_start:
self.assertFalse(self.app_started.is_set())
else:
self.assertTrue(self.disconnect_detected)
def test_client_disconnect_immediate(self):
"""
The same test, but this time we close the socket even before processing
started. The app should not be executed.
"""
self.test_client_disconnect(close_before_start=True)
def test_lookahead_continue(self):
"""
Send two requests to a channel with lookahead and use an
expect-continue on the second one, making sure the responses still come
in the right order.
"""
self._make_app_with_lookahead()
self._send(
"POST / HTTP/1.1",
"Host: localhost:8080",
"Content-Length: 1",
"",
"x",
"POST / HTTP/1.1",
"Host: localhost:8080",
"Content-Length: 1",
"Expect: 100-continue",
"",
)
self.channel.handle_read()
self.assertEqual(len(self.channel.requests), 1)
self.channel.server.tasks[0].service()
data = self.sock.recv(256).decode("ascii")
self.assertTrue(data.endswith("HTTP/1.1 100 Continue\r\n\r\n"))
self.sock.send(b"x")
self.channel.handle_read()
self.assertEqual(len(self.channel.requests), 1)
self.channel.server.tasks[0].service()
self.channel._flush_some()
data = self.sock.recv(256).decode("ascii")
self.assertEqual(data.split("\r\n")[-1], "finished")
self.assertEqual(self.request_body, b"x")
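# ---- Test doubles used by the suites above ----
# DummySock records bytes passed to send() and replays them from recv(),
# letting tests assert on exactly what the channel wrote.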
class DummySock:
blocking = False
closed = False
def __init__(self):
self.sent = b""
def setblocking(self, *arg):
self.blocking = True
def fileno(self):
return 100
def getpeername(self):
return "127.0.0.1"
def getsockopt(self, level, option):
return 2048
def close(self):
self.closed = True
def send(self, data):
self.sent += data
return len(data)
def recv(self, buffer_size):
result = self.sent[:buffer_size]
self.sent = self.sent[buffer_size:]
return result
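# Lock stand-in that records acquire/release/notify/wait calls so tests can
# assert on the channel's locking behaviour.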
class DummyLock:
notified = False
def __init__(self, acquirable=True):
self.acquirable = acquirable
def acquire(self, val):
self.val = val
self.acquired = True
return self.acquirable
def release(self):
self.released = True
def notify(self):
self.notified = True
def wait(self):
self.waited = True
def __exit__(self, type, val, traceback):
self.acquire(True)
def __enter__(self):
pass
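# Output-buffer stand-in; can be armed to raise from get() to simulate a
# failing send path.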
class DummyBuffer:
closed = False
def __init__(self, data, toraise=None):
self.data = data
self.toraise = toraise
def get(self, *arg):
if self.toraise:
raise self.toraise
data = self.data
self.data = b""
return data
def skip(self, num, x):
self.skipped = num
def __len__(self):
return len(self.data)
def close(self):
self.closed = True
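# Fixed stand-in for waitress.adjustments carrying the tunables the channel
# reads at runtime.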
class DummyAdjustments:
outbuf_overflow = 1048576
outbuf_high_watermark = 1048576
inbuf_overflow = 512000
cleanup_interval = 900
url_scheme = "http"
channel_timeout = 300
log_socket_errors = True
recv_bytes = 8192
send_bytes = 1
expose_tracebacks = True
ident = "waitress"
max_request_header_size = 10000
url_prefix = ""
channel_request_lookahead = 0
max_request_body_size = 1048576
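# Captures queued tasks and pull_trigger() calls made by the channel.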
class DummyServer:
trigger_pulled = False
adj = DummyAdjustments()
effective_port = 8080
server_name = ""
def __init__(self):
self.tasks = []
self.active_channels = {}
def add_task(self, task):
self.tasks.append(task)
def pull_trigger(self):
self.trigger_pulled = True
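# Configurable HTTP parser stand-in; tests set completed/empty/error flags
# directly instead of parsing real bytes.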
class DummyParser:
version = 1
data = None
completed = True
empty = False
headers_finished = False
expect_continue = False
retval = None
error = None
connection_close = False
def received(self, data):
self.data = data
if self.retval is not None:
return self.retval
return len(data)
class DummyRequest:
error = None
path = "/"
version = "1.0"
closed = False
def __init__(self):
self.headers = {}
def close(self):
self.closed = True
class DummyLogger:
def __init__(self):
self.exceptions = []
self.infos = []
self.warnings = []
    def info(self, msg):
        self.infos.append(msg)
    def warning(self, msg):
        # The warnings list was initialized but had no recording method.
        self.warnings.append(msg)
    def exception(self, msg):
        self.exceptions.append(msg)
class DummyError:
code = "431"
reason = "Bleh"
body = "My body"
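# Doubles as both task factory and task: calling it returns itself, and
# service() records the call and can raise on demand.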
class DummyTaskClass:
wrote_header = True
close_on_finish = False
serviced = False
def __init__(self, toraise=None):
self.toraise = toraise
def __call__(self, channel, request):
self.request = request
return self
def service(self):
self.serviced = True
self.request.serviced = True
if self.toraise:
raise self.toraise
|